diff --git "a/6203.jsonl" "b/6203.jsonl" new file mode 100644--- /dev/null +++ "b/6203.jsonl" @@ -0,0 +1,619 @@ +{"seq_id":"144705994","text":"from main import cursor\nfrom threadfunk import diameter\nfrom threadfunk import pitch\n\n\ndef relateid(diametername, pitchname):\n \"\"\"\n :type diametername: object\n :type pitchname: object\n \"\"\"\n tid = diameter.getid(diametername)\n pid = pitch.getid(pitchname)\n\n cursor.execute('''SELECT Pitch2ThreadDiameterID\n FROM LocknutData.dbo.Pitch2ThreadDiameter\n WHERE PitchID = %i\n AND ThreadDiameterID = %i''' % (pid, tid))\n if cursor.rowcount == 0:\n cursor.execute('''INSERT INTO LocknutData.dbo.Pitch2ThreadDiameter\n (PitchID, ThreadDiameterID)\n VALUES (%i, %i);'''\n % (pid, tid))\n cursor.commit()\n cursor.execute('''SELECT Pitch2ThreadDiameterID\n FROM LocknutData.dbo.Pitch2ThreadDiameter\n WHERE PitchID = %i\n AND ThreadDiameterID = %i''' % (pid, tid))\n return cursor.fetchone()[0]\n\n\ndef getid(designation):\n cursor.execute('''SELECT PitchThreadDiameterDesignationID \n FROM LocknutData.dbo.PitchThreadDiameterDesignation\n WHERE Name = '{}';'''\n .format(designation))\n if cursor.rowcount == 0:\n cursor.execute('''INSERT INTO LocknutData.dbo.PitchThreadDiameterDesignation\n (Name)\n VALUES ('{}');'''\n .format(designation))\n cursor.commit()\n cursor.execute('''SELECT PitchThreadDiameterDesignationID \n FROM LocknutData.dbo.PitchThreadDiameterDesignation\n WHERE Name = '{}';'''\n .format(designation))\n return cursor.fetchone()[0]\n cursor.close()\n\n\n#def name(part):\n","sub_path":"designation.py","file_name":"designation.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"146604415","text":"import os\nimport sys\nimport subprocess\n\n\ndef test_facs():\n \"\"\"\n this is a simple FACS test users use a `test` borough\n \"\"\"\n location = 'test'\n TS = 'extend-lockdown'\n TM = 1\n ci_multiplier = 0.475\n output_dir = '.'\n data_dir = 'covid_data'\n start_date = \"3/1/2020\"\n facs_args = ['--generic_outfile']\n simulation_period = 60\n\n facs_args.append(\"--quicktest\")\n facs_args.append(\"--location=%s\" % (location))\n facs_args.append(\"--transition_scenario=%s\" % (TS))\n facs_args.append(\"--transition_mode=%d\" % (TM))\n facs_args.append(\"--ci_multiplier=%f\" % (ci_multiplier))\n facs_args.append(\"--output_dir=%s\" % (output_dir))\n facs_args.append(\"--data_dir=%s\" % (data_dir))\n facs_args.append(\"--start_date=%s\" % (start_date))\n facs_args.append(\"--simulation_period=%d\" % (simulation_period))\n\n # prepare CMD\n CMD = [\"python3\", \"run.py\"]\n CMD.extend(facs_args)\n\n proc = subprocess.Popen(CMD,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n\n stderr = '{}'.format(stderr)\n stderr = '{}'.format(stderr)\n\n assert(stderr.find('Simulation complete') >= 0)\n\nif __name__ == \"__main__\":\n test_facs()\n","sub_path":"tests/test_facs.py","file_name":"test_facs.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"510842597","text":"from django.db import models\n\n# Create your models here.\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth import get_user_model\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom .validators import UnicodeUsernameValidator\nfrom django.contrib.auth.models import 
Group\nfrom .manager import UserManager\nfrom django.contrib.auth.models import (\n BaseUserManager, AbstractBaseUser, AbstractUser, PermissionsMixin\n)\n\n\nclass Settings(models.Model):\n max_show_seconds = models.IntegerField(\n _(\"Maximum seconds to show each iamge\"), default=5)\n\n\nclass Album(models.Model):\n title = models.CharField(max_length=100, blank=False, null=False)\n description = models.TextField(_(\"Description\"), blank=True, null=False)\n\n \"\"\" Specifies if this album should be the default album to served to the home page \"\"\"\n is_default = models.BooleanField()\n\n def ___str__(self):\n return self.name\n\n\nclass Image(models.Model):\n imageFile = models.ImageField(upload_to=\"images/\")\n album = models.ForeignKey(\n Album, on_delete=models.CASCADE, blank=False, null=False)\n description = models.TextField(_(\"Description\"), blank=True, null=True)\n dateCreated = models.DateTimeField(\n _(\"Date Created\"), auto_now=False, auto_now_add=True)\n\n def __str__(self):\n return self.imageFile.name\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n email = models.EmailField(\n max_length=255, unique=True, null=False, blank=False,\n error_messages={\n 'unique': _(\"A user with that email already exists.\"),\n },)\n username_validator = UnicodeUsernameValidator()\n\n is_staff = models.BooleanField(\n _('staff status'),\n default=False,\n help_text=_(\n 'Designates whether the user can log into this admin site.'),\n )\n is_active = models.BooleanField(\n _('active'),\n default=True,\n help_text=_(\n 'Designates whether this user should be treated as active. '\n 'Unselect this instead of deleting accounts.'\n ),\n )\n\n date_joined = models.DateTimeField(_('date joined'), default=timezone.now)\n\n REQUIRED_FIELDS = [] # Email & Password are required by default.\n objects = UserManager()\n USERNAME_FIELD = 'email'\n date_joined = models.DateTimeField(auto_now_add=True)\n\n def get_full_name(self):\n # The user is identified by their email address\n return self.email\n\n def get_short_name(self):\n # The user is identified by their email address\n return self.email\n\n def __str__(self): # __unicode__ on Python 2\n return self.email\n\n def email_used(self, email=None):\n \"\"\"\n This methods throws an exception if the given parameters does not\n match any user.\n \"\"\"\n\n if get_user_model().objects.filter(email__iexact=email).first():\n raise ValueError(\"A user with that email already exist.\")\n\n return False\n\n def user_exist(self, email):\n try:\n self.does_user_exist(email)\n # the user does not exist\n return False\n except ValueError as identifier:\n # the user exist\n return True\n\n def __str__(self):\n return self.get_full_name()\n\n def has_perm(self, perm, obj=None):\n \"Does the user have a specific permission?\"\n # Simplest possible answer: Yes, always\n return True\n\n def has_module_perms(self, app_label):\n \"Does the user have permissions to view the app `app_label`?\"\n # Simplest possible answer: Yes, always\n return True\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"645152294","text":"\"\"\"\nLandon Buell\nAlejandro Hausner\nCS 417.01\n31 August 2020\n\"\"\"\n\n #### IMPORTS ####\n\nimport numpy as np\nimport os\nimport sys\n\n #### CLASS DEFINITIONS ####\n\nclass Task04:\n \"\"\" Execution Object For Assignment01 - Question 4 \"\"\"\n\n def __init__(self,file1,file2):\n \"\"\" 
Initialize Class Objects Instance \"\"\"\n self.files = [file1,file2]\n self.fileContents = []\n \n def FilesExists(self):\n \"\"\" Test if List of Files does not exist \"\"\"\n for file in self.files: # each file\n if os.path.isfile(file) == False: # make sure it exists\n raise FileNotFoundError() # raise error\n sys.exit(1) # exit\n return self # return instance\n\n def ReadFiles(self):\n \"\"\" Read each file from either file \"\"\"\n for fileToRead in self.files:\n fileObject = open(fileToRead,mode=\"r\") # file object\n fileContents = fileObject.readlines() # store lines in list\n fileObject.close()\n for line in fileContents: # each line\n line.rstrip(\"\\n\")\n self.fileContents.append(fileContents) # add to self\n return self\n\n def SameContents(self):\n \"\"\" Test of 2 files have the same Content \"\"\"\n if len(self.fileContents[0]) != len(self.fileContents[1]): # different num of lines\n return False\n for lineA,lineB in zip(self.fileContents[0],self.fileContents[1]):\n # Iterate through both files\n if lineA == lineB: # lines are the same\n continue # keep going \n else: # not the same\n return False # break, files are diff\n return True # files are the same\n\n #### MAIN EXECUTABLE ####\n \nif __name__ == '__main__':\n\n # Accept Command Line Input\n if len(sys.argv) != 3:\n print('Usage:\\n python diff.py ')\n sys.exit(1)\n\n fileName1 = sys.argv[1]\n fileName2 = sys.argv[2]\n\n # Execution\n Executable = Task04(fileName1,fileName2)\n Executable.FilesExists() # tests if files exist!\n Executable.ReadFiles() # read files\n\n if (Executable.SameContents() == True): # test if lines match\n print(\"Files\",fileName1,\"and\",fileName2,\"are the same!\")\n else:\n print(\"Files\",fileName1,\"and\",fileName2,\"are not the same!\")\n\n \n\n","sub_path":"Assignment1/Assignment01/q4-diff.py","file_name":"q4-diff.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"149145331","text":"'''Utilizando for\nfor c in (1,10)\n print(c)\nprint('Fim')'''\n\n#Utilizando WHILE para executar os comandos acima\n'''c = 1\nwhile c < 10:\n print(c)\n c += 1\nprint('Fim')'''\n\nqtdPar = 0\nqtdImpar = 0\ntotNum = 0\nn = 1\nwhile n != 0:\n n = int(input('digite o número: '))\n if n != 0:\n totNum += 1\n if n % 2 == 0:\n qtdPar += 1\n else:\n qtdImpar += 1\nprint('Programa Encerrado! \\nTotal de valores digitados: {} \\nTotal de valores pares: {} \\nTotal de valores ímpares: {}'.format(totNum,qtdPar,qtdImpar))\n\n","sub_path":"Aulas/Estruturas de Controle/Aula14a.py","file_name":"Aula14a.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"313936269","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/extenteten/softmax.py\n# Compiled at: 2017-01-06 05:01:09\n# Size of source mod 2**32: 877 bytes\nimport tensorflow as tf\nfrom . 
import batch\nfrom .util import static_rank, func_scope, dtype_min, dtype_epsilon\n__all__ = [\n 'softmax']\n\n@func_scope()\ndef softmax(vector, sequence_length=None):\n assert static_rank(vector) == 2\n if sequence_length is None:\n return tf.nn.softmax(vector)\n else:\n return _dynamic_softmax(vector, sequence_length)\n\n\n@func_scope()\ndef _dynamic_softmax(vector, sequence_length):\n mask_ = tf.cast(tf.sequence_mask(sequence_length, tf.shape(vector)[1]), vector.dtype)\n vector_with_min = mask_ * vector + (1 - mask_) * dtype_min(vector.dtype)\n unnormal_dist = tf.exp(vector_with_min - batch.max(vector_with_min, keep_dims=True)) * mask_\n return unnormal_dist / (batch.sum(unnormal_dist, keep_dims=True) + dtype_epsilon(unnormal_dist.dtype))","sub_path":"pycfiles/tensorflow_extenteten-0.0.22-py3.6/softmax.cpython-36.py","file_name":"softmax.cpython-36.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"36787576","text":"import re\r\n\r\ndef convert_special_word(val):\r\n if val.lower() == 'null': return None\r\n if val.lower() == 'false': return False\r\n if val.lower() == 'true': return True\r\n return val\r\n\r\ndef collect_request(qdict): #qdict must be a QueryDict\r\n\r\n #print 'collect_request: qdict = ',qdict\r\n pt = re.compile(r'(?P[a-zA-Z0-9_\\-]+)\\[(?P[^\\[\\]]+)\\]')\r\n ret = {}\r\n for k,v in qdict.lists():\r\n #print 'collect_request > k=%s , v=%s' % (k,v)\r\n #if k in ('self','request'): continue\r\n v = [ convert_special_word(x) for x in v ]\r\n if len(v)==1:\r\n v = v[0]\r\n m = pt.match(k)\r\n if m:\r\n name = m.group('name')\r\n key = m.group('key')\r\n #print 'name=%s,key=%s' % (name,key)\r\n if ret.has_key(name):\r\n ret[name][key] = v\r\n else:\r\n ret[name] = {key:v}\r\n else:\r\n ret[k] = v\r\n #print 'collect_request: ret = ', ret\r\n return ret","sub_path":"phoenix/handlers/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"272162843","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport urlparse\n\nimport raven\nimport requests\nimport BeautifulSoup\n\nimport post\nimport attachment\n\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9'\n\nclass PostListParser(object):\n\n def __init__(self, url=None):\n self.url = url\n \n self.hostname = urlparse.urlparse(url=self.url).hostname\n self.session = requests.Session()\n self.headers = {\n 'User-Agent': USER_AGENT,\n 'Referer': self.url\n }\n\n def parse(self):\n response = self._get_response()\n response.encoding = 'euc-kr'\n if not response.status_code / 100 == 2:\n return []\n\n doc = BeautifulSoup.BeautifulSoup(response.text)\n \n board_list = doc.find('div', attrs={'class': 'bbs'})\n if not board_list:\n return []\n \n board_list = board_list.find('ul', attrs={'class': 'bbsList'})\n if not board_list:\n return []\n\n elements = list(reversed(board_list.findAll('li')))\n\n posts = []\n\n for e in elements:\n a = e.find('a')\n if not a:\n continue\n\n href = a.get('href')\n if not href:\n continue\n\n url = 'http://' + self.hostname + '/new/' + href\n \n p = post.Post(title=None, url=url)\n posts.append(p)\n\n return posts\n\n def _get_response(self):\n return self.session.get(self.url, headers=self.headers)\n \nclass PostParser(object):\n\n def __init__(self, session=None, post=None):\n 
self.session = session\n self.post = post\n\n self.hostname = urlparse.urlparse(url=self.post.url).hostname\n self.headers = {\n 'User-Agent': USER_AGENT,\n 'Referer': self.post.url\n }\n\n def parse(self):\n response = self._get_response(self.post.url)\n response.encoding = 'euc-kr'\n if not response.status_code / 100 == 2:\n return self.post\n\n doc = BeautifulSoup.BeautifulSoup(response.text)\n board_view = doc.find('div', attrs={'class': 'bbs view'})\n \n if not board_view:\n return self.post\n \n title = board_view.find('h4').text.strip()\n title = str(unicode(title))\n \n info = board_view.find('div', attrs={'class': 'info'})\n info_text = info.text.strip()\n info_text = str(unicode(info_text))\n \n title = title.replace(info_text, '').strip()\n title = ' '.join(title.split())\n \n post = board_view.find('div', attrs={'class': 'cont'})\n\n # links\n links = {}\n \n urls = []\n for a in info.findAll('a'):\n href = a.get('href')\n if not href:\n continue\n urls.append(href)\n\n for a in post.findAll('a'):\n href = a.get('href')\n if not href:\n continue\n urls.append(href)\n\n for u in urls:\n lower = u.lower()\n if '.jpg' in lower or '.png' in lower or '.gif' in lower:\n continue\n if not 'http' in u:\n continue\n if not 's.ppomppu.co.kr' in u:\n if not links.get(u, None):\n links[u] = True\n continue\n response = requests.get(u, params={'encode':'on'})\n if not response.status_code / 100 == 2:\n continue\n splited = response.content.split(\"document.location.replace('\")[-1]\n splited = splited.split(\"')\")[0]\n if not 'http' in splited:\n continue\n if splited.count('.') == 0:\n continue\n if not links.get(splited, None):\n links[splited] = True\n\n # images\n images = []\n for img in post.findAll('img'):\n src = img.get('src')\n if not src:\n continue\n if not 'http' in src:\n continue\n response = self._get_response(src)\n if not response.status_code / 100 == 2:\n continue\n if not response.content:\n continue\n image = attachment.Image(response.content)\n images.append(image)\n\n self.post.title = title\n self.post.links = links.keys()\n self.post.images = images\n return self.post\n\n def _get_response(self, url=None):\n return self.session.get(url, headers=self.headers)\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"488048223","text":"\"\"\"\nUseful form fields for use with the Peewee ORM.\n(cribbed from wtforms.ext.django.fields)\n\"\"\"\nimport operator\nimport warnings\n\nfrom wtforms import widgets\nfrom wtforms.fields import SelectFieldBase\nfrom wtforms.validators import ValidationError\n\n\n__all__ = (\n 'ModelSelectField', 'SelectQueryField',\n)\n\n\nclass SelectQueryField(SelectFieldBase):\n \"\"\"\n Given a SelectQuery either at initialization or inside a view, will display a\n select drop-down field of choices. The `data` property actually will\n store/keep an ORM model instance, not the ID. Submitting a choice which is\n not in the queryset will result in a validation error.\n\n Specify `get_label` to customize the label associated with each option. If\n a string, this is the name of an attribute on the model object to use as\n the label text. If a one-argument callable, this callable will be passed\n model instance and expected to return the label text. Otherwise, the model\n object's `__unicode__` will be used.\n\n If `allow_blank` is set to `True`, then a blank choice will be added to the\n top of the list. 
Selecting this choice will result in the `data` property\n being `None`. The label for the blank choice can be set by specifying the\n `blank_text` parameter.\n \"\"\"\n widget = widgets.Select()\n\n def __init__(self, label=None, validators=None, query=None, get_label=None, allow_blank=False, blank_text=u'', **kwargs):\n super(SelectQueryField, self).__init__(label, validators, **kwargs)\n self.allow_blank = allow_blank\n self.blank_text = blank_text\n self.query = query\n self.model = query.model\n self._set_data(None)\n\n if get_label is None:\n self.get_label = lambda o: unicode(o)\n elif isinstance(get_label, basestring):\n self.get_label = operator.attrgetter(get_label)\n else:\n self.get_label = get_label\n\n def get_model(self, pk):\n try:\n return self.query.get(**{\n self.model._meta.pk_name: pk\n })\n except self.query.model.DoesNotExist:\n pass\n\n def _get_data(self):\n if self._formdata is not None:\n self._set_data(self.get_model(self._formdata))\n return self._data\n\n def _set_data(self, data):\n self._data = data\n self._formdata = None\n\n data = property(_get_data, _set_data)\n \n def __call__(self, **kwargs):\n if 'value' in kwargs:\n self._set_data(self.get_model(kwargs['value']))\n return self.widget(self, **kwargs)\n\n def iter_choices(self):\n if self.allow_blank:\n yield (u'__None', self.blank_text, self.data is None)\n \n for obj in self.query.clone():\n yield (obj.get_pk(), self.get_label(obj), obj == self.data)\n\n def process_formdata(self, valuelist):\n if valuelist:\n if valuelist[0] == '__None':\n self.data = None\n else:\n self._data = None\n self._formdata = int(valuelist[0])\n\n def pre_validate(self, form):\n if self.data is not None:\n if not self.query.where(**{self.model._meta.pk_name: self.data.get_pk()}).exists():\n raise ValidationError(self.gettext('Not a valid choice'))\n elif not self.allow_blank:\n raise ValidationError(self.gettext('Selection cannot be blank'))\n\n\nclass SelectMultipleQueryField(SelectQueryField):\n widget = widgets.Select(multiple=True)\n \n def __init__(self, *args, **kwargs):\n kwargs.pop('allow_blank', None)\n super(SelectMultipleQueryField, self).__init__(*args, **kwargs)\n \n def get_model_list(self, pk_list):\n return list(self.query.where(**{\n '%s__in' % self.model._meta.pk_name: pk_list\n }))\n \n def _get_data(self):\n if self._formdata is not None:\n self._set_data(self.get_model_list(self._formdata))\n return self._data or []\n\n def _set_data(self, data):\n self._data = data\n self._formdata = None\n\n data = property(_get_data, _set_data)\n \n def __call__(self, **kwargs):\n if 'value' in kwargs:\n self._set_data(self.get_model_list(kwargs['value']))\n return self.widget(self, **kwargs)\n\n def iter_choices(self):\n for obj in self.query.clone():\n yield (obj.get_pk(), self.get_label(obj), obj in self.data)\n\n def process_formdata(self, valuelist):\n if valuelist:\n self._data = []\n self._formdata = map(int, valuelist)\n\n def pre_validate(self, form):\n if self.data:\n if not self.query.where(**{'%s__in' % self.model._meta.pk_name: [\n model.get_pk() for model in self.data\n ]}).count() == len(self.data):\n raise ValidationError(self.gettext('Not a valid choice'))\n\n\nclass ModelSelectField(SelectQueryField):\n \"\"\"\n Like a SelectQueryField, except takes a model class instead of a\n queryset and lists everything in it.\n \"\"\"\n def __init__(self, label=None, validators=None, model=None, **kwargs):\n super(ModelSelectField, self).__init__(label, validators, query=model.select(), **kwargs)\n\n\nclass 
ModelSelectMultipleField(SelectMultipleQueryField):\n \"\"\"\n Like a SelectMultipleQueryField, except takes a model class instead of a\n queryset and lists everything in it.\n \"\"\"\n def __init__(self, label=None, validators=None, model=None, **kwargs):\n super(ModelSelectMultipleField, self).__init__(label, validators, query=model.select(), **kwargs)\n","sub_path":"wtfpeewee/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"547030306","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 8 10:24:19 2017\r\n\r\n@author: DVJ8411\r\n\"\"\"\r\nimport random\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn import linear_model\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.metrics import roc_auc_score\r\n\r\n\"\"\"\r\nImportation de la BD\r\n\"\"\"\r\nbd = pd.read_csv(\"BD CELI_NOUV v1.csv\")\r\n\r\n#base = bd[['ECHANT']]\r\n\r\nbd_d = bd.loc[bd['ECHANT'] == 'D']\r\nbd_v = bd.loc[bd['ECHANT'] == 'V']\r\ny_d = bd_d[\"DEFAUTS\"]\r\ny_v = bd_v[\"DEFAUTS\"]\r\n\r\n\r\n\r\n\"\"\"\r\nCréation de la liste des variables explicatives (débutant par T)\r\n\"\"\"\r\nvariables_names = list(bd)\r\nvar_n = []\r\nfor t in variables_names:\r\n if t[:1] == 'T':\r\n var_n.append(t)\r\n\r\n\r\n#choix_var = ['var1','var2','var3']\r\n#choix_var = np.asarray(choix_var)\r\n\r\n\r\n#resu_roc = np.zeros(shape=(10,2))\r\n\r\n\r\n#choix_var = ['var1','var2','var3','var4','var5','var6']\r\nchoix_var = ['var1','var2','var3']\r\n\r\ncv = pd.DataFrame(columns=choix_var)\r\n\r\nroc_d_v = ['ROC_D','ROC_V']\r\n\r\nrroc = pd.DataFrame(columns=roc_d_v)\r\n\r\n\r\nn_iter = 10 # NOMBRE de SIMULATIONS\r\n \r\nfor i in range(n_iter):\r\n \r\n var_rnd = random.sample(var_n, 3) # nombre de variable\r\n\r\n cv.loc[i]=var_rnd\r\n\r\n x_rnd = bd[['ECHANT']]\r\n\r\n for column in var_rnd:\r\n dummies = pd.get_dummies(bd[column], prefix = column, drop_first = True)\r\n x_rnd[dummies.columns] = dummies\r\n \r\n x_d = x_rnd[x_rnd['ECHANT'] == 'D']\r\n x_v = x_rnd[x_rnd['ECHANT'] == 'V']\r\n x_d = x_d.drop(['ECHANT'], 1)\r\n x_v = x_v.drop(['ECHANT'], 1)\r\n \r\n \r\n clf = MLPClassifier(solver='lbfgs', activation='logistic', alpha=1e-5, hidden_layer_sizes=(3,), random_state=1,\r\n max_iter=1000)\r\n clf.fit(x_d, y_d)\r\n \r\n pdn_d=clf.predict_proba(x_d) \r\n predn_d = pdn_d[:,1]\r\n roc_d=roc_auc_score(y_d, predn_d)#, sample_weight= We_d))\r\n pdn_v=clf.predict_proba(x_v) \r\n predn_v = pdn_v[:,1]\r\n roc_v=roc_auc_score(y_v, predn_v)#, sample_weight= We_d))\r\n\r\n #resu_roc[i,0] = roc_d\r\n #resu_roc[i,1] = roc_v\r\n temp_roc = [roc_d,roc_v] \r\n rroc.loc[i]=temp_roc\r\n\r\n#result = rroc.sort(['ROC_V'], ascending=[0]) #1 pour croissant \r\n\r\nresult = pd.concat([rroc, cv], axis=1) \r\nresult = result.sort(['ROC_D'], ascending=[0]) #1 pour croissant \r\nresult['ROC_D_r'] = result['ROC_D'].rank(method='first', ascending=0) #crée un variable de ranking \r\n\r\n\r\n\r\nn1=random.random() #variable aleatoire entre 0 et 1\r\nif n1 <= 0.25:\r\n p1 = n1 * ((n_iter * 0.10) / 0.25) # 25% de chance de prendre un parent parmi les 10% des meilleurs \r\nelif n1 <= 0.45:\r\n p1 = ( (n1-0.25) * ((n_iter * 0.10) / 0.20) ) + (1000 * 0.10)# 20% pour 11-20%\r\nelif n1 <= 0.60:\r\n p1 = ( (n1-0.45) * ((n_iter * 0.10) / 0.20) ) + (1000 * 0.20)# 15% pour 21-30%\r\nelif n1 <= 0.72:\r\n p1 = ( (n1-0.60) * ((n_iter * 0.10) / 0.20) ) + (1000 * 0.30)# 12% pour 31-40%\r\nelif n1 <= 0.82:\r\n 
p1 = ( (n1-0.72) * ((n_iter * 0.10) / 0.20) ) + (1000 * 0.40)# 10% pour 41-50%\r\nelif n1 <= 0.89:\r\n p1 = ( (n1-0.82) * ((n_iter * 0.10) / 0.20) ) + (1000 * 0.50)# 7% pour 51-60%\r\nelif n1 <= 0.94:\r\n p1 = ( (n1-0.89) * ((n_iter * 0.10) / 0.20) ) + (1000 * 0.60)# 5% pour 61-70%\r\nelif n1 <= 0.97:\r\n p1 = ( (n1-0.94) * ((n_iter * 0.10) / 0.20) ) + (1000 * 0.70)# 3% pour 71-80%\r\nelif n1 <= 0.99:\r\n p1 = ( (n1-0.97) * ((n_iter * 0.10) / 0.20) ) + (1000 * 0.80)# 2% pour 81-90%\r\nelif n1 <= 1.00:\r\n p1 = ( (n1-0.99) * ((n_iter * 0.10) / 0.20) ) + (1000 * 0.90)#* 1% pour 91-100%\r\n\r\nprint(n1)\r\nprint(p1)\r\n","sub_path":"Selection variable pour NN v1.py","file_name":"Selection variable pour NN v1.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"533428102","text":"\"\"\"Program for monitoring resources.\n\nThis is a script that will run indefinitely (until interrupted by a signal of some sort), and periodically extract\nsystem resource utilisation information (CPU, memory, etc.), and store them in a database for subsequent use by\nother analysis tools.\n\"\"\"\n\nimport argparse\nimport json\nfrom time import sleep\nimport errno\nimport sys\nimport time\n\nfrom com.mitel.pyrate import logger\nfrom com.mitel.pyrate.arg_parser_util import parse_timespec\nfrom com.mitel.pyrate.resmon_util import start_monitor, list_dbs\nfrom com.mitel.pyrate.rrdutil import get_info\n\n\ndef execute_list_dbs():\n \"\"\"List all the resmon databases found in the working directory.\n\n :return: The list of all the databases found, in JSON string format.\n :rtype: str\n \"\"\"\n db_names = list_dbs()\n db_recs = []\n for db in db_names:\n db_info = {'name': db}\n\n # We use the cpu RRD to get some of the common database parameters such as sample rate and so on.\n info = get_info(db + 'cpu.rrd')\n\n # We only want a subset of all the information from the RRD\n db_info['period'] = info['step']\n db_info['last_update'] = info['last_update']\n db_recs.append(db_info)\n ret = dict()\n ret['dbs'] = db_recs;\n ret['timestamp'] = time.time()\n return json.dumps(ret)\n\n\ndef main(args):\n \"\"\"Main entry point of this program.\n\n :param args: The command-line argument list\n :return: Exit code\n :rtype: int\n \"\"\"\n parser = argparse.ArgumentParser(prog='resmon.py', description='Monitor the system resource utilisation, periodically')\n top = parser\n group1 = top.add_argument_group()\n group1.add_argument('-n', metavar='name', help='Name of the resource monitoring session, artifacts will follow this'\n ' name', type=str)\n group1.add_argument('-p', metavar='period', help='The sampling period of the monitoring process. E.g. 
10s, 1m, 2h.'\n ' Default is 1m', default='1m', type=str)\n group2 = top.add_argument_group()\n group2.add_argument('-l', help='List all the resmon databases found in the working directory and return.',\n action='count')\n arg = parser.parse_args(args)\n\n if arg.l:\n print(execute_list_dbs())\n return 0\n\n if not arg.n:\n logger.error('No name specified')\n return errno.EINVAL\n\n if arg.p:\n period = parse_timespec(arg.p)\n else:\n period = 60 # Default period is 1m\n\n monitor = start_monitor(arg.n, period)\n try:\n while 1: # Run until stopped\n # Wake up periodically and do something, like kicking a watchdog or something like that, not sure what yet.\n sleep(600)\n\n except KeyboardInterrupt:\n logger.info('User interrupted')\n return errno.EINTR\n\n finally:\n monitor.stop()\n logger.info('resmon terminated')\n\n return errno.EINVAL # If we are here we were interrupted by something other than KeyboardInterrupt\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","sub_path":"com/mitel/pyrate/resmon.py","file_name":"resmon.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"377938430","text":"\nfrom scrapy.spider import BaseSpider\nfrom scrapy.contrib.spiders import XMLFeedSpider\nfrom scrapy.selector import XmlXPathSelector\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.contrib.loader import XPathItemLoader\nfrom scrapy.http import Request\n\nfrom scrapy.http import Request\nfrom scrapy import log\n\nimport re\nimport json\nimport copy\n\nfrom getpodcasts.items import PodcastItem\n\n\n\n\n\nclass bbcSpider(XMLFeedSpider):\n\tname = \"bbc_disco\"\n\tfolder = \"disco\"\n\tstart_urls = [\n\t\t\"http://downloads.bbc.co.uk/podcasts/ppg.xml\"\n\t]\n\t# the date format in the feeds\n\tdateFormat = \"%a, %d %b %Y %H:%M:%S\"\n\t\n\tdef parse(self, response):\n\t\tx = XmlXPathSelector(response)\n\t\tx.register_namespace(\"xsi\", \"http://www.w3.org/2001/XMLSchema-instance\")\n\t\t\n\t\t#programs = x.select('./program[position()<3]')\n\t\tprograms = x.select('//program')\n\t\tpodcastCount = str(len(programs))\n\t\ti=0\n\t\tallitems=[]\n\t\tfor program in programs:\n\t\t\ti=i+1\n\t\t\tl = XPathItemLoader(PodcastItem(), selector=program)\n\t\t\t#the aaa allows sorting on id during disco process, meaning this is the auth verion to be suplemented by other docs if poss\n\t\t\tl.add_xpath('id', 'concat(\"aaa_bbc_\",./link[@target=\"feed\"]/@url)') \n\t\t\tl.add_value('audioType', 'disco')\n\t\t\tl.add_xpath('brandId', './systemRef[@systemId=\"pid.brand\"][position()=1]/@key')\n\t\t\tl.add_xpath('brandIds', './systemRef[@systemId=\"pid.brand\"]/@key')\n\t\t\tl.add_xpath('brandFeed', './link[@target=\"feed\"]/@url')\n\t\t\tl.add_xpath('brandName', './title/text()')\n\t\t\tl.add_xpath('brandRegions', './@region')\n\t\t\tl.add_xpath('brandShortName', './shortTitle/text()')\n\t\t\tl.add_xpath('brandDescription', './description/text()')\n\t\t\tl.add_xpath('brandHomepage', './link[@target=\"homepage\"]/@url')\n\t\t\tl.add_xpath('brandImage', './image/@url')\n\t\t\tl.add_xpath('brandTimes', './@frequency')\n\t\t\tl.add_xpath('brandCurrentItem', './link[@target=\"currentItem\"]/@url')\n\t\t\tl.add_xpath('brandLanguage', './@language')\n\t\t\tl.add_xpath('brandAvgDuration', './@typicalDuration')\n\t\t\tl.add_xpath('brandFrequency', './@frequency')\n\t\t\tl.add_xpath('channelId', './network/@id')\n\t\t\tl.add_xpath('channelName', './network/@name')\n\t\t\tl.add_xpath('channelHomepage', 
'concat(\"http://www.bbc.co.uk/\", ./network/@id)')\n\t\t\tl.add_value('ownerId', 'BBC')\n\t\t\tl.add_value('ownerName', 'BBC Radio')\n\t\t\tl.add_value('ownerKey', 'bbc')\n\t\t\tl.add_value('ownerImage', 'http://static.bbci.co.uk/frameworks/barlesque/2.5.10/desktop/3.5/img/blq-blocks_grey_alpha.png')\n\t\t\tl.add_value('ownerHomepage', 'http://www.bbc.co.uk/')\n\t\t\t\n\t\t\tself.log('Discovering bbc [%s of %s] feeds' % (i, podcastCount), level=log.INFO)\n\t\t\n\t\t\n\t\t\titem = l.load_item()\n\t\t\tyield item\n\t\n","sub_path":"scrapy/projects/getpodcasts/spiders/bbc_discovery_spider.py","file_name":"bbc_discovery_spider.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"184219059","text":"import telebot\nimport json\n\n\nwith open('credentials.json', 'r') as credentials_json:\n credentials = json.load(credentials_json)\n\nTOKEN = credentials.get('bot_api_token')\n\nbot = telebot.TeleBot(TOKEN)\n\n\n@bot.message_handler(commands=['start'])\ndef button_click(message):\n response_message = f'Привет, я создан для отправки новых билдов'\n print(message.chat.id)\n bot.send_message(message.chat.id, response_message)\n\nbot.polling()\n","sub_path":"get_chat_id.py","file_name":"get_chat_id.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"215668656","text":"__version__ = \"0.0.2\"\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"weatherbroker\",\n version=__version__,\n author='Altertech Group',\n author_email=\"pr@altertech.com\",\n description=\"Weather broker for Python\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url=\"https://github.com/alttch/weatherbroker\",\n packages=setuptools.find_packages(),\n license='Apache License 2.0',\n install_requires=['requests'],\n classifiers=(\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Atmospheric Science\"\n ),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"465787445","text":"import sys\n\nORG_PATH = sys.argv[1]\nNEW_PATH = sys.argv[2]\nDIFF_PATH = sys.argv[3]\n\n\ndef find_sequences(array):\n sequences = [1]\n counter = 1\n for i in range(0, len(array)-1):\n if array[i] + 1 == array[i+1]:\n sequences[counter-1] += 1\n else:\n sequences.append(1)\n counter += 1\n return sequences\n\n\ndef what_to_add(org_array, final_index):\n add_index = {}\n indexes = final_index[-1].split(\"//\")\n for index in indexes:\n parts = index.split(\"-\")\n if len(parts) == 1:\n if parts[0] != \"\":\n add_index[str(parts[0])] = org_array[int(parts[0])].replace(\"~~~c~~~\", \"\")\n elif len(parts) == 2:\n data = \"\"\n for i in range(int(parts[0]), int(parts[1]) + 1):\n data += org_array[i].replace(\"~~~c~~~\", \"\")\n add_index[str(parts[0]) + \"-\" + str(parts[1])] = data\n return add_index\n\n\ndef add_to_final_index(final_index, array):\n array_sq = find_sequences(array)\n counter = 0\n i = 0\n final_index.append(\"\")\n while i < len(array) and counter < len(array_sq):\n if array_sq[counter] == 0:\n final_index[-1] += str(array[i]) + \"//\"\n else:\n final_index[-1] += str(array[i]) + \"-\" + str((array[i] + 
array_sq[counter]) - 1) + \"//\"\n i += array_sq[counter]\n counter += 1\n return final_index\n\n\ndef create_final_index(org_array, no_change, add, remove):\n final_index = []\n final_index = add_to_final_index(final_index, no_change)\n final_index = add_to_final_index(final_index, remove)\n final_index = add_to_final_index(final_index, add)\n final_index[2] = what_to_add(org_array, final_index)\n print(final_index)\n return final_index\n\n\ndef distance_to_closer(index, array, value):\n max_value = len(array) - index\n for i in range(max_value):\n try:\n if value == array[index + i]:\n return i\n except IndexError:\n i = max_value\n return -1\n\n\ndef same(org_lines, new_lines):\n same_lines_index = []\n for i in range(0, max(len(org_lines), len(new_lines))):\n try:\n new_line = new_lines[i]\n og_line = org_lines[i]\n if new_line == og_line:\n same_lines_index.append(i)\n except IndexError:\n break\n return same_lines_index\n\n\ndef split_to_chars(lines):\n words = []\n for line in lines:\n for word in list(line):\n words.append(word)\n return words\n\n\ndef test1(i, org, data, new):\n tmp = org.copy()\n tmp.insert(i, data)\n return len(same(tmp, new))\n\n\ndef test2(start, i, org, new):\n tmp = org.copy()\n del tmp[start:i]\n return len(same(tmp, new))\n\n\ndef find_differences(org, new, same_index):\n added_index = []\n removed_index = []\n i = 0\n data = \"\"\n start = i\n while i < max(len(org), len(new)):\n start += 1\n if i not in same_index:\n start -= 1\n try:\n add_len = test1(i, org, new[i], new)\n remove_len = test2(start, i, org, new)\n if add_len > remove_len:\n # print(\"--------------------\")\n # print(\"added\", i)\n org.insert(i, new[i] + \"~~~c~~~\")\n same_index = same(org, new)\n for j in range(start, i+1):\n added_index.append(j)\n start = i\n if add_len < remove_len:\n # print(\"--------------------\")\n # print(\"removed\", i)\n new.insert(i, org[i] + \"~~~c~~~\")\n same_index = same(org, new)\n for j in range(start, i+1):\n added_index.append(j)\n start = i\n if add_len == remove_len:\n data += new[i]\n except IndexError:\n if len(new) > i:\n org.append(new[i] + \"~~~c~~~\")\n same_index = same(org, new)\n added_index.append(i)\n else:\n new.append(org[i] + \"~~~c~~~\")\n same_index = same(org, new)\n removed_index.append(i)\n i += 1\n added_index = list(dict.fromkeys(added_index))\n same_index = list(dict.fromkeys(same_index))\n removed_index = list(dict.fromkeys(removed_index))\n\n return create_final_index(org, same_index, added_index, removed_index)\n\n\ndef org(org_lines, new_lines):\n org_words = split_to_chars(org_lines)\n new_words = split_to_chars(new_lines)\n same_index = same(org_words, new_words)\n return find_differences(org_words, new_words, same_index)\n\n\ndef create_diff_file(data):\n file_handler = open(DIFF_PATH, \"w\")\n for part in data:\n file_handler.write(str(part))\n file_handler.write(\"\\n\")\n file_handler.close()\n\n\ndef main():\n file1 = open(ORG_PATH, 'r')\n file2 = open(NEW_PATH, 'r')\n org_lines = file1.readlines()\n new_lines = file2.readlines()\n create_diff_file(org(org_lines, new_lines))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"smart_server/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":5153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"314167167","text":"# encoding: utf-8\n\n\n\"\"\"\n.. 
codeauthor:: Tsuyoshi Hombashi \n\"\"\"\n\nimport datetime\n\nfrom dataproperty import *\nimport pytest\nimport six\n\n\nnan = float(\"nan\")\ninf = float(\"inf\")\n\n\nclass Test_is_integer:\n\n @pytest.mark.parametrize([\"value\"], [\n [0], [99999999999], [-99999999999],\n [1234567890123456789], [-1234567890123456789],\n [\"0\"], [\"99999999999\"], [\"-99999999999\"],\n [\" 1\"], [\"1 \"],\n ])\n def test_normal(self, value):\n assert is_integer(value)\n\n @pytest.mark.parametrize([\"value\"], [\n [None], [nan], [inf],\n [0.5], [\"0.5\"],\n [.999], [\".999\"],\n [\"\"], [\"test\"], [\"1a1\"], [\"11a\"], [\"a11\"],\n [True],\n [1e-05], [-1e-05],\n [\"1e-05\"], [\"-1e-05\"],\n [-0.00001],\n ])\n def test_abnormal(self, value):\n assert not is_integer(value)\n\n\nclass Test_is_hex:\n\n @pytest.mark.parametrize([\"value\"], [\n [\"0x00\"], [\"0xffffffff\"], [\"a\"], [\"f\"],\n ])\n def test_normal(self, value):\n assert is_hex(value)\n\n @pytest.mark.parametrize([\"value\"], [\n [None], [nan], [inf],\n [0], [1], [0.5],\n [\"test\"], [\"g\"],\n [True],\n ])\n def test_abnormal(self, value):\n assert not is_hex(value)\n\n\nclass Test_is_float:\n\n @pytest.mark.parametrize([\"value\"], [\n [0.0], [0.1], [-0.1], [1], [-1],\n [\"0.0\"], [\"0.1\"], [\"-0.1\"], [\"1\"], [\"-1\"],\n [.5], [0.],\n [\"1e-05\"],\n [nan], [inf],\n ])\n def test_normal(self, value):\n assert is_float(value)\n\n @pytest.mark.parametrize([\"value\"], [\n [None],\n [\"test\"],\n [\"inf\"],\n [True],\n ])\n def test_abnormal(self, value):\n assert not is_float(value)\n\n\nclass Test_is_nan:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [nan, True],\n\n [None, False],\n [\"nan\", False],\n [\"1\", False],\n [inf, False],\n [1, False],\n [0.1, False],\n [True, False],\n ])\n def test_normal(self, value, expected):\n assert is_nan(value) == expected\n\n\nclass Test_is_not_empty_string:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [\"nan\", True],\n [\"テスト\", True],\n\n [None, False],\n [\"\", False],\n [\" \", False],\n [\"\\t\", False],\n [\"\\n\", False],\n [[], False],\n [1, False],\n [True, False],\n ])\n def test_normal(self, value, expected):\n assert is_not_empty_string(value) == expected\n\n\nclass Test_is_empty_string:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [\"nan\", False],\n [\"テスト\", False],\n\n [None, True],\n [\"\", True],\n [\" \", True],\n [\"\\t\", True],\n [\"\\n\", True],\n [True, True],\n [[], True],\n [1, True],\n ])\n def test_normal(self, value, expected):\n assert is_empty_string(value) == expected\n\n\nclass Test_is_list_or_tuple:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [[], True],\n [[1], True],\n [[\"a\"] * 200000, True],\n [(), True],\n [(1,), True],\n [(\"a\",) * 200000, True],\n\n [None, False],\n [nan, False],\n [0, False],\n [\"aaa\", False],\n [True, False],\n ])\n def test_normal(self, value, expected):\n assert is_list_or_tuple(value) == expected\n\n\nclass Test_is_empty_list_or_tuple:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [(), True],\n [[], True],\n [None, True],\n\n [[1], False],\n [[\"a\"] * 200000, False],\n [(1,), False],\n [(\"a\",) * 200000, False],\n ])\n def test_normal(self, value, expected):\n assert is_empty_list_or_tuple(value) == expected\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [nan, False],\n [0, False],\n [\"aaa\", False],\n [True, False],\n ])\n def test_abnormal(self, value, expected):\n assert is_empty_list_or_tuple(value) == expected\n\n\nclass 
Test_is_not_empty_list_or_tuple:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [(), False],\n [[], False],\n [None, False],\n\n [[1], True],\n [[\"a\"] * 200000, True],\n [(1,), True],\n [(\"a\",) * 200000, True],\n ])\n def test_normal(self, value, expected):\n assert is_not_empty_list_or_tuple(value) == expected\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [nan, False],\n [0, False],\n [\"aaa\", False],\n [True, False],\n ])\n def test_abnormal(self, value, expected):\n assert is_not_empty_list_or_tuple(value) == expected\n\n\nclass Test_is_datetime:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [datetime.datetime(2016, 1, 1), True],\n\n [None, False],\n [\"\", False],\n [\"テスト\", False],\n [[], False],\n [1, False],\n [True, False],\n ])\n def test_normal(self, value, expected):\n assert is_datetime(value) == expected\n\n\nclass Test_get_integer_digit:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [0, 1], [-0, 1],\n [.99, 1], [-.99, 1],\n [\".99\", 1], [\"-.99\", 1],\n [1.01, 1], [-1.01, 1],\n [9.99, 1], [-9.99, 1],\n [\"9.99\", 1], [\"-9.99\", 1],\n [\"0\", 1], [\"-0\", 1],\n\n [10, 2], [-10, 2],\n [99.99, 2], [-99.99, 2],\n [\"10\", 2], [\"-10\", 2],\n [\"99.99\", 2], [\"-99.99\", 2],\n\n [100, 3], [-100, 3],\n [999.99, 3], [-999.99, 3],\n [\"100\", 3], [\"-100\", 3],\n [\"999.99\", 3], [\"-999.99\", 3],\n\n [10000000000000000000, 20], [-10000000000000000000, 20],\n [99999999999999099999.99, 20], [-99999999999999099999.99, 20],\n [\"10000000000000000000\", 20], [\"-10000000000000000000\", 20],\n [\"99999999999999099999.99\", 20], [\"-99999999999999099999.99\", 20],\n\n [True, 1],\n [False, 1],\n ])\n def test_normal(self, value, expected):\n assert get_integer_digit(value) == expected\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [99999999999999999999.99, 21],\n [-99999999999999999999.99, 21],\n [\"99999999999999999999.99\", 21],\n [\"-99999999999999999999.99\", 21],\n ])\n def test_abnormal(self, value, expected):\n # expected result == 20\n assert get_integer_digit(value) == expected\n\n @pytest.mark.parametrize([\"value\", 'exception'], [\n [None, TypeError],\n [\"test\", ValueError],\n [\"a\", ValueError],\n [\"0xff\", ValueError],\n [nan, ValueError],\n [inf, OverflowError],\n ])\n def test_exception(self, value, exception):\n with pytest.raises(exception):\n get_integer_digit(value)\n\n\nclass Test_get_number_of_digit:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [0, (1, 0)], [-0, (1, 0)],\n [\"0\", (1, 0)], [\"-0\", (1, 0)],\n [10, (2, 0)], [-10, (2, 0)],\n [\"10\", (2, 0)], [\"-10\", (2, 0)],\n [10.1, (2, 1)], [-10.1, (2, 1)],\n [\"10.1\", (2, 1)], [\"-10.1\", (2, 1)],\n [10.01, (2, 2)], [-10.01, (2, 2)],\n [10.001, (2, 2)], [-10.001, (2, 2)],\n [100.1, (3, 1)], [-100.1, (3, 1)],\n [100.01, (3, 1)], [-100.01, (3, 1)],\n [0.1, (1, 1)], [-0.1, (1, 1)],\n [\"0.1\", (1, 1)], [\"-0.1\", (1, 1)],\n [.99, (1, 2)], [-.99, (1, 2)],\n [\".99\", (1, 2)], [\"-.99\", (1, 2)],\n [0.01, (1, 2)], [-0.01, (1, 2)],\n [\"0.01\", (1, 2)], [\"-0.01\", (1, 2)],\n [0.001, (1, 3)], [-0.001, (1, 3)],\n [\"0.001\", (1, 3)], [\"-0.001\", (1, 3)],\n [0.0001, (1, 4)], [-0.0001, (1, 4)],\n [\"0.0001\", (1, 4)], [\"-0.0001\", (1, 4)],\n [0.00001, (1, 4)], [-0.00001, (1, 4)],\n [\"0.00001\", (1, 4)], [\"-0.00001\", (1, 4)],\n [2e-05, (1, 4)], [-2e-05, (1, 4)],\n [\"2e-05\", (1, 4)], [\"-2e-05\", (1, 4)],\n ])\n def test_normal(self, value, expected):\n assert get_number_of_digit(value) == expected\n\n 
@pytest.mark.parametrize([\"value\", \"expected1\", \"expected2\"], [\n [True, 1, 1],\n ])\n def test_annormal(self, value, expected1, expected2):\n sig_digit, float_digit = get_number_of_digit(value)\n assert sig_digit == expected1\n assert float_digit == expected2\n\n @pytest.mark.parametrize([\"value\"], [\n [None],\n [\"0xff\"], [\"test\"], [\"テスト\"],\n ])\n def test_abnormal(self, value):\n sig_digit, float_digit = get_number_of_digit(value)\n assert is_nan(sig_digit)\n assert is_nan(float_digit)\n\n\nclass Test_get_text_len:\n\n @pytest.mark.parametrize([\"value\", \"expected\"], [\n [\"\", 0],\n [\n \"aaaaaaaaaaaaaaaaaaaa\"\n \"aaaaaaaaaaaaaaaaaaaa\"\n \"aaaaaaaaaaaaaaaaaaaa\"\n \"aaaaaaaaaaaaaaaaaaaa\"\n \"aaaaaaaaaaaaaaaaaaaa\",\n 100\n ],\n [u\"あ\", 1],\n\n [None, 4],\n [nan, 3],\n [inf, 3],\n ])\n def test_normal(self, value, expected):\n assert get_text_len(value) == expected\n","sub_path":"test/test_function.py","file_name":"test_function.py","file_ext":"py","file_size_in_byte":8983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"26563143","text":"import time\nimport pyrax\nimport logging\nimport vmbuilder.utils\n\n\nlogger = logging.getLogger('build')\n\nimport requests.packages.urllib3\nrequests.packages.urllib3.disable_warnings()\n\n\ndef _connect(domain, cred, region):\n try:\n pyrax.settings.set('identity_type', 'rackspace')\n pyrax.set_credentials(cred['rackspace']['client_id'], cred['rackspace']['api_key'], region=region)\n dns = pyrax.cloud_dns\n except Exception as e:\n logger.error(\"Error connecting to rackspace with key [%s] %s\" % (cred['rackspace']['client_id'], e))\n exit(1)\n\n dom = None\n for cur_dom in dns.get_domain_iterator():\n if cur_dom.name == domain:\n dom = cur_dom\n\n if dom is None:\n logger.error(\"Domain [%s] is not available\" % domain)\n exit(1)\n\n return dom\n\n\ndef dnsupdate(hosts, cred, ips, dry, inventory='hosts'):\n rec = {}\n for h in hosts:\n ip = ips[h]['public_ip_address']\n if ip is None:\n logger.info(\" No IP was given to update DNS for server [%s] \" % (h))\n continue\n\n hv = {}\n hv = vmbuilder.utils.load_host_vars(h, inventory=inventory)\n\n domain = h.partition('.')[2]\n zone = _connect(domain, cred, hv['VM_PROVIDER']['region'])\n if zone is None:\n logger.error(\" The domain [%s] could not be found\" % (domain))\n return\n\n aliases = []\n if 'aliases' in hv:\n aliases = hv['aliases'].split(' ')\n aliases.append(h.replace('.' + domain, ''))\n\n for alias in aliases:\n cur_alias = \"%s.%s\" % (alias, domain)\n recs = [{\"type\": 'A', \"name\": \"%s.%s\" % (alias, domain), \"data\": ip, \"ttl\": 300}]\n try:\n logger.info(\" Adding A record [%s] for [%s] \" % (cur_alias, ip))\n rec[cur_alias] = zone.add_records(recs)\n except pyrax.exceptions.DomainRecordAdditionFailed as e:\n logger.warning(\" Warning DNS record already exist: %s\" % (e))\n except Exception as e:\n logger.error(\" Error while adding DNS records: %s\" % (e))\n exit(1)\n\n time.sleep(10)\n\n\ndef dnsremove(hosts, cred, ips, dry, inventory='hosts'):\n for h in hosts:\n hv = {}\n hv = vmbuilder.utils.load_host_vars(h, inventory=inventory)\n\n domain = h.partition('.')[2]\n zone = _connect(domain, cred, hv['VM_PROVIDER']['region'])\n if zone is None:\n logger.error(\" The domain [%s] could not be found\" % (domain))\n\n aliases = []\n if 'aliases' in hv:\n aliases = hv['aliases'].split(' ')\n aliases.append(h.replace('.' 
+ domain, ''))\n\n recs = zone.list_records()\n for r in recs:\n for alias in aliases:\n cur_alias = \"%s.%s\" % (alias, domain)\n if r.name == cur_alias:\n try:\n logger.info(\" Deleting A record [%s]\" % (cur_alias))\n r.delete()\n except Exception as e:\n logger.error(\" Error while adding DNS records: %s\" % (e))\n exit(1)\n","sub_path":"vmbuilder/dns/dns_rackspace.py","file_name":"dns_rackspace.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"534647101","text":"#set up all the sounds\n#text press a key to retry\n\nimport pygame, random, sys\nfrom pygame.locals import *\n\nWINDOWWIDTH=800\nWINDOWHEIGHT=450\nTEXTCOLOR = (0, 0, 0)\nBACKGROUNDCOLOR=(255,255,255)\nFPS = 60\nBADDIEMINSIZE = 10\nBADDIEMAXSIZE = 40\nBADDIEMINSPEED = 1\nBADDIEMAXSPEED = 8\nADDNEWBADDIERATE = 6\nPLAYERMOVERATE = 5\nWHITE = (255,255,255)\nBLACK = (0,0,0)\nRED = (255, 0, 0)\nGREEN = (0, 100, 0)\nclock = pygame.time.Clock()\nscreen = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\ndef waitForPlayerToPressKey():\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE: # Pressing ESC quits.\n terminate()\n return\n\n\n\ndef drawText(text, font, surface, x, y):\n textobj = font.render(text, 1, TEXTCOLOR)\n textrect = textobj.get_rect()\n textrect.topleft = (x, y)\n surface.blit(textobj, textrect)\n\ndef newmob():\n m = Mob()\n all_sprites.add(m)\n mobs.add(m)\n\ndef draw_life(surface, x, y, pct):\n if pct < 0:\n pct = 0 #so we don't have negativ value for life\n BAR_LENGTH = 100\n BAR_HEIGHT = 10 #size of the bar\n fill = (pct / 100) * BAR_LENGTH\n outline_rectangle = pygame.Rect (x, y, BAR_LENGTH, BAR_HEIGHT) #the rectangle that doesn't change\n fill_rectangle2 = pygame.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)#so we can see the ammount of life we lost\n fill_rectangle = pygame.Rect (x, y, fill, BAR_HEIGHT) #the rectangle that display the life, it goes smaller when we get hit\n pygame.draw.rect(surface, RED, fill_rectangle2) # draw the life lost\n pygame.draw.rect(surface, GREEN, fill_rectangle) #draw the life rectangle\n pygame.draw.rect(surface, WHITE, outline_rectangle, 2) #draw outline_rectangle\n\n# Set up pygame, the window, and the mouse cursor.\npygame.init()\nmainClock = pygame.time.Clock()\nwindowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\npygame.display.set_caption('Dodger')\npygame.mouse.set_visible(False)\n\n# Set up the fonts.\nfont = pygame.font.SysFont(None, 48)\nfont2= pygame.font.SysFont(\"Courier\",75)\n\n# Set up sounds.\npygame.mixer.music.load('background.mid')\n#pygame.mixer.music.set_volume(0.4) #change the volume of the music\n\n#shoot_sound = pygame.mixer.Sound('shoot.wave')\n#explosion_sound = pygame.mixer.Sound('Explosion.wav')\n#death_sound = pygame.mixer.Sound('Death.wav')\n\n\n# Set up images.\n\nplayerImage = pygame.image.load('perenoel.png')\nplayerRect = playerImage.get_rect()\nbaddieImage = []\nbaddie_list = ['pinguin.png', 'pinguin2.jpg', 'pinguin3.png'] #all the images we want to chose from\nfor img in baddie_list:\n baddieImage.append(pygame.image.load(img))\nbackground = pygame.image.load(\"background.png\")\nbackground_rect = background.get_rect() #to have a way to locate it\nbulletImage = pygame.image.load(\"gift.png\")\n\n\n# Show the \"Start\" screen.\nwindowSurface.fill(BACKGROUNDCOLOR)\ndrawText('Santawars', font2, 
windowSurface, (WINDOWWIDTH / 3.5), (WINDOWHEIGHT / 3))\ndrawText('Press a key to start.', font, windowSurface, (WINDOWWIDTH / 2.68) - 50, (WINDOWHEIGHT / 3) + 200)\npygame.display.update()\nwaitForPlayerToPressKey()\n\ntopScore = 0\n#player class\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(playerImage,(150 ,80)) #to scale down our image\n #self.image = playerImage\n self.image.set_colorkey(BLACK) #to remove the white on the border of the image\n self.rect = self.image.get_rect()\n #self.radius = 60 #we can chose this way the size of the circle of the player's hitboxe\n #pygame.draw.circle(self.image, RED, self.rect.center, self.radius) #this line serve to display how big the circle is, but we don't need it in the final game, only to test\n self.rect.centerx = WINDOWWIDTH -700\n self.rect.bottom = WINDOWHEIGHT / 2\n self.speedx = 0\n self.speedy = 0\n self.life = 100 #setup life so we don't get oneshoted everytime we get hit\n\n #update the player sprite\n def update(self):\n self.speedx = 0\n self.speedy = 0\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n self.speedy = 0\n self.speedx = -8\n if keys[pygame.K_RIGHT]:\n self.speedy = 0\n self.speedx = 8\n if keys[pygame.K_UP]:\n self.speedx = 0\n self.speedy = -8\n if keys[pygame.K_DOWN]:\n self.speedx = 0\n self.speedy = 8\n if self.rect.right > WINDOWWIDTH:\n self.rect.right = WINDOWWIDTH\n if self.rect.left < 0:\n self.rect.left = 0\n if self.rect.top < 0:\n self.rect.top = 0\n if self.rect.bottom > WINDOWHEIGHT:\n self.rect.bottom = WINDOWHEIGHT\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n\n\n\n\n #allow the player to shoot\n def shoot(self):\n bullet = Bullet(self.rect.right, self.rect.centery) #do the bullet spawn at the center extremity of the player\n all_sprites.add(bullet)\n bullets.add(bullet)\n #shoot_sound.play()\n\n\n#class of the ennemies\nclass Mob(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = random.choice(baddieImage)\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.radius = 20 #same as player\n #pygame.draw.circle(self.image, RED, self.rect.center, self.radius) #same as player\n self.rect.y = random.randrange(500) #random spawn on axe Y\n self.rect.x = (WINDOWWIDTH+100) #to get smooth animations, not that they spawn into existence at the right of the screen, instead they appear naturally from the extremity of the screen\n self.speedx = random.randrange(-8, -3) #random speed on X\n self.speedy = random.randrange(-3, 3)#random speed on Y\n self.rotation = 0 #to choose how much the sprite rotates, initially it doesnt\n\n #update the ennemies sprite\n def update(self):\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n if self.rect.top < 0: #if an ennemi hits a extermity of the screen it bounces and continue his trajectory instead of being stuck to the extremity\n self.rect.top = 0\n self.speedy = -self.speedy\n if self.rect.bottom > WINDOWHEIGHT:\n self.rect.bottom = WINDOWHEIGHT\n self.speedy = -self.speedy\n if self.rect.left < -25:\n self.rect.y = random.randrange(500)\n self.rect.x = (WINDOWWIDTH+100)\n self.speedx = random.randrange(-8, -3)\n self.speedy = random.randrange(-3, 3)\n\n#class of the bullet\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.transform.scale(bulletImage,(40,40))\n #self.image.set.colorkey(255,255,255)\n self.rect = 
self.image.get_rect()\n self.rect.bottom = y\n self.rect.centerx = x\n self.speedx= 10\n #update bullet sprite\n def update(self):\n self.rect.x += self.speedx\n #kill it if it moves off the screen\n if self.rect.right > WINDOWWIDTH:\n self.kill() #remove completly the sprite if it goes of the screen\n\nall_sprites = pygame.sprite.Group() #all the sprites are there so they can be drawn and updated\nmobs = pygame.sprite.Group() #we make them all the ennemies in the same group so it's easier to work with the them (hitboxes...)\nbullets = pygame.sprite.Group() #same but for the bullets\nplayer = Player()\nall_sprites.add(player)\n\nfor i in range(8): #spawn a specific number of mobs on the screen\n newmob()\n\npygame.mixer.music.play(-1, 0.0) #start the music before the start of the game\nwhile True:\n # Set up the start of the game.\n score = 0\n playerRect.topleft = (WINDOWWIDTH -900, WINDOWHEIGHT / 2)\n moveLeft = moveRight = moveUp = moveDown = False\n #so the game keeps running at the right speed\n clock.tick(FPS)\n while True: # The game loop runs while the game part is playing.\n score += 1 # Increase score.\n\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n\n if event.type == pygame.KEYDOWN: #when you press the key it does something, not when you release the key\n if event.key == pygame.K_ESCAPE:\n terminate()\n if event.key == pygame.K_SPACE:\n player.shoot()\n\n\n\n #Update the sprites\n all_sprites.update()\n\n #check to see if a bullethit a mob\n hits = pygame.sprite.groupcollide(mobs, bullets, True, True) #if a bullet hit a mobs, both get deleted\n for hit in hits: #we have to add new mobs for each mobs that got deleted from the game\n score += 20\n #explosion_sound.play()\n newmob()\n\n\n #check to see if a mob hit the player\n hits = pygame.sprite.spritecollide(player, mobs, True, pygame.sprite.collide_circle) #if a mobs collide, it is stocked it the list \"hits\", the last element allow us to use the circle of the hitboxes we made in classes\n for hit in hits:\n player.life -= 40 #we lose life when we get hit\n newmob()\n if player.life <= 0:\n #death_sound.play()\n terminate()\n\n #Draw everything\n windowSurface.blit(background,background_rect)\n all_sprites.draw(windowSurface)\n drawText('Score: %s' % (score), font, windowSurface, 10, 0)\n drawText('Top Score: %s' % (topScore), font, windowSurface, 10, 40)\n draw_life(screen, 5,5,player.life) #draw the life bar\n #after drawing everything, flip the display\n pygame.display.flip()\n mainClock.tick(FPS)\n\n # Stop the game and show the \"Game Over\" screen.\n pygame.mixer.music.stop()\n gameOverSound.play()\n\n drawText('GAME OVER', font, windowSurface, (WINDOWWIDTH / 3), (WINDOWHEIGHT / 3))\n drawText('Press a key to play again.', font, windowSurface, (WINDOWWIDTH / 3) - 80, (WINDOWHEIGHT / 3) + 50)\n pygame.display.update()\n waitForPlayerToPressKey()\n\n gameOverSound.stop()\n","sub_path":"dodger.py","file_name":"dodger.py","file_ext":"py","file_size_in_byte":10358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"592136270","text":"from __future__ import print_function\r\nimport pygame\r\nfrom Controller import *\r\nfrom Data import *\r\nfrom Pygame_UI import *\r\nfrom rover import Rover\r\nimport cv2\r\nimport numpy as np\r\nimport time\r\nimport math\r\n\r\nclass RoverExtended(Rover):\r\n def __init__(self):\r\n Rover.__init__(self)\r\n self.d = Data()\r\n self.userInterface = Pygame_UI()\r\n self.clock = pygame.time.Clock()\r\n self.FPS = 
30\r\n self.image = None\r\n self.buttonState = 1\r\n self.quit = False\r\n self.controller = None\r\n self.controllerType = None\r\n self.canSave = False\r\n self.paused = False\r\n self.isReversed = False\r\n self.isLearning = False\r\n self.lightsOn = False\r\n # angle ranges from 0 to 180 where 180 = hard left, 90 = forward and 0 = hard right\r\n self.angle = None\r\n self.treads = [0,0]\r\n self.timeStart = time.time()\r\n self.run()\r\n\r\n def getNewTreads(self):\r\n if self.angle <= 180 and self.angle >= 130:\r\n self.treads = [-1,1]\r\n elif self.angle < 130 and self.angle >= 100:\r\n self.treads = [-0.05, 1] #0,1\r\n elif self.angle < 100 and self.angle >= 80:\r\n self.treads = [1, 1]\r\n elif self.angle < 80 and self.angle >= 50:\r\n self.treads = [1, -0.05] #1,0\r\n elif self.angle < 50 and self.angle >= 0:\r\n self.treads = [1,-1]\r\n\r\n def setControls(self):\r\n black = (0,0,0)\r\n controls = None\r\n while not controls:\r\n self.userInterface.display_message(\"Enter K to control from Keyboard, or W to control from Wheel (K/W): \", black, 0,0)\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if chr(event.key) in ['K','k','W','w']:\r\n controls = chr(event.key).upper()\r\n self.clock.tick(self.FPS)\r\n pygame.display.flip()\r\n self.userInterface.screen.fill((255,255,255))\r\n \r\n while not self.canSave:\r\n self.userInterface.display_message(\"Do you want this data to be recorded? (Y/N):\", black, 0,0)\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if chr(event.key) in ['Y','y','N','n']:\r\n self.canSave = chr(event.key).upper()\r\n self.clock.tick(self.FPS)\r\n pygame.display.flip()\r\n self.userInterface.screen.fill((255,255,255))\r\n\r\n if self.canSave == 'Y':\r\n self.canSave = True\r\n self.isLearning = True\r\n else:\r\n self.canSave = False\r\n\r\n if controls == \"K\":\r\n self.controllerType = \"Keyboard\"\r\n self.paused = True\r\n self.controller = Keyboard()\r\n print(\"To move around with the rover, click the PyGame window\")\r\n print(\"W = Forward, A = Left, S = Reverse, D = Right\")\r\n elif controls == \"W\":\r\n self.controllerType = \"Wheel\"\r\n self.controller = Wheel()\r\n else:\r\n self.quit = True\r\n if self.canSave:\r\n print('Data is recording...')\r\n\r\n def reverse(self):\r\n self.treads = [-1,-1]\r\n\r\n def freeze(self):\r\n self.treads = [0,0]\r\n self.set_wheel_treads(0,0)\r\n\r\n # takes the entire buttons array as input\r\n # looks for \"1\"s and calls the function for that button\r\n def useButtons(self):\r\n buttons = self.controller.getButtonStates()\r\n if len(buttons) == 0:\r\n print(\"\\n\\n Plug in the wheel!\")\r\n self.quit = True\r\n\r\n\r\n # only runs once per press, instead of constant hold down\r\n if not any(buttons):\r\n self.buttonState = 0\r\n if any(buttons) and self.buttonState == 0:\r\n self.buttonState = 1\r\n # left handle under the wheel\r\n if buttons[0] == 1:\r\n self.lightsOn = not self.lightsOn\r\n # right handle under the wheel\r\n elif buttons[1] == 1:\r\n print(\"Battery percentage:\", self.get_battery_percentage())\r\n # top left button\r\n elif buttons[2] == 1:\r\n self.paused = not self.paused\r\n # top right button\r\n elif buttons[3] == 1:\r\n pass\r\n # middle left button\r\n elif buttons[4] == 1:\r\n self.eraseFrames(self.FPS)\r\n # middle right button\r\n elif buttons[5] == 1:\r\n self.eraseFrames(self.FPS * 10)\r\n # bottom left button\r\n elif buttons[6] == 1:\r\n pass\r\n # bottom right button\r\n elif buttons[7] == 1:\r\n self.quit = True\r\n 
print(\"Program stopping...\")\r\n # gear shift pushed towards you\r\n elif buttons[8] == 1:\r\n self.isReversed = not self.isReversed\r\n # gear shift pushed away from you\r\n elif buttons[9] == 1:\r\n self.isReversed = not self.isReversed\r\n\r\n def endSession(self):\r\n self.set_wheel_treads(0,0)\r\n if self.canSave:\r\n self.d.save('dataset.h5')\r\n pygame.quit()\r\n cv2.destroyAllWindows()\r\n self.close()\r\n\r\n def process_video_from_rover(self, jpegbytes, timestamp_10msec):\r\n window_name = 'Machine Perception and Cognitive Robotics'\r\n array_of_bytes = np.fromstring(jpegbytes, np.uint8)\r\n self.image = cv2.imdecode(array_of_bytes, flags=3)\r\n k = cv2.waitKey(5) & 0xFF\r\n return self.image\r\n\r\n def useKey(self, key):\r\n self.isReversed = False\r\n key = chr(key)\r\n if key in ['w','a','d']:\r\n self.angle = self.controller.getAngle(key)\r\n self.paused = False\r\n elif key == 'z':\r\n self.quit = True\r\n elif key == 's':\r\n self.isReversed = True\r\n elif key == 'b':\r\n print(self.get_battery_percentage())\r\n elif key == ' ':\r\n self.paused = not self.paused\r\n elif key == 'p':\r\n self.eraseFrames(self.FPS)\r\n elif key == 'l':\r\n self.pauseLearning()\r\n\r\n def eraseFrames(self, count):\r\n size = len(self.d.angles)\r\n if (size - count > 0):\r\n print(\"--\", \"Deleting\" , count, \"seconds of frames!\")\r\n self.d.angles = self.d.angles[:size - count]\r\n self.d.images = self.d.images[:size - count]\r\n else:\r\n print(\"Couldn't delete! List has less than\", count, \"frames!\")\r\n\r\n def pauseLearning(self):\r\n self.isLearning = not self.isLearning\r\n\r\n def displayDashboard(self):\r\n black = (0,0,0)\r\n lightsBool = \"On\" if self.lightsOn else \"Off\"\r\n motionBool = \"Stopped\" if self.paused else \"Moving\"\r\n learning = \"Learning\" if self.isLearning else \"Not Learning\"\r\n\r\n self.userInterface.display_message(\"Rover Battery Percentage: \" + str(self.get_battery_percentage()), black, 0,0)\r\n self.userInterface.display_message(\"Controller Type: \" + self.controllerType, black, 0, self.userInterface.fontSize * 1)\r\n self.userInterface.display_message(\"Lights: \" + lightsBool, black, 0, self.userInterface.fontSize*2)\r\n self.userInterface.display_message(\"Steering Angle: \" + str(self.angle), black, 0, self.userInterface.fontSize*3)\r\n self.userInterface.display_message(\"Treads: \" + str(self.treads), black, 0, self.userInterface.fontSize*4)\r\n self.userInterface.display_message(\"Motion: \" + motionBool, black, 0, self.userInterface.fontSize*5)\r\n self.userInterface.display_message(\"Reversed: \" + str(self.isReversed), black, 0, self.userInterface.fontSize*6)\r\n self.userInterface.display_message(\"Number of Frames Collected: \" + str(len(self.d.angles)), black, 0, self.userInterface.fontSize*7)\r\n self.userInterface.display_message(\"Can Collect Data (initialized at start): \" + str(self.canSave), black, 0, self.userInterface.fontSize*8)\r\n self.userInterface.display_message(\"To record data, must not be paused and not be reversed: \" + learning, black, 0, self.userInterface.fontSize * 9)\r\n\r\n def checkTreadStatus(self, oldTreads):\r\n timeCurrent = time.time()\r\n timer = abs(self.timeStart - timeCurrent)\r\n newTreads = self.treads\r\n\r\n # Resetting tread state\r\n if oldTreads != newTreads:\r\n self.freeze()\r\n\r\n # Refreshing tread state\r\n if oldTreads != newTreads or timer > 1:\r\n self.timeStart = timeCurrent\r\n oldTreads = newTreads\r\n self.set_wheel_treads(newTreads[0],newTreads[1])\r\n return 
\r\n def run(self):\r\n while self.image is None: # wait for the first camera frame\r\n pass\r\n print(self.get_battery_percentage())\r\n oldTreads = None\r\n self.setControls()\r\n while not self.quit:\r\n self.displayDashboard()\r\n\r\n # Using the appropriate controller type\r\n if self.controllerType == \"Wheel\":\r\n self.angle = self.controller.getAngle()\r\n self.useButtons()\r\n else:\r\n key = self.controller.getActiveKey()\r\n if key:\r\n self.useKey(key)\r\n\r\n # Getting new treads based on angle\r\n self.getNewTreads()\r\n\r\n # Boolean user-inputted controls\r\n if self.isReversed:\r\n rev = self.treads[::-1]\r\n self.treads = [rev[0]* -1, rev[1] * -1]\r\n if self.paused:\r\n self.freeze()\r\n if self.lightsOn:\r\n self.turn_the_lights_on()\r\n else:\r\n self.turn_the_lights_off()\r\n\r\n\r\n # Ignore this, needed for fast tread switching\r\n # and to not back up the tread switching queue\r\n oldTreads = self.checkTreadStatus(oldTreads)\r\n\r\n\r\n # Saving data, currently not saving when rover is in reversed state\r\n self.isLearning = self.canSave and not self.isReversed and not self.paused\r\n if self.isLearning:\r\n self.d.angles.append(self.angle)\r\n self.d.images.append(self.image)\r\n \r\n # Displaying images \r\n cv2.imshow(\"RoverCam\", self.image)\r\n \r\n self.imgAngle = self.displayWithAngle(self.angle, self.image)\r\n cv2.imshow(\"Display Angle\", self.imgAngle)\r\n \r\n self.imgEdges = self.edges(self.image)\r\n cv2.imshow(\"RoverCamEdges\", self.imgEdges)\r\n\r\n self.clock.tick(self.FPS)\r\n pygame.display.flip()\r\n self.userInterface.screen.fill((255,255,255))\r\n self.endSession()\r\n\r\n def edges(self,image):\r\n imgEdges = cv2.Canny(image,50,200)\r\n return imgEdges\r\n\r\n def displayWithAngle(self, angle, frame):\r\n imgAngle = frame.copy()\r\n if self.angle and not self.isReversed:\r\n radius = 80\r\n angle = angle * math.pi / 180\r\n y = 240 - int(math.sin(angle) * radius)\r\n x = int(math.cos(angle) * radius) + 160\r\n cv2.line(imgAngle, (160, 240), (x, y), (0, 0, 0), 5)\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n cv2.putText(imgAngle, str(int(angle * 180 / math.pi)), (x, y), font, .8, (255, 0, 255), 2)\r\n return imgAngle\r\n","sub_path":"RoverExtended.py","file_name":"RoverExtended.py","file_ext":"py","file_size_in_byte":11416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"63738767","text":"import torch.nn as nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport func\n\n\ndef show(prompt, *args):\n print('{:-^50}'.format(prompt))\n for a in args:\n print(a)\n\n\nseqs = ['tiny', 'very_long', 'medium', 'small']\n\n# Sort and restore sequences\nvocab, padded, lens = func.build_padd_tensor(seqs)\nshow('Original', padded, lens)\n\npadded, lens, idx = func.sort_by_length(padded, lens)\nshow('Sorted by length', padded, lens)\n\n# padded, lens = func.restore(padded, lens, idx)\n# show('Restored', padded, lens)\n\n# Build the model\nEMBED_SIZE = 64\nHIDDEN_SIZE = 64\n\nembedding = nn.Embedding(len(vocab), EMBED_SIZE)\n\ngru = nn.GRU(EMBED_SIZE, HIDDEN_SIZE, batch_first=True)\n# NOTE: the 'batch_first' argument only affects its 'input' and 'output',\n# not including 'h_0' and 'h_n'. (See\n# https://pytorch.org/docs/stable/nn.html?highlight=gru#torch.nn.GRU\n# for details.)\n
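\n# (Editor's note, added for illustration.) Packing compresses the padded batch:\n# assuming build_padd_tensor tokenizes per character with no extra symbols, the\n# sorted lengths are 9, 6, 5, 4, so packed.batch_sizes below should come out as\n# [4, 4, 4, 4, 3, 2, 1, 1, 1] - the number of sequences still active per step.\n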
\n# Embed and pack (cannot reverse the order)\nem = embedding(padded)\npacked = pack_padded_sequence(em, lens, batch_first=True)\nshow('Packed', packed.data.shape, packed.batch_sizes)\n\n# Feed into GRU\noutput, hidden = gru(packed)\noutput, output_lens = pad_packed_sequence(output, batch_first=True)\n# output: (batch, max_len, hidden_size)\n# hidden: (num_layers*num_directions, batch, hidden)\nshow('GRU output', output.shape, output_lens, hidden.shape)\n","sub_path":"pad-pack/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"243791866","text":"import datetime\n\ndataAtual = datetime.datetime.now().year\nfor i in range(1):\n nome = str(input(\"Nome: \"))\n anoNasc = int(input(\"Ano de nascimento: \"))\n ctps = int(input(\"Carteira de trabalho (0 não tem): \"))\n if(ctps == 0):\n func = {\n \"nome\": nome,\n \"idade\": dataAtual - anoNasc,\n \"ctps\":ctps\n }\n break\n else:\n anoAdd = int(input(\"Ano de contratação: \"))\n sal = float(input(\"Salário: R$ \"))\n func = {\n \"nome\": nome,\n \"idade\": dataAtual - anoNasc,\n \"ctps\": ctps,\n \"contratação\": anoAdd,\n \"aposentadoria\": (dataAtual - anoNasc) + ((anoAdd + 35) - dataAtual),\n \"salário\": sal\n\n }\n\nfor i,j in func.items():\n print(f'{i} tem valor {j}')","sub_path":"exercicios/ex092.py","file_name":"ex092.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"581849466","text":"import requests\nimport pprint\nimport urllib.parse\nimport time\nfrom pymongo import MongoClient\n\n# Restaurant data goes into a database named seoul_matjip.\nclient = MongoClient('mongodb://test:test@13.125.185.68', 27017)\ndb = client.seoul_matjip\n\n# Search for restaurants region by region.\nseoul_gu = [\"서울\", \"인천\", \"대전\", \"대구\", \"광주\", \"부산\", \"울산\"]\n\n# Client ID and secret key issued for the Naver Search API.\nclient_id = \"Xkk8AQs9lWNsiooHjJ8R\"\nclient_secret = \"UW2NSMecHO\"\n\n\n# Returns the search results for a given keyword.\ndef get_naver_result(keyword):\n time.sleep(0.1)\n # Insert the keyword into the request URL.\n api_url = f\"https://openapi.naver.com/v1/search/local.json?query={keyword}&display=10&start=1&sort=random\"\n # Send the client ID and secret key along as headers.\n headers = {'X-Naver-Client-Id': client_id, 'X-Naver-Client-Secret': client_secret }\n # Store the search response in data.\n data = requests.get(api_url, headers=headers)\n # Convert the returned JSON into a dictionary.\n data = data.json()\n return data['items']\n
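\n# (Editor's example, added for illustration; the keyword is hypothetical.)\n# items = get_naver_result('마포구 맛집')  # up to 10 places, in random order\n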
\n# The full list of restaurants to be saved.\ndocs = []\n# Run one search per region.\nfor gu in seoul_gu:\n # Builds keywords such as '강남구 맛집', '종로구 맛집', '용산구 맛집', ... in turn.\n keyword = f'{gu} 맛집'\n # Fetch the restaurant list.\n restaurant_list = get_naver_result(keyword)\n\n # Separator line between regions.\n print(\"*\"*80 + gu)\n\n for matjip in restaurant_list:\n # Attach the region info.\n matjip['area'] = gu\n # Print the restaurant.\n pprint.pprint(matjip)\n # Append the restaurant to docs.\n docs.append(matjip)\n\n# Save the restaurant data.\ndb.restaurant.insert_many(docs)","sub_path":"restaurant.py","file_name":"restaurant.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"433727314","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 5 12:17:03 2015\n\n@author: george\n\"\"\"\nfrom __future__ import (absolute_import, division,print_function, unicode_literals)\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport math\nfrom math import radians, cos, sin, asin, sqrt\nimport os\n\nfilename1 = r\"C:\\Google Drive\\SiCr_Digitization\\Data\\shadeData_for_analysis\\boiseRiver_shade_50nodes_centerLine1.txt\"\nfilename2 = r\"C:\\Google Drive\\SiCr_Digitization\\Data\\shadeData_for_analysis\\luke_ShadeData.txt\"\noutput = r\"C:\\Google Drive\\SiCr_Digitization\\Data\\shadeData_for_analysis\\result.txt\"\n\n#x = np.loadtxt(filename1,skiprows=1,usecols=(1,))\n#y = np.loadtxt(filename1,skiprows=1,usecols=(2,))\nshade1 = np.loadtxt(filename1,skiprows=1,usecols=(3,))\nshade2 = np.loadtxt(filename2,skiprows=1,usecols=(3,))\n\nshade1mean = np.mean(shade1)\nshade2mean = np.mean(shade2)\n\nshade1SD = np.std(shade1)\nshade2SD = np.std(shade2)\n\ntext1 = 'mean = '+ str(np.round(shade1mean)) + ' StDev = ' + str(np.round(shade1SD))\ntext2 = 'mean = '+ str(np.round(shade2mean)) + ' StDev = ' + str(np.round(shade2SD))\n\nplt.figure(1)\nplt.subplot(211)\nplt.title('Histograms of shade values')\nn, bins, patches = plt.hist(shade1, 20, normed=1,facecolor='g', alpha=0.75)\nplt.xlabel('shade')\nplt.ylabel('probability')\nplt.text(60, .1, text1)\nplt.xlim(0,100)\nplt.grid(True)\nplt.subplot(212)\nn, bins, patches = plt.hist(shade2, 40, normed=1,facecolor='g', alpha=0.75)\nplt.xlabel('solar unavailable')\nplt.ylabel('probability')\nplt.text(60, .021, text2)\nplt.xlim(0,100)\nplt.grid(True)\n\nplt.show()","sub_path":"NatureConservancy/histPlot 2.py","file_name":"histPlot 2.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"638309759","text":"class Solution:\n \"\"\"\n @param s: a string\n @return bool: whether you can make s a palindrome by deleting at most one character\n \"\"\"\n\n # stop the scan at the first mismatched pair instead of looping to the end\n # memorize this two-pointers loop pattern\n\n def validPalindrome(self, s):\n # Write your code here\n left, right = self.two_pointers(s, 0, len(s) - 1) # find the first mismatched pair\n if left >= right:\n return True\n\n return self.is_palindrome(s, left + 1, right) or self.is_palindrome(s, left, right - 1)\n\n def two_pointers(self, s, left, right):\n while left < right:\n if s[left] != s[right]:\n return left, right\n left += 1\n right -= 1\n return left, right\n\n def is_palindrome(self, s, left, right):\n left, right = self.two_pointers(s, left, right)\n return left >= right\n\n\nif __name__ == '__main__':\n solution = Solution()\n 
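# (Editor's addition, illustration only.)\n print(solution.validPalindrome('abca'))  # True: drop 'b' or 'c'\n print(solution.validPalindrome('abc'))  # False: one deletion is not enough\n 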
solution.validPalindrome('abcfdcba')\n","sub_path":"src/leetCode/twopointers/n891validate_palindrome.py","file_name":"n891validate_palindrome.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"348278215","text":"from my_zoo.hyperparams.default_config import DQNAgentParams,ExperimentParams, EnvParams\nfrom stable_baselines.deepq import MlpPolicy\nfrom zoo.utils import CustomDQNPolicy\n\n##########################################################\n# Env #\n##########################################################\nenv_params = EnvParams()\nenv_params.env_id = 'acrobot'\n\n#################\n# Policy #\n#################\npolicy = CustomDQNPolicy\n\n##########################################################\n# Agent Params #\n\n# Default values:\n# policy = 'MlpPolicy' # or 'CnnPolicy' or 'CustomDQNPolicy'\n# buffer_size = 50000\n# learning_rate = 1e-4\n# learning_starts = 1000\n# target_network_update_freq = 500\n# train_freq = 1\n# exploration_initial_eps = 1.0\n# exploration_final_eps = 0.02\n# exploration_fraction = 0.1\n# prioritized_replay_alpha = 0.6\n# prioritized_replay = False\n# param_noise = False\n# gamma = 0.99\n# batch_size = 32\n# double_q = True\n# prioritized_replay_beta0 = 0.4\n# prioritized_replay_beta_iters = None\n# prioritized_replay_eps = 1e-6\n# n_cpu_tf_sess = None\n# policy_kwargs = None\n##########################################################\nagent_params = DQNAgentParams()\n# here we can change the various parameters - for example, we can change the batch size\nagent_params.policy = policy\nagent_params.learning_rate = 1e-3\nagent_params.exploration_final_eps= 0.1\nagent_params.prioritized_replay = True\n\n\n\n##########################################################\n# Experiment #\n##########################################################\nexperiment_params = ExperimentParams()\nexperiment_params.n_timesteps = 1e5\nexperiment_params.env_params = env_params\nexperiment_params.agent_params = agent_params\nexperiment_params.name = __name__.split('.')[-1]\n\n\n\n\n\n","sub_path":"my_zoo/hyperparams/dqn_acrobot.py","file_name":"dqn_acrobot.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"132470422","text":"# coding: utf8\nimport pygame\nimport time\n\npygame.init()\npygame.mixer.init()\n\ninfoObject = pygame.display.Info()\nWIDTH = int(infoObject.current_w * 0.9)\nHEIGHT = int(infoObject.current_h * 0.9)\nFPS = 30\nfrase = 0\nscore = 0\n\ntext_lvl_1 = ['противник*=)*Делить на ноль можно.',\n 'игрок*=)*1 - Согласен / 2 - Нельзя',\n 'противник*=)*1 - Что ж, ты проиграл / 2 - Нет, можно',\n 'игрок*=(*Дурак! Нельзя!',\n 'противник*=)*Интересно, почему? 6 % 3 = 2; 6 % 1 = 6; 6 % 0.0001 = 60000. '\n 'Чем меньше делитель, тем больше будет частное. Ну, что скажешь?',\n 'игрок*=)*1 - Выучил правило из 5 класса и крутой? / 2 - Рассказать правило',\n 'противник*=)*Ясно. Давно стал очевиден один факт.',\n '...',\n 'противник*=)*Ты так думаешь лишь потому, что тебе так сказали. '\n 'Своей головой ты думать видимо не можешь. Ты же знаешь, что если '\n 'любое число разделить на себя, то получится единица? 
Следовательно: 0 / 0 = 1.',\n 'игрок*=(*Единица - твоя оценка по матеше.',\n '',\n '',\n '']\nSCORES = {\n 0: [None, None, None, None],\n 1: [0, 0, None, None],\n 2: [0, 0, None, None],\n 3: [None, None, None, None],\n 4: [None, None, None, None],\n 5: [None, None, None, None],\n 6: [0, 0, None, None],\n 7: [None, None, None, None],\n 8: [None, None, None, None],\n 9: [None, None, None, None],\n}\n\n\nclass Background(pygame.sprite.Sprite):\n def __init__(self, image_file, location):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(image_file)\n self.rect = (self.image.get_rect())\n self.rect.left, self.rect.top = location\n\n\ndef judge_1(screen):\n font = pygame.font.Font(None, int(HEIGHT * 0.04))\n text = font.render(\"Судья:\", True, (255, 127, 80))\n text_x = WIDTH * 0.04\n text_y = HEIGHT * 0.88\n screen.blit(text, (text_x, text_y))\n\n\ndef rules(screen):\n font = pygame.font.Font(None, int(WIDTH * 0.023))\n text = font.render(\"Чтобы выбрать действие - нажмите 1, 2, 3 или 4, а чтобы \"\n \"перелистнуть реплику - нажмите ПРОБЕЛ\", True, (255, 127, 80))\n text_x = WIDTH * 0.01\n text_y = HEIGHT * 0.88\n screen.blit(text, (text_x, text_y))\n\n\ndef show_text(text):\n a = text\n a = list(a)\n count = 0\n if len(a) + 1 > 110:\n for i in range(len(a) - 51):\n font = pygame.font.Font(None, int(WIDTH * 0.028))\n t = a[0:count]\n count += 1\n text = font.render(''.join(t), True, (255, 255, 255))\n text_x = WIDTH * 0.04\n text_y = HEIGHT * 0.92\n screen.blit(text, (text_x, text_y))\n clock.tick(30)\n pygame.display.flip()\n for i in range(len(a) - 50, len(a) + 3):\n font = pygame.font.Font(None, int(WIDTH * 0.028))\n t = a[len(a) - 52:count]\n count += 1\n text = font.render(''.join(t), True, (255, 255, 255))\n text_x = WIDTH * 0.04\n text_y = HEIGHT * 0.96\n screen.blit(text, (text_x, text_y))\n clock.tick(30)\n pygame.display.flip()\n else:\n for i in range(len(a) + 1):\n font = pygame.font.Font(None, int(WIDTH * 0.028))\n t = a[0:count]\n count += 1\n text = font.render(''.join(t), True, (255, 255, 255))\n text_x = WIDTH * 0.04\n text_y = HEIGHT * 0.92\n screen.blit(text, (text_x, text_y))\n clock.tick(30)\n pygame.display.flip()\n\n\ndef speak(screen, frase):\n element = text_lvl_1[frase]\n element = element.split('*')\n if element[0] == 'противник':\n screen.blit(bg_1, bg_1_rect)\n screen.blit(enemy_1_common, enemy_1_common_rect)\n pygame.draw.line(screen, BLACK,\n [0, HEIGHT * 0.94],\n [WIDTH, HEIGHT * 0.94], int(HEIGHT * 0.12))\n font = pygame.font.Font(None, int(WIDTH * 0.03))\n text = font.render(\"Противник:\", True, (255, 127, 80))\n text_x = WIDTH * 0.04\n text_y = HEIGHT * 0.88\n screen.blit(text, (text_x, text_y))\n pygame.display.flip()\n show_text(element[-1])\n else:\n screen.blit(bg_2, bg_2_rect)\n screen.blit(main_common, main_common_rect)\n pygame.draw.line(screen, BLACK,\n [0, HEIGHT * 0.94],\n [WIDTH, HEIGHT * 0.94], int(HEIGHT * 0.12))\n font = pygame.font.Font(None, int(WIDTH * 0.03))\n text = font.render(\"ГГ:\", True, (255, 127, 80))\n text_x = WIDTH * 0.04\n text_y = HEIGHT * 0.88\n screen.blit(text, (text_x, text_y))\n pygame.display.flip()\n show_text(element[-1])\n\n\nBLACK = (0, 0, 0)\n\npygame.mixer.music.load('GAME_FIGHT.mp3')\npygame.mixer.music.play()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"My Game\")\nclock = pygame.time.Clock()\nrunning = True\nstart = True\n\nbg = Background('фон-зал суда.jpg', [0, 0])\nbg = pygame.transform.scale(\n bg.image, (WIDTH,\n HEIGHT))\nbg_rect = bg.get_rect(\n 
center=(WIDTH // 2, HEIGHT // 2))\n\nbg_1 = Background('фон_противника.jpg', [0, 0])\nbg_1 = pygame.transform.scale(\n bg_1.image, (WIDTH,\n HEIGHT))\nbg_1_rect = bg_1.get_rect(\n center=(WIDTH // 2, HEIGHT // 2))\n\nbg_2 = Background('фон_героя.jpg', [0, 0])\nbg_2 = pygame.transform.scale(\n bg_2.image, (WIDTH,\n HEIGHT))\nbg_2_rect = bg_2.get_rect(\n center=(WIDTH // 2, HEIGHT // 2))\n\nmain_common = Background('main_common.PNG', [0, 0])\nmain_common = pygame.transform.scale(\n main_common.image, (int(WIDTH * 0.5),\n int(WIDTH * 0.5)))\nmain_common_rect = main_common.get_rect(\n center=(WIDTH // 4 - 40, HEIGHT * 3 // 4 - 50))\n\nenemy_1_common = Background('common-enemy.PNG', [0, 0])\nenemy_1_common = pygame.transform.scale(\n enemy_1_common.image, (int(WIDTH * 0.5),\n int(WIDTH * 0.5)))\nenemy_1_common_rect = enemy_1_common.get_rect(\n center=(WIDTH * 0.45 + WIDTH // 4, WIDTH // 2 - WIDTH * 0.1))\n\nold_man = Background('judge-smiling.png', [0, 0])\nold_man = pygame.transform.scale(\n old_man.image, (int(WIDTH * 0.5),\n int(WIDTH * 0.5)))\nold_man_rect = old_man.get_rect(\n center=(WIDTH * 0.45 + WIDTH // 4, WIDTH // 2 - WIDTH * 0.1))\n\n\nwhile running:\n if start:\n screen.fill(BLACK)\n clock.tick(FPS)\n screen.blit(bg, bg_rect)\n screen.blit(old_man, old_man_rect)\n pygame.draw.line(screen, BLACK,\n [0, HEIGHT * 0.94],\n [WIDTH, HEIGHT * 0.94], int(HEIGHT * 0.14))\n rules(screen)\n pygame.display.flip()\n time.sleep(3)\n pygame.draw.line(screen, BLACK,\n [0, HEIGHT * 0.94],\n [WIDTH, HEIGHT * 0.94], int(HEIGHT * 0.12))\n judge_1(screen)\n\n pygame.display.flip()\n\n show_text('Кто выиграет в споре - получит свободу и право сыграть в Великий лабиринт.')\n\n start = False\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n speak(screen, frase)\n frase += 1\n elif event.key == pygame.K_1:\n if SCORES[frase][0]:\n score += SCORES[frase][0]\n speak(screen, frase)\n frase += 1\n print(score)\n","sub_path":"level_1.py","file_name":"level_1.py","file_ext":"py","file_size_in_byte":8270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"516145287","text":"import datetime\nfrom google.cloud import bigquery\nimport json\nimport time\nimport datetime\nimport pprint\n\n\nPROJECT_ID = 'sonic-progress-196808'\nDATASET_ID = 'MatchedData'\nTABLE = 'PartT'\nclient = bigquery.Client(project=PROJECT_ID)\ndataset_id = client.dataset(DATASET_ID)\nimport pandas\n\n\ntable_ref = dataset_id.table('monty_python2')\nrecords = [\n {'title': 'The Meaning of Life', 'release_year': 1983},\n {'title': 'Monty Python and the Holy Grail', 'release_year': 1975},\n {'title': 'Life of Brian', 'release_year': 1979},\n {\n 'title': 'And Now for Something Completely Different',\n 'release_year': 1971\n },\n]\n# Optionally set explicit indices.\n# If indices are not specified, a column will be created for the default\n# indices created by pandas.\nindex = ['Q24980', 'Q24980', 'Q24980', 'Q24980']\ndataframe = pandas.DataFrame(\n records, index=pandas.Index(index, name='wikidata_id'))\n\nprint(dataframe)\n\njob = client.load_table_from_dataframe(dataframe, table_ref, location='EU')\n\njob.result() # Waits for table load to complete.\n","sub_path":"Utk_temp.py","file_name":"Utk_temp.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"634926624","text":"def setup():\n 
size(500,500)\n smooth()\n noLoop()\n noStroke()\n ellipseMode(CENTER)\ndef draw():\n background(255)\n # NOTE: the original called float(...) as bare statements, which has no\n # effect; starting from a float border keeps the divisions from truncating.\n border = 50.0\n nw = width - 2*border\n nh = height - 2*border\n number = 5\n nWstep = nw / number\n nHstep = nh / number\n for i in range(0, number, 1):\n for j in range(0, number, 1):\n x = border + j*nWstep + nWstep/2\n y = border + i*nHstep + nHstep/2\n size = 5 + (j+i)*10\n mColor = size*1.5\n fill(mColor, 20, 50)\n ellipse(x, y, size, size)\n fill(250)\n ellipse(x, y, 3, 3)\n
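\n# (Editor's note, added.) Each cell center is border + index*step + step/2, so\n# with width=500, border=50, number=5 the centers fall at 90, 170, 250, 330, 410.\n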
","sub_path":"processing/sketch_5_0/sketch_5_2_listing23/sketch_5_2_listing23.pyde","file_name":"sketch_5_2_listing23.pyde","file_ext":"pyde","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"593505512","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFile name: fasta_header.py\nAuthor: Ivan Munoz-Gutierrez\nDate created: 04/02/2021\nDate last modified: 04/18/2021\nPython version: 3.9\nDescription: Change the header names of the fasta sequences that are inside\n the assembly.fasta file created by Unicycler. The\n headers are renamed in the following style:\n SWXXXX_method_length_topology\n\"\"\"\n\nimport sys\nimport os\nimport argparse\nimport textwrap\n\n\ndef user_input():\n \"\"\"\n Parse command line arguments provided by the user and provide help if\n requested by user or if there is a wrong command.\n\n Returns\n -------\n argparse object (.input and .output)\n .input : string\n Path to the fasta file that will be processed. This argument is\n mandatory.\n .output : string\n Path to the output directory. This argument is optional.\n \"\"\"\n # Creating a parser object for parsing arguments.\n parser = argparse.ArgumentParser(\n prog=\"python3 fasta_header.py\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=textwrap.dedent(\"\"\"\n Rename fasta headers of assembly.fasta files created by Unicycler\n following MSP's style.\n \"\"\"),\n epilog=textwrap.dedent(\"\"\"\n Headers are renamed in the following style:\n SWXXXX_method_length_topology.\n The \"method\" tag provides information regarding the methodology used in\n the lab for the assembly. An example of a header's name following MSP's\n style is the following:\n\n isolate length topology\n |----| |-----| |------|\n >SW2315_n2760-R136-NB73-L1000-96NB_5000000_circular\n |---------method---------|\n\n Notice that the information provided by method is connected with dashes.\n\n The isolate's name and method are provided by the name of the directory\n that contains assembly.fasta. The length and topology are obtained from\n the information provided in the original header created by Unicycler. The\n renamed fasta sequences are saved in a file that follows the next\n style: SWXXXX_method_assembly.fasta. For example, the above header\n would be in a file named as follows:\n\n SW2315_n2760-R136-NB73-L1000-96NB_assembly.fasta\n\n Note:\n If the user doesn't provide an output path, the directory of the input\n fasta file will be used as the output path.\n \"\"\")\n )\n # Creating required arguments group.\n mandatory_arguments = parser.add_argument_group(\"required arguments\")\n mandatory_arguments.add_argument(\n \"-i\", \"--input\", required=True, help=\"Path to input fasta file\"\n )\n # Creating optional arguments.\n parser.add_argument(\n \"-o\", \"--output\", help=\"Path to output directory\"\n )\n # Saving parsed arguments.\n args = parser.parse_args()\n\n return args\n\n\ndef process_arguments(args):\n \"\"\"\n Process the command line arguments provided by the user.\n\n Parameters\n ----------\n args : argparse.Namespace\n Object holding the command line arguments provided by the user.\n\n Returns\n -------\n arguments : dictionary\n Information needed to rename headers of input file.\n Example:\n {\"input_file\": \"~/Documents/assemblies/assembly.fasta\",\n \"name_folder_infile\": \"assemblies\",\n \"output_folder\": \"~/Documents/results\"}\n\n Note:\n MSP's style of infile's folder name is described in the epilog variable of\n the user_input function.\n \"\"\"\n arguments = {\"input_file\": args.input}\n # Checking if user provided correct arguments. NOTE: sys.exit takes a\n # single argument; the original sys.exit(1, message=...) raised TypeError.\n if not os.path.exists(args.input):\n sys.exit(textwrap.dedent(\"\"\"\\\n Error: path to fasta file doesn't exist\\n\"\"\"))\n if not os.path.isfile(os.path.abspath(args.input)):\n sys.exit(textwrap.dedent(\"\"\"\\\n Error: provided input argument is not a file\\n\"\"\"))\n if (args.output is not None) and (not os.path.exists(args.output)):\n sys.exit(textwrap.dedent(\"\"\"\\\n Error: path to output directory doesn't exist\\n\"\"\"))\n\n # Getting path to folder that contains input file.\n path_folder_infile = os.path.dirname(os.path.abspath(args.input))\n # Getting name of folder that contains input file.\n name_folder_infile = os.path.basename(path_folder_infile)\n\n # Checking if the name of the infile's folder has the correct style.\n arguments[\"name_folder_infile\"] = check_name_folder_infile(\n name_folder_infile)\n\n # Getting path to output folder.\n if args.output is None:\n path_output = path_folder_infile\n else:\n path_output = args.output\n arguments[\"path_output\"] = path_output\n\n return arguments\n\n\ndef check_name_folder_infile(name_folder_infile):\n \"\"\"\n Check the correct style of infile's folder name according to MSP.\n\n Parameters\n ----------\n name_folder_infile : string\n\n Returns\n -------\n name : string\n Infile's folder name with the correct style.\n\n Note:\n MSP's style of infile's folder name is described in the epilog variable of\n the user_input function.\n \"\"\"\n # If name has a dash at its end, remove it.\n if name_folder_infile[len(name_folder_infile) - 1:] == '-':\n name_folder_infile = name_folder_infile[: -1]\n # If name_folder_infile has underscore at its end, remove it.\n if name_folder_infile[len(name_folder_infile) - 1:] == '_':\n name_folder_infile = name_folder_infile[: -1]\n # Split name_folder_infile using \"_\" as delimiter.\n name_folder_infile = name_folder_infile.split(\"_\")\n # Length of name_folder_infile.\n length_name_folder_infile = len(name_folder_infile)\n # Iterate over name_folder_infile to reconnect tags with the correct style.\n for index, tag in enumerate(name_folder_infile):\n if index == 0:\n name = tag + \"_\"\n elif index == (length_name_folder_infile - 1):\n
 name += tag + \"_\"\n else:\n name += tag + \"-\"\n\n return name\n\n\ndef make_new_header(header, name_folder_infile):\n \"\"\"\n Make new fasta header according to MSP style.\n\n Parameters\n ----------\n header : string\n Header of fasta sequence.\n name_folder_infile : string\n Name of folder that contains the fasta file being processed.\n\n Returns\n -------\n new_header : string\n Renamed header of fasta sequence.\n\n Note:\n MSP's style of infile's folder name is described in the epilog variable of\n the user_input function.\n \"\"\"\n # Information needed.\n length = \"\"\n topology = \"\"\n # Splitting header into list.\n header = header.split(\" \")\n # Looping over header to get info.\n for info in header:\n if \"length\" in info:\n info = info.split(\"=\")\n length = info[1].replace(\"\\n\", \"\")\n elif \"circular\" in info:\n info = info.split(\"=\")\n topology = info[0].replace(\"\\n\", \"\")\n # If there is no topology info, the molecule is linear.\n if topology == \"\":\n topology = \"linear\"\n\n return \">\" + name_folder_infile + length + \"_\" + topology + \"\\n\"\n
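\n# (Editor's example, added for illustration; the values are hypothetical.)\n# Given the Unicycler header '>1 length=5000000 depth=1.00x circular=true' and\n# the folder name 'SW2315_n2760-R136-NB73-L1000-96NB', make_new_header returns\n# '>SW2315_n2760-R136-NB73-L1000-96NB_5000000_circular'.\n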
\n\ndef rename_headers(input_file, name_folder_infile, path_output):\n \"\"\"\n Make a fasta file with renamed headers according to MSP's style.\n\n Parameters\n ----------\n input_file : string\n Path to input fasta file.\n name_folder_infile : string\n Folder's name that contains the input fasta file.\n path_output : string\n Path to output directory.\n\n Note:\n MSP's style of infile's folder name is described in the epilog variable of\n the user_input function.\n \"\"\"\n # Opening input file for reading.\n with open(input_file, \"r\") as file_reader:\n # Path to outfile.\n outfile = path_output + \"/\" + name_folder_infile + \"assembly.fasta\"\n # Opening output file for writing.\n with open(outfile, \"w\") as file_writer:\n # Iterating over file_reader.\n for line in file_reader:\n # Checking if line is a header:\n if line.startswith(\">\"):\n # Change header's name.\n new_header = make_new_header(line, name_folder_infile)\n file_writer.write(new_header)\n else:\n file_writer.write(line)\n\n\ndef main():\n \"\"\"Run the script\"\"\"\n # Getting user input.\n args = user_input()\n # Processing arguments provided by user to get input for the function\n # rename_headers.\n input_rename_headers = process_arguments(args)\n # Renaming headers\n rename_headers(\n input_rename_headers[\"input_file\"],\n input_rename_headers[\"name_folder_infile\"],\n input_rename_headers[\"path_output\"])\n # If everything went well, print a message.\n print(\"Headers were successfully renamed!\")\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"fasta_header.py","file_name":"fasta_header.py","file_ext":"py","file_size_in_byte":9062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"122701574","text":"# -*- coding: utf-8 -*-\r\n\"\"\" \r\n@Time : 2018/1/4 17:21\r\n@Author : Zhu Junwei\r\n@File : shuffle_list.py\r\n\"\"\"\r\nimport random\r\n\r\nfile = open('sample_list_new.txt','r')\r\nlines = file.readlines()\r\nfile.close()\r\nrandom.shuffle(lines)\r\nnewfile = open('sample_list_random.txt','w')\r\nfor line in lines:\r\n newfile.write(line)\r\nnewfile.close()\r\nline_num = len(lines)\r\nval_num = int(line_num / 10)\r\ntrain = open('train.txt', 'w')\r\nval = open('val.txt', 'w')\r\nidx = 0\r\nfor line in lines:\r\n idx += 1\r\n if idx > val_num:\r\n train.write(line)\r\n else:\r\n val.write(line)\r\n\r\ntrain.close()\r\nval.close()","sub_path":"pic_evaluator/shuffle_list.py","file_name":"shuffle_list.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"319123639","text":"from django.shortcuts import render\nfrom dispatcher.tasks import run_command\n# Create your views here.\nfrom django import forms\nfrom .models import Task\nfrom inventory.models import Server\n\nclass RunForm(forms.Form):\n command = forms.CharField(label='Command', max_length=100)\n\n\ndef main(request):\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = RunForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n task = Task()\n task.name = 'Test async task'\n task.inventory = Server.objects.get(id=1)\n x = run_command.delay(form.cleaned_data['command'])\n task.uuid = x.task_id\n task.save()\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = RunForm()\n tasks = Task.objects.all().order_by('-created_at')\n return render(request, 'tasks/index.html', {'form':form, 'tasks':tasks})","sub_path":"tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"256832874","text":"from django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseNotFound, Http404\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom website.forms.comment import CommentForm\nfrom website.forms.screens import ScreenUploadForm\nfrom website.models import Screen, Comment\n\n\n@login_required\ndef upload(request):\n if request.method == 'GET':\n form = ScreenUploadForm()\n return render(request, 'upload.html', {'form': form})\n elif request.method == 'POST':\n # Store this stuff into the database and redirect\n form = ScreenUploadForm(request.POST, request.FILES)\n if form.is_valid():\n #screen = Screen(caption=form.cleaned_data['caption'], image=form.cleaned_data['image'],\n # team=form.cleaned_data['team'])\n #screen.save()\n form.save()\n return redirect('screens:upload')\n else:\n return HttpResponseNotFound('Oops')\n\n\n@login_required\ndef view_screen(request, screen_id):\n screen = get_object_or_404(Screen, pk=screen_id)\n comments = Comment.objects.filter(screen=screen)\n form = CommentForm()\n form.helper.form_action = reverse('screens:comment', kwargs={'screen_id': screen.id})\n return render(request, 'view_screen.html', {'screen': screen, 'comments': comments, 'comment_form': form})\n\n\n@login_required\ndef review(request):\n \"\"\"\n View screen shots\n :param request:\n :return:\n \"\"\"\n screens_list = Screen.objects.all()\n paginator = Paginator(screens_list, 1)\n page = request.GET.get('page')\n\n try:\n screens = paginator.page(page)\n except PageNotAnInteger:\n screens = paginator.page(1)\n except EmptyPage:\n screens = paginator.page(paginator.num_pages)\n current_screen = screens.object_list.all()[0]\n form = CommentForm()\n form.helper.form_action = reverse('screens:comment', kwargs={'screen_id': current_screen.id})\n comments = Comment.objects.filter(screen=current_screen)\n user = request.user\n\n return render(request, 'review.html',\n {'screens': screens, 'comments': comments, 'comment_form': form, 'current_user': user})\n
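\n# (Editor's note, added.) Paginator(screens_list, 1) pages one screenshot at a\n# time, so e.g. /review?page=3 renders the third screenshot with its comments.\n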
\n\ndef comment(request, screen_id):\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n screen = Screen.objects.get(id=screen_id)\n new_comment = Comment(screen=screen, comment=form.cleaned_data['comment'], created_by_user=request.user)\n new_comment.save()\n return redirect(request.META['HTTP_REFERER'])\n elif request.method == 'GET':\n raise Http404","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"180747783","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"VGG for ImageNet using TL models.\"\"\"\n\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport tensorlayer as tl\nfrom tensorlayer.models.imagenet_classes import class_names\n\ntf.logging.set_verbosity(tf.logging.DEBUG)\ntl.logging.set_verbosity(tl.logging.DEBUG)\n\n\n# get the whole model\nsess = tf.InteractiveSession()\nvgg = tl.models.vgg.vgg16(pretrained=True, sess=sess)\n# sess.run(tf.global_variables_initializer())\n\n\nx = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])\ny = vgg(x, is_train=False)\n\nimg1 = tl.vis.read_image('data/tiger.jpeg')\nimg1 = tl.prepro.imresize(img1, (224, 224))\nimg1 = img1.astype(np.float32)\nmean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([1, 1, 1, 3])\nimg1 = img1 - mean\n\nstart_time = time.time()\noutput = sess.run(y, feed_dict={x: img1})\nprobs = tf.nn.softmax(output)[0].eval()\nprint(\" End time : %.5ss\" % (time.time() - start_time))\npreds = (np.argsort(probs)[::-1])[0:5]\nfor p in preds:\n print(class_names[p], probs[p])\n","sub_path":"examples/pretrained_cnn/tutorial_models_vgg_static.py","file_name":"tutorial_models_vgg_static.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"296422792","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nn = np.array([100, 1000, 1500, 10000,16000])\nt1 = np.array([1.56, 4.65, 5.403, 14.04, 17.04,])\nt2 = np.array([6.23, 7.5, 8.18, 14.82, 18.87])\n\nn2 = np.array([2, 4, 16, 32, 50])\nt3 = np.array([6.83, 7.68, 12.124, 17.25, 23.33])\nt4 = np.array([6.24, 6.35, 7.902, 8.177, 13.14])\n\nfig = plt.figure()\nplt.subplot(211)\nplt.plot(n2,t3)\nplt.plot(n2,t4)\nplt.legend(['PPCA(EM)', 'RPCA'])\nplt.suptitle('Comparison of PPCA(EM) and RPCA for Corrupted Values')\nplt.xlabel('% of Corrupted Value')\nplt.ylabel('Error in %')\n\n\nplt.subplot(222)\nplt.plot(n,t1)\nplt.plot(n,t2)\nplt.legend(['PPCA(EM)', 'RPCA'])\nplt.suptitle('Comparison of PPCA(EM) and RPCA for Missing Values')\nplt.xlabel('Number of missing values')\nplt.ylabel('Error in %')\nplt.show()\nfig.savefig('MissingValues.jpg')\n\n","sub_path":"submission2/Codes/PlottingGraphs/MissingComparision.py","file_name":"MissingComparision.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"555184781","text":"import sys\n\nimport igraph  # needed by tree2graph below; missing from the original imports\nfrom PyQt4 import QtGui\nfrom PyQt4.QtCore import Qt\n\n# from PyQt4.QtCore import *\n\n\ndef tree2nodes(tree, n=0):\n edges = []\n result = [tree]\n if \"children\" in tree:\n for c in tree[\"children\"]:\n childnodes, childedges = tree2nodes(c, n + len(result))\n edges.append((n, n + len(result)))\n edges += childedges\n result += childnodes\n return result, edges\n\n\ndef tree2graph(tree):\n\n if tree:\n nodes, edges = 
tree2nodes(tree)\n if edges:\n g = igraph.Graph(edges)\n layout = g.layout_reingold_tilford(root=[0])\n return zip(nodes, layout.coords), edges\n else:\n return [], []\n else:\n return [], []\n\n\nclass MainWindow(QtGui.QWidget):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.init_ui()\n\n def init_ui(self):\n treeview = TreeDraw()\n\n model = QtGui.QTextEdit()\n program = QtGui.QTextEdit()\n button = QtGui.QPushButton(\"Ground\")\n button.setMinimumHeight(50)\n\n hbox = QtGui.QHBoxLayout()\n\n vbox = QtGui.QVBoxLayout()\n # vbox.addStretch(1)\n vbox.addWidget(model)\n vbox.addWidget(button)\n vbox.addWidget(program)\n model.setMaximumWidth(400)\n program.setMaximumWidth(400)\n\n hbox.addWidget(treeview)\n hbox.addLayout(vbox)\n\n self.setLayout(hbox)\n\n self.setGeometry(50, 50, 1024, 768)\n self.setWindowTitle(\"Interactive Grounder\")\n self.show()\n\n\nclass TreeDraw(QtGui.QWidget):\n\n NODE_WIDTH = 240\n NODE_HEIGHT = 30\n\n def __init__(self):\n super(TreeDraw, self).__init__()\n\n self.tree = {}\n\n def paintEvent(self, event):\n p = QtGui.QPainter()\n p.begin(self)\n self.drawTree(p)\n p.end()\n\n def drawTree(self, p):\n tree = self.tree\n coordnodes, coordedges = tree2graph(self.tree)\n\n if coordnodes:\n\n minx = min([c[0] for n, c in coordnodes])\n maxx = max([c[0] for n, c in coordnodes])\n miny = min([c[1] for n, c in coordnodes])\n maxy = max([c[1] for n, c in coordnodes])\n\n sy = float(maxy - miny) / (self.height())\n sx = float(maxx - minx) / (self.width())\n\n self.coordinates = []\n self.nodes = []\n\n for node, coord in coordnodes:\n x, y = coord\n if sx > 0:\n x = (x - minx) / sx\n if sy > 0:\n y = (y - miny) / sy\n\n self.coordinates.append((x, y))\n self.nodes.append(node)\n self.drawNode(p, (x, y), node)\n\n for a, b in coordedges:\n x1, y1 = self.coordinates[a]\n x2, y2 = self.coordinates[b]\n p.drawLine(\n x1 + self.NODE_WIDTH / 2,\n y1 + self.NODE_HEIGHT,\n x2 + self.NODE_WIDTH / 2,\n y2,\n )\n\n def drawNode(self, p, coord, node):\n x, y = coord\n\n color = QtGui.QColor(255, 255, 255)\n if node[\"name\"].startswith(\"message\"):\n color = QtGui.QColor(255, 255, 0)\n elif node.get(\"cycle\"):\n color = QtGui.QColor(255, 0, 0)\n\n p.fillRect(x, y, self.NODE_WIDTH, self.NODE_HEIGHT, color)\n p.drawRect(x, y, self.NODE_WIDTH, self.NODE_HEIGHT)\n p.drawText(\n x,\n y,\n self.NODE_WIDTH,\n self.NODE_HEIGHT,\n Qt.AlignVCenter | Qt.AlignLeft,\n node[\"text\"],\n )\n\n def mousePressEvent(self, event):\n print(\"clicked\", event.x(), event.y())\n x = event.x()\n y = event.y()\n for i, c in enumerate(self.coordinates):\n if (\n c[0] <= x <= c[0] + self.NODE_WIDTH\n and c[1] <= y <= c[1] + self.NODE_HEIGHT\n ):\n name = self.nodes[i][\"name\"]\n if name.startswith(\"message_\"):\n index = int(name[8:])\n message_order.make_choice(index)\n\n\ndef main(**kwdargs):\n app = QtGui.QApplication(sys.argv)\n ex = MainWindow()\n sys.exit(app.exec_())\n\n\ndef argparser():\n import argparse\n\n parser = argparse.ArgumentParser()\n # parser.add_argument('inputfile')\n return parser\n\n\nif __name__ == \"__main__\":\n main(**vars(argparser().parse_args()))\n","sub_path":"usecases/interactive_grounder/nice_gui.py","file_name":"nice_gui.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"360772339","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom setuptools import setup, find_packages\nimport os\n\n\nversion = '0.2.3'\n\n\nCLASSIFIERS = [\n 'Environment :: Web 
Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Framework :: Django'\n]\n\n\ndef long_description():\n with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as fp:\n return fp.read()\n\nsetup(\n name='django-disguise',\n author='Mikhail Porokhovnichenko',\n version=version,\n author_email='marazmiki@gmail.com',\n url='https://github.com/marazmiki/django-disguise',\n description=('This django application allows superuser to '\n '\"disguise\" into any user'),\n long_description=long_description(),\n license='MIT license',\n platforms=['OS Independent'],\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=['test_project', 'test_project.*']),\n test_suite='tests.main',\n include_package_data=True,\n zip_safe=False\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"149368192","text":"from qiling.os.uefi.runtime import hook_GetVariable, hook_GetNextVariableName, hook_SetVariable, \\\n hook_QueryVariableInfo\n\nfrom .smm_sw_dispatch_type import EFI_SMM_SW_DISPATCH_PROTOCOL\nfrom qiling.const import *\nfrom qiling.os.const import *\nfrom qiling.os.uefi.const import *\nfrom .smm_sw_dispatch_type import *\nfrom qiling.os.uefi.fncc import *\nimport ctypes\n\npointer_size = ctypes.sizeof(ctypes.c_void_p)\n\nsmram = 0\n\n@dxeapi(params={\n \"This\": POINTER, #POINTER_T(struct__EFI_SMM_SW_DISPATCH2_PROTOCOL)\n \"DispatchFunction\": POINTER, #POINTER_T(ctypes.CFUNCTYPE(ctypes.c_uint64, POINTER_T(None), POINTER_T(None), POINTER_T(None), POINTER_T(ctypes.c_uint64)))\n \"RegisterContext\": POINTER, #POINTER_T(struct_EFI_SMM_SW_REGISTER_CONTEXT)\n \"DispatchHandle\": POINTER, #POINTER_T(POINTER_T(None))\n})\ndef hook_SMM_SW_DISPATCH_Register(ql, address, params):\n smi_num = int.from_bytes(ql.mem.read(params['RegisterContext'], 8), 'little')\n ql.os.smm.swsmi_handlers.append((smi_num, params))\n return EFI_SUCCESS\n \n@dxeapi(params={\n \"This\": POINTER, #POINTER_T(struct__EFI_SMM_SW_DISPATCH2_PROTOCOL)\n \"DispatchHandle\": POINTER, #POINTER_T(None)\n})\ndef hook_SMM_SW_DISPATCH_UnRegister(ql, address, params):\n return EFI_UNSUPPORTED\n\ndef install_EFI_SMM_SW_DISPATCH_PROTOCOL(ql, start_ptr):\n efi_smm_sw_dispatch_protocol = EFI_SMM_SW_DISPATCH_PROTOCOL()\n ptr = start_ptr + ctypes.sizeof(EFI_SMM_SW_DISPATCH_PROTOCOL)\n pointer_size = 8\n\n efi_smm_sw_dispatch_protocol.Register = ptr\n ql.hook_address(hook_SMM_SW_DISPATCH_Register, ptr)\n ptr += pointer_size\n\n efi_smm_sw_dispatch_protocol.UnRegister = ptr\n ql.hook_address(hook_SMM_SW_DISPATCH_UnRegister, ptr)\n ptr += pointer_size\n\n return (ptr, efi_smm_sw_dispatch_protocol)\n\n","sub_path":"smm/protocols/smm_sw_dispatch_protocol.py","file_name":"smm_sw_dispatch_protocol.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"336266166","text":"import random\nprint(\"Jogo de dados\")\nem_jogo = True\n# i = 1\n# while i == 1:\nwhile em_jogo:\n numero = 0\n while numero < 1 or numero > 6:\n try:\n numero = int(input(\"Por favor digite um numero de 1 a 6\"))\n except:\n print(\"Por favor digite um número 
valido na faixa entre 1 e 6\")\n dado = random.randint(1, 6)\n if numero == dado:\n print(\"Parabéns você acertou\")\n else:\n # print(\"Que pena você perdeu o dado mostrou o numero {}\".format(dado))\n print(f\"Que pena você perdeu o dado mostrou o numero {dado}\")\n resposta = input(\"Você deseja jogar novamente (S/N)\")\n if resposta.upper() == 'N': # accept a lowercase 'n' as well\n em_jogo = False\n # i = 2\n # if resposta == 'N':\n # break\nprint(\"Fim do jogo\")\n","sub_path":"djd-prog2/manha-aula5/jogo-dado-com-try.py","file_name":"jogo-dado-com-try.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"159094245","text":"#!/usr/bin/env python\n\nimport nacl.secret\nimport nacl.utils\n\n\"\"\"\nPart 2\n\"\"\"\n\nif __name__ == '__main__':\n with open('part2.ciphertext.bin', 'rb') as ct:\n with open('part2.key.bin', 'rb') as key:\n box = nacl.secret.SecretBox(key.read())\n print(str(box.decrypt(ct.read())))\n","sub_path":"Part2.py","file_name":"Part2.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"99810187","text":"import FWCore.ParameterSet.Config as cms\n\n# step 4\n\n# seeding\n#from FastSimulation.Tracking.IterativeFourthSeedProducer_cff import *\nimport FastSimulation.Tracking.TrajectorySeedProducer_cfi\niterativePixelLessSeeds = FastSimulation.Tracking.TrajectorySeedProducer_cfi.trajectorySeedProducer.clone()\niterativePixelLessSeeds.firstHitSubDetectorNumber = [3]\niterativePixelLessSeeds.firstHitSubDetectors = [3, 4, 6]\niterativePixelLessSeeds.secondHitSubDetectorNumber = [3]\niterativePixelLessSeeds.secondHitSubDetectors = [3, 4, 6]\niterativePixelLessSeeds.thirdHitSubDetectorNumber = [0]\niterativePixelLessSeeds.thirdHitSubDetectors = []\niterativePixelLessSeeds.seedingAlgo = ['PixelLessPairs']\n###iterativePixelLessSeeds.minRecHits = [5]\niterativePixelLessSeeds.minRecHits = [3]\niterativePixelLessSeeds.pTMin = [0.3]\n#cut on fastsim simtracks. 
I think it should be removed for the 4th step\n#iterativePixelLessSeeds.maxD0 = [20.]\n#iterativePixelLessSeeds.maxZ0 = [50.]\niterativePixelLessSeeds.maxD0 = [99.]\niterativePixelLessSeeds.maxZ0 = [99.]\n#-----\niterativePixelLessSeeds.numberOfHits = [3]\n#values for the seed compatibility constraint\niterativePixelLessSeeds.originRadius = [1.0]\niterativePixelLessSeeds.originHalfLength = [12.0]\niterativePixelLessSeeds.originpTMin = [0.4] # was 0.6\niterativePixelLessSeeds.zVertexConstraint = [-1.0]\niterativePixelLessSeeds.primaryVertices = ['none']\n\niterativePixelLessSeeds.newSyntax = True\n#iterativePixelLessSeeds.layerList = ['TIB1+TIB2',\n# 'TIB1+TID1_pos','TIB1+TID1_neg',\n# 'TID3_pos+TEC1_pos','TID3_neg+TEC1_neg',\n# 'TID1_pos+TID2_pos','TID2_pos+TID3_pos',\n# 'TEC1_pos+TEC2_pos','TEC2_pos+TEC3_pos','TEC3_pos+TEC4_pos','TEC3_pos+TEC5_pos','TEC4_pos+TEC5_pos',\n# 'TID1_neg+TID2_neg','TID2_neg+TID3_neg',\n# 'TEC1_neg+TEC2_neg','TEC2_neg+TEC3_neg','TEC3_neg+TEC4_neg','TEC3_neg+TEC5_neg','TEC4_neg+TEC5_neg']\nfrom RecoTracker.IterativeTracking.PixelLessStep_cff import pixelLessStepSeedLayers\niterativePixelLessSeeds.layerList = pixelLessStepSeedLayers.layerList\n\n# candidate producer\n#from FastSimulation.Tracking.IterativeFourthCandidateProducer_cff import *\nimport FastSimulation.Tracking.TrackCandidateProducer_cfi\niterativePixelLessTrackCandidates = FastSimulation.Tracking.TrackCandidateProducer_cfi.trackCandidateProducer.clone()\niterativePixelLessTrackCandidates.SeedProducer = cms.InputTag(\"iterativePixelLessSeeds\",\"PixelLessPairs\")\niterativePixelLessTrackCandidates.TrackProducers = ['initialStepTracks', 'lowPtTripletStepTracks', 'pixelPairStepTracks', 'detachedTripletStepTracks','mixedTripletStepTracks'] # add 0 and 0.5 ?\niterativePixelLessTrackCandidates.KeepFittedTracks = False\niterativePixelLessTrackCandidates.MinNumberOfCrossedLayers = 6 # was 5\n\n\n# track producer\n#from FastSimulation.Tracking.IterativeFourthTrackProducer_cff import *\nimport RecoTracker.TrackProducer.CTFFinalFitWithMaterial_cfi\niterativePixelLessTracks = RecoTracker.TrackProducer.CTFFinalFitWithMaterial_cfi.ctfWithMaterialTracks.clone()\niterativePixelLessTracks.src = 'iterativePixelLessTrackCandidates'\niterativePixelLessTracks.TTRHBuilder = 'WithoutRefit'\n##iterativePixelLessTracks.Fitter = 'KFFittingSmootherWithOutlierRejection'\niterativePixelLessTracks.Fitter = 'KFFittingSmootherFourth'\niterativePixelLessTracks.Propagator = 'PropagatorWithMaterial'\n\n\n# track merger\n#from FastSimulation.Tracking.IterativeFourthTrackMerger_cfi import *\npixelLessStepTracks = cms.EDProducer(\"FastTrackMerger\",\n TrackProducers = cms.VInputTag(cms.InputTag(\"iterativePixelLessTrackCandidates\"),\n cms.InputTag(\"iterativePixelLessTracks\")),\n RemoveTrackProducers = cms.untracked.VInputTag(cms.InputTag(\"initialStepTracksr\"),\n cms.InputTag(\"lowPtTripletStepTracks\"), \n cms.InputTag(\"pixelPairStepTracks\"), \n cms.InputTag(\"detachedTripletStepTracks\"), \n cms.InputTag(\"mixedTripletStepTracks\")), \n trackAlgo = cms.untracked.uint32(9),\n MinNumberOfTrajHits = cms.untracked.uint32(6), # was 5\n MaxLostTrajHits = cms.untracked.uint32(0)\n )\n\n# track selection\nimport RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi\npixelLessStepSelector = RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.multiTrackSelector.clone(\n src='pixelLessStepTracks',\n trackSelectors= cms.VPSet(\n RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.looseMTS.clone(\n name = 'pixelLessStepLoose',\n 
chi2n_par = 0.5,\n res_par = ( 0.003, 0.001 ),\n minNumberLayers = 5,\n maxNumberLostLayers = 1,\n minNumber3DLayers = 3,\n d0_par1 = ( 1.5, 4.0 ),\n dz_par1 = ( 1.5, 4.0 ),\n d0_par2 = ( 1.5, 4.0 ),\n dz_par2 = ( 1.5, 4.0 )\n ),\n RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.tightMTS.clone(\n name = 'pixelLessStepTight',\n preFilterName = 'pixelLessStepLoose',\n chi2n_par = 0.35,\n res_par = ( 0.003, 0.001 ),\n minNumberLayers = 5,\n maxNumberLostLayers = 0,\n minNumber3DLayers = 3,\n d0_par1 = ( 1.2, 4.0 ),\n dz_par1 = ( 1.2, 4.0 ),\n d0_par2 = ( 1.2, 4.0 ),\n dz_par2 = ( 1.2, 4.0 )\n ),\n RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.highpurityMTS.clone(\n name = 'pixelLessStep',\n preFilterName = 'pixelLessStepTight',\n chi2n_par = 0.25,\n res_par = ( 0.003, 0.001 ),\n minNumberLayers = 5,\n maxNumberLostLayers = 0,\n minNumber3DLayers = 3,\n d0_par1 = ( 1., 4.0 ),\n dz_par1 = ( 1., 4.0 ),\n d0_par2 = ( 1., 4.0 ),\n dz_par2 = ( 1., 4.0 )\n ),\n ) #end of vpset\n ) #end of clone\n\n\n# sequence\niterativePixelLessStep = cms.Sequence(iterativePixelLessSeeds+\n iterativePixelLessTrackCandidates+\n iterativePixelLessTracks+\n pixelLessStepTracks+\n pixelLessStepSelector)\n\n","sub_path":"FastSimulation/Tracking/python/IterativePixelLessStep_cff.py","file_name":"IterativePixelLessStep_cff.py","file_ext":"py","file_size_in_byte":7466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"183107857","text":"# coding: utf-8\n# @author: hongxin\n# @date: 9/16/18\n\n\ndef select_sort(arr):\n \"\"\"\n 1. Selection sort: keep picking the minimum of the remaining list and\n move it to the left.\n - Compare the first element with all the others and select the minimum.\n - Compare the second element with the rest and select the minimum.\n - Repeat.\n 2. Best and worst case are both O(n^2): a full pass is needed just to\n find the current minimum, so best and worst case are the same.\n 3. Unstable: for the sequence 5 8 5 2 9, the first pass swaps the first 5\n with 2, which breaks the relative order of the two 5s, so selection\n sort is not a stable sorting algorithm.\n :param arr:\n :return:\n \"\"\"\n\n for i in range(0, len(arr)):\n for k in range(i+1, len(arr)):\n if arr[i] > arr[k]:\n arr[i], arr[k] = arr[k], arr[i] # pairwise compare; move the smaller value to the left\n return arr\n\n\n","sub_path":"algorithm/sort_algorithm/sort_select.py","file_name":"sort_select.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"234214283","text":"import pandas as pd\nimport numpy as np\nimport yfinance as yf\nfrom datetime import datetime, timedelta\n\ndef sma(price_data):\n ans = pd.DataFrame(0, index=price_data.index, columns=['price', 'sma'])\n ans['price'] = price_data\n ans['sma'] = price_data.rolling(window=20).mean()\n return ans\n\ndef bollinger_bands(price_data):\n ans = pd.DataFrame(0, index=price_data.index, columns=['upper_band', 'lower_band', 'price', 'bp'])\n rolling_mean = price_data.rolling(window=20).mean()\n standard_dev = price_data.rolling(window=20).std()\n ans['upper_band'] = rolling_mean + (2*standard_dev)\n ans['lower_band'] = rolling_mean - (2*standard_dev)\n\n ans['bp'] = (price_data - ans['lower_band'])/(ans['upper_band']-ans['lower_band'])*100\n ans['price'] = price_data\n return ans\n
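\n# (Editor's note, added.) 'bp' is the %B indicator: the price's position inside\n# the bands scaled to 0-100, e.g. lower=90, upper=110, price=105 gives\n# bp = (105 - 90) / (110 - 90) * 100 = 75.\n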
def volatility(price_data):\n    ans = pd.DataFrame(0, index=price_data.index, columns=['price', 'volatility'])\n    ans['price'] = price_data\n    ans['volatility'] = price_data.rolling(window=10).std()\n    return ans\n\n\n\ndef calculate_scores():\n    f = open(\"../stock_data/sandp500.txt\", \"r\")\n    symbols = f.read().splitlines()\n    f.close()\n\n    scores = []\n    for symbol in symbols:\n        data = yf.Ticker(symbol)\n        tickdf = data.history(period='1d', start=datetime.today() - timedelta(days=90), end=datetime.today())\n        # skip symbols that yfinance failed to download\n        if symbol in yf.shared._ERRORS:\n            continue\n\n        if tickdf.shape[0] < 30:\n            print(symbol)\n            continue\n        stock_vol = volatility(tickdf['Open'])\n        stock_boll = bollinger_bands(tickdf['Open'])\n        stock_sma = sma(tickdf['Open'])\n\n        mean_vol = stock_vol[\"volatility\"].mean()\n        flag = 0\n        last_price = stock_sma['price'].iloc[-2]\n        current_price = stock_sma['price'].iloc[-1]\n\n        # Bollinger Calculation\n        upper_band = stock_boll['upper_band'].iloc[-1]\n        lower_band = stock_boll['lower_band'].iloc[-1]\n        bp = stock_boll['bp'].iloc[-1]\n\n        if (last_price >= upper_band) and (current_price <= upper_band):\n            flag -= 1\n        elif (last_price <= lower_band) and (current_price >= lower_band):\n            flag += 1\n\n        if (last_price >= current_price) and (bp >= 70):\n            flag -= bp/100\n        elif (last_price <= current_price) and (bp <= 30):\n            flag += (100-bp)/100\n\n        # Volatility\n        curr_vol = stock_vol['volatility'].iloc[-1]\n        vol_score = (mean_vol-curr_vol)/(mean_vol*2)\n        flag += vol_score\n        entry = (symbol, flag)\n        scores.append(entry)\n    scores = sorted(scores, reverse=True, key=lambda x:x[1])\n    res_string = \"\"\n    for i in scores[:8]:\n        res_string += i[0]\n        res_string += \", \"\n        res_string += str(round(i[1], 2))\n        res_string += \"\\n\"\n\n    f = open(\"../outputs/No_SMA_Top_Scores.txt\", \"w\")\n    f.write(res_string)\n    f.close()\n\n    f = open(\"../outputs/No_SMA_all_scores.txt\", \"w\")\n    res_string = \"\"\n    for i in scores:\n        res_string += i[0]\n        res_string += \", \"\n        res_string += str(round(i[1], 2))\n        res_string += \"\\n\"\n    f.write(res_string)\n    f.close()\n\n","sub_path":"scripts/calculate_without_sma.py","file_name":"calculate_without_sma.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"216584435","text":"# Problem: a number that equals the sum of its proper divisors is called a \"perfect number\",\n# e.g. 6 = 1 + 2 + 3. Find all perfect numbers up to 1000.\n\nfor i in range(1,1001):\n    a=0\n    for j in range(1,i+1):\n        if i%j==0 and i>j:\n            a+=j\n    if i==a:\n        print(i)\n\n","sub_path":"test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"96196061","text":"from HyperAPI.hdp_api.routes import Resource, Route\n\n\nclass Exports(Resource):\n    name = \"Exports\"\n\n    class _export(Route):\n        name = \"export\"\n        httpMethod = Route.GET\n        path = \"/exports/{file_name}\"\n        _path_keys = {\n            'file_name': Route.VALIDATOR_ANY,\n        }\n","sub_path":"HyperAPI/hdp_api/routes/exports.py","file_name":"exports.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"285430661","text":"from sqlalchemy import create_engine, and_\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import text\nfrom sqlalchemy.ext.automap import automap_base\nfrom functools import lru_cache\nfrom swagger_server.models import *\nimport logging\n\nlogging.basicConfig(filename='check.log', format='%(asctime)s %(levelname)s %(name)s: %(message)s')\nengine = create_engine('postgresql://postgres:Thienphu1@localhost:5432/ManagementStudents', hide_parameters=True,\n                       echo=True, query_cache_size=500)\nSession = sessionmaker()\n# config engine with session api\nSession.configure(bind=engine)\nsession = Session()\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n
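# automap reflects the existing schema, so every table below is exposed as a mapped class on\n# Base.classes without hand-written model definitions\n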
# instance of table Teachers\nTeachers_instants = Base.classes.teachers\n# instance of table Courses\nCourses_instants = Base.classes.courses\n# instance of table Classes\nClasses_instants = Base.classes.classes\n# instance of table students\nStudents_instants = Base.classes.students\n# instance of table events\nEvents_instants = Base.classes.events\n# instance of table exam_results\nExam_results_instants = Base.classes.exam_results\n# instance of table registrations\nRegistrations_instants = Base.classes.registrations\n# instance of table exams\nExams_instants = Base.classes.exams\n# instance of table plan\nPlans_instants = Base.classes.plans\n# instance of table user\nUsers_instants = Base.classes.users\n\nrole1 = Base.classes.authorization_each_role\nrole2 = Base.classes.authorization_each_author\n\n\n# get data\n\n# count all record\n# func to gen authorization for each role\n\n@lru_cache(maxsize=None)\ndef get_permis_each_role(role, per_id):\n    role = role.strip()\n    res = session.query(role1).filter(and_(role1.role_name == role, role1.per_id == per_id)).first()\n    if res is None:\n        return None\n    return res\n\n\n# func to gen authorization for each user\n@lru_cache(maxsize=None)\ndef get_permis_each_author(user_id, per_id):\n    res = session.query(role2).filter(and_(role2.user_id == user_id, role2.per_id == per_id)).first()\n    if res is None:\n        return None\n    return res\n\n\n# get id in permis\n@lru_cache(maxsize=None)\ndef get_per_id(permis):\n    with engine.begin() as conn:\n        # bind the parameter instead of interpolating it to avoid SQL injection\n        row = conn.execute(text(\"SELECT id FROM permistions WHERE per_name = :name\"), {\"name\": permis}).fetchone()\n    if row is None:\n        return None\n    return row[0]\n\n\n# get all data\n@lru_cache(maxsize=None)\ndef get_all_data(obj):\n    rows = session.query(obj)\n    number_of_rows = session.query(obj).count()\n    return rows, number_of_rows\n\n\n# func to add an instance object into table object through orm api session\n# note: add_data/delete_data mutate the session, so their results must not be cached\ndef add_data(obj):\n    try:\n        session.add(obj)\n        session.commit()\n    except Exception:\n        session.rollback()\n    finally:\n        session.close()\n\n\ndef delete_data(obj):\n    try:\n        session.delete(obj)\n        session.commit()\n    except Exception:\n        session.rollback()\n    finally:\n        session.close()\n\n\n# list of error\nerrors = {\n    \"404\": [{\n        \"detail\": \"ID Unknown\",\n        \"status\": 404,\n        \"title\": \"Not Found\",\n        \"type\": \"about:blank\"\n    }, 404],\n    \"400\": [{\n        \"detail\": \"The server could not understand the request due to invalid syntax\",\n        \"status\": 400,\n        \"title\": \"Bad Request\",\n        \"type\": \"about:blank\"\n    }, 400],\n    \"401\": [{\n        \"detail\": \"the client must authenticate itself to get the requested response\",\n        \"status\": 401,\n        \"title\": \"Unauthorized\",\n        \"type\": \"about:blank\"\n    }, 401],\n    \"405\": [{\n        \"detail\": \"The method request has been disabled and cannot be used\",\n        \"status\": 405,\n        \"title\": \"Method not allow\",\n        \"type\": \"about:blank\"\n    }, 405],\n    \"403\": [{\n        \"detail\": \"The client does not have access rights to the content\",\n        \"status\": 403,\n        \"title\": \"Forbidden\",\n        \"type\": \"about:blank\"\n    }, 403]\n\n}\n","sub_path":"python-flask-cache/swagger_server/controllers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"4090563","text":"import pandas as pd\nimport numpy as np\nfrom keras import Sequential\nfrom keras.layers import Conv2D, Flatten, Dense\nfrom sklearn import metrics\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom mlxtend.preprocessing import TransactionEncoder\nfrom mlxtend.frequent_patterns import apriori\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.linear_model import Lasso, Ridge, LogisticRegression\n
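# malware_plots is a project-local helper module; judging by the star import below, it supplies\n# the plot_roc, plot_confusion_matrix, plot_feature and evaluate helpers used later in this script\n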
from malware_plots import *\n\npath = 'D:/Document/10 DAAN 881 - Decision Making/Project/data/'\nx_train = pd.read_pickle(path + '/x_train_cleaned_for_week5.pkl')\ny_train = pd.read_pickle(path + '/y_train_cleaned_for_week5.pkl')\n\n# x_train.info()\n\nrand_int = np.random.randint(0, x_train.shape[0], 250000)\nsample_x = pd.DataFrame(x_train.iloc[rand_int])\nsample_y = pd.DataFrame(y_train.iloc[rand_int])\n# sample_y[\"HasDetections\"].value_counts()\n# rand_int = np.random.randint(0, X_train.shape[0], 1000000)\n# sample_x = pd.DataFrame(X_train.iloc[rand_int])\n# sample_y = pd.DataFrame(Y_train.iloc[rand_int])\n#\n# rand_int = np.random.randint(0, X_test.shape[0], 100000)\n# test_x = pd.DataFrame(X_test.iloc[rand_int])\n# test_y = pd.DataFrame(Y_test.iloc[rand_int])\n\nsample_x[\"HasDetections\"] = sample_y[\"HasDetections\"]\n\ncols_to_remove = [\"Census_FirmwareVersionIdentifier\",\n\"Census_OEMNameIdentifier\",\n\"Census_OSVersion\",\n\"OsBuildLab\",\n\"Census_FirmwareManufacturerIdentifier\",\n\"AVProductStatesIdentifier\",\n\"Census_ProcessorModelIdentifier\",\n\"PuaMode\",\n\"IsBeta\",\n\"UacLuaenable\",\n\"Census_IsFlightsDisabled\",\n\"Firewall\",\n\"Platform\",\n\"Census_DeviceFamily\",\n\"ProductName\",\n\"AutoSampleOptIn\",\n\"HasTpm\",\n\"Census_InternalPrimaryDisplayResolutionHorizontal\",\n\"Census_InternalPrimaryDisplayResolutionVertical\"\n]\ndrop2 = [\"Census_ProcessorCoreCount\",\n\"Census_PrimaryDiskTotalCapacity\",\n\"Census_SystemVolumeTotalCapacity\",\n\"Census_TotalPhysicalRAM\"]\n\nsample_x.drop(drop2, inplace=True, axis=1)\nsample_x.drop(cols_to_remove, inplace=True, axis = 1)\n# len(sample_x.columns) # should be equal to 52, 51 categorical cols and 1 detection col\n# sample_x.info()\n\nnp.random.seed(123)\n\n# train_test_split returns (train_x, test_x, train_y, test_y) in that order\ntrain_x, test_x, train_y, test_y = train_test_split(sample_x, sample_y, test_size=0.2)\n\n# Apriori Analysis\nsample_x.shape\nsample_x[\"HasDetections\"].value_counts()\ndataset = sample_x[sample_x[\"HasDetections\"]==1]\ndataset = dataset.astype('category')\ndf = pd.get_dummies(dataset, prefix = dataset.columns, prefix_sep=\"$\")\n# df[\"HasDetections$0\"].head()\ndataset[\"HasDetections\"].value_counts()\ndf.to_pickle(path+\"/df_columns_with dummied_week6_all1.pkl\")\n\ndf = pd.read_pickle(path+\"/df_columns_with dummied_week6.pkl\")\ndf[\"HasDetections$1\"].value_counts()\n# Apriori function\nx = apriori(df, min_support =0.2, use_colnames=True, max_len=5)\nx.info()\nx[\"length\"] = x[\"itemsets\"].apply(lambda x: len(x))\nx = x.sort_values(by=[\"length\"], ascending=False)\n\nx.to_csv(path + \"/assco.csv\", index=False)\n\n# x[\"length\"].value_counts()\ntop_10 = x[x[\"length\"]>=3]\n# top_10.info()\n# top_10.head()\nfeatures_list = []\ncomponent_list = []\n# count1 = 0\n# count2 = 0\ncount = 0\n# for i in x[\"itemsets\"]:\n    # if \"HasDetections$1\" in list(i):\n        # count1 = count1+1\n    # if \"HasDetections$0\" in list(i):\n        # count2 = count2+1\n\n\nfor i in top_10[\"itemsets\"]:\n    if (\"HasDetections$1\" in list(i)):\n        # print(True)\n        for j in i:\n            if (j!=\"HasDetections$1\"):\n                features_list.append('_'.join(j.split(\"$\")[:-1]))\n                component_list.append(j)\n        count = count + 1\n\n# list(top_10[\"itemsets\"])\n# len(set(features_list))\n# len(set(component_list))\n# set(features_list)\n\n
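# features_list now holds the base feature names that co-occur with HasDetections$1 in frequent\n# itemsets of length >= 3; component_list keeps the corresponding full dummy-column names\n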
temp = pd.DataFrame(pd.DataFrame(features_list, columns=[\"Features\"])[\"Features\"].value_counts())\ntemp[temp[\"Features\"]!=\"HasDetections\"]\ntemp[temp.index != \"HasDetections\"]\nfeatures_list.remove('HasDetections')\ncomponent_list.remove(\"HasDetections$1\")\n\nplot_feature(temp[temp.index != \"HasDetections\"])\ntemp[temp.index != \"HasDetections$1\"].to_csv(path + \"/component_list_week6.csv\")\ntemp\n# boruta implementation\nfrom boruta import BorutaPy\n\nyR = df[\"HasDetections$1\"].values.ravel()\nrf = RandomForestClassifier(n_jobs=-1, class_weight='balanced', max_depth=5)\nfeat_selector = BorutaPy(rf, n_estimators=200, verbose=2, random_state=1)\nX_train_array = df.drop([\"HasDetections$1\"], inplace=False, axis=1).values\nfeat_selector.fit(X_train_array, yR)\nfeature_imp = pd.Series(feat_selector.ranking_,\n                        index=sample_x.columns).sort_values(ascending=True)\n\n\n# Ridge regression\nfeatures_for_reg = list(set(component_list))\nfeatures_to_select = list(set(features_list))\n\nfeatures_to_select.remove(\"HasDetections\")\nfeatures_for_reg.remove(\"HasDetections$1\")\n# features_for_reg.remove(\"HasDetections$0\")\n# features_to_select\nsample_train = sample_x[features_to_select]\nsample_train.info()\nlen(features_to_select)\n# sample_train_dumm = df[features_for_reg]\n# sample_train_dumm.info()\nsample_x.drop([\"HasDetections\"], inplace=True, axis=1)\nselective_dummy = pd.get_dummies(sample_train.astype('category'), prefix_sep=\"$\")\n\ntrain_x, test_x, train_y, test_y = train_test_split(selective_dummy,\n                                                    sample_y, test_size=0.2)\n\n\nsample_train = sample_train.astype('category')\n\n\n\nsample_train = pd.get_dummies(sample_train, prefix=sample_train.columns)\n\ntest_x = test_x.astype('category')\ntest_x = pd.get_dummies(test_x, prefix=test_x.columns)\n\ntest_x1 = test_x[features_for_reg].values\ntest_y = test_y.values\n\n\nrr = Ridge(alpha=0.1)\nX = train_x.values\nX.shape\ny = train_y.values\ny.shape\nrr.fit(X, y)\nrr.coef_\ntrain_x.shape\nlen(features_for_reg)\n\n\n\nlogreg = LogisticRegression()\nlas = Lasso(alpha=0.1)\n\nlas.fit(X,y)\nlogreg.fit(X, y)\ntrain_x.shape\nX.shape\ny_pred_l = logreg.predict(train_x)\ny_pred_las = las.predict(train_x)\n\ny_pred = rr.predict(train_x)\nacc = metrics.accuracy_score(test_y, np.round(y_pred, 0).astype(int))\nprint(classification_report(test_y, np.round(y_pred, 0).astype(int)))\nprint(\"Accuracy: \", metrics.accuracy_score(train_y, np.round(y_pred_l, 0).astype(int)))\n\ny_pred_las[1:10]\nplot_roc(test_y, y_pred_las, path+\"/week6/roc_sample_250000_lasso_final.png\",\n         \"Lasso Regression ROC plot\")\ncm = confusion_matrix(test_y.values.ravel(), np.round(y_pred_las,0))\nplot_confusion_matrix(cm, classes = ['No Detection', 'Yes Detection'],\n                      save_path=path + \"/week6/cm_sample_250000_lr_final\",\n                      title = 'Malware Detection Confusion Matrix')\n\ncorr = sample_train.corr()\nplt.figure(figsize=(20,20))\nsns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool),\n            cmap=sns.diverging_palette(220, 10, as_cmap=True), square=True)\nplt.show()\n\n\n# random forest\n\ntype(train_y)\ntrain_y.shape\nrf = RandomForestClassifier(n_estimators=100, verbose=1, bootstrap=True, n_jobs=-1)\nrf.fit(X,train_y.values)\n\nfeature_imp = pd.Series(rf.feature_importances_,\n                        index=train_x.columns).sort_values(ascending=False)\n\ny_pred, acc = evaluate(rf, test_x, test_y)\n\n\nplot_roc(test_y, y_pred_las, path+\"/week6/roc_sample_250000_RF2_final.png\",\n         \"Random Forest ROC plot\")\ncm = confusion_matrix(test_y.values.ravel(), 
np.round(y_pred,0))\nplot_confusion_matrix(cm, classes = ['No Detection', 'Yes Detection'],\n save_path=path + \"/week6/cm_sample_250000_RF2_final\",\n title = 'Malware Detection Confusion Matrix')","sub_path":"Scripts/processing_scripts/malware_apriori_random_forest_w6.py","file_name":"malware_apriori_random_forest_w6.py","file_ext":"py","file_size_in_byte":7464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"385117718","text":"# ! python3\n\n# finnPåFINN.py - Opens FINN.no on the browser with the provided search term\n# # usage: python finnPåFINN.py \n\nimport webbrowser, sys, pyperclip\n\n\nif len(sys.argv) > 1:\n # Get query from command line\n\n query = ' '.join(sys.argv[1:])\n\nelse:\n #Get query from clipboard\n\n query = pyperclip.paste()\n\nwebbrowser.open('https://www.finn.no/globalsearchlander?q=' + query)\n\n","sub_path":"finnPåFINN.py","file_name":"finnPåFINN.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"547459742","text":"import krpc\r\nimport numpy as np\r\nimport pygame\r\n\r\n\r\nRENDER_NETWORK = True\r\n\r\ndef tanh(x):\r\n return np.tanh(x)\r\n\r\nclass Agent:\r\n def __init__(self, network_size, network_weights):\r\n self.network_size = network_size\r\n count = 0\r\n matrices = []\r\n for i,_ in enumerate(network_size):\r\n if i >= len(network_size) -1:\r\n continue\r\n matrices.append(np.array(network_weights[count:count+network_size[i+1]*network_size[i]]).reshape((network_size[i+1],network_size[i])))\r\n count+=network_size[i+1]*network_size[i]\r\n self.matrices = matrices\r\n self.activate = np.vectorize(tanh)\r\n \r\n def calculate_output(self, input):\r\n matrix = np.matrix(input).transpose()\r\n for mat in self.matrices:\r\n matrix = self.activate(mat @ matrix)\r\n # flatten matrix to 1D list\r\n return matrix.tolist()\r\n def calculate_node_values(self, input):\r\n matrix = np.matrix(input).transpose()\r\n node_data = []\r\n node_data.append(matrix.tolist())\r\n for mat in self.matrices:\r\n matrix = self.activate(mat @ matrix)\r\n # flatten matrix to 1D list\r\n node_data.append(matrix.tolist())\r\n \r\n return node_data\r\n \r\n def to_array(self):\r\n return [np.asarray(i).tolist() for i in self.matrices]\r\n def to_matrix(self, arr):\r\n self.matrices = [np.matrix(i) for i in arr]\r\n\r\n\r\ndef load_network():\r\n file = open(\"nn.txt\", \"r\")\r\n network_size = [int(i) for i in file.readline().replace(\"\\n\", \"\").split(\",\")]\r\n \r\n network_weights = [float(i) for i in file.readline().replace(\"\\n\", \"\").split(\",\")]\r\n \r\n agent = Agent(network_size, network_weights)\r\n \r\n file.close()\r\n \r\n return (agent,network_weights)\r\n\r\ndef reset_trial(connection):\r\n connection.space_center.load(\"ai_500m\")\r\n\r\ndef get_inputs(connection):\r\n \r\n inputs = [0.0]*8\r\n \r\n # get current rocket\r\n vessel = connection.space_center.active_vessel\r\n \r\n flight_info = vessel.flight()\r\n ref_frame = connection.space_center.ReferenceFrame.create_hybrid(\r\n position=vessel.orbit.body.reference_frame,\r\n rotation=vessel.surface_reference_frame)\r\n \r\n inputs[0], inputs[1], inputs[2], inputs[3] = (1.0,1.0,1.0,1.0) #flight_info.rotation\r\n \r\n inputs[4] = (flight_info.surface_altitude)/500.0 \r\n \r\n speed = vessel.flight(ref_frame).speed\r\n horizontal_speed = vessel.flight(ref_frame).horizontal_speed\r\n y_vel, x_vel, z_vel = vessel.flight(ref_frame).velocity\r\n vertical_speed = 
vessel.flight(ref_frame).vertical_speed * y_vel / np.abs(y_vel)\r\n #print(speed,horizontal_speed,vertical_speed)\r\n \r\n inputs[5] = horizontal_speed/100;\r\n inputs[6] = vertical_speed/100;\r\n inputs[7] = speed/100;\r\n\r\n #print(inputs)\r\n return inputs\r\ndef get_color(weight):\r\n weight = max(-1,min(1,weight))\r\n color = (max(0,int(weight*255)),0,max(0,int(-weight*255)))\r\n return color\r\n\r\ndef render_network(agent, screen, inputs, weight_render, render_weights, network_weights):\r\n network_size = agent.network_size\r\n NODE_DIST = 50\r\n NODE_SIZE = 10\r\n X_OFFSET = 100\r\n Y_OFFSET = 50\r\n \r\n # render weights\r\n if(render_weights):\r\n count = 0\r\n for i, layer in enumerate(network_size):\r\n if i==len(network_size)-1:\r\n break\r\n for row in range(network_size[i+1]):\r\n for col in range(network_size[i]):\r\n index = row*network_size[i+1] + col + count\r\n weight = network_weights[index]\r\n v1_dist = (500-Y_OFFSET)//layer\r\n node1_x = X_OFFSET+(i*125)\r\n node1_y = Y_OFFSET+(col*v1_dist)\r\n v2_dist = (500-Y_OFFSET)//network_size[i+1]\r\n node2_x = X_OFFSET+((i+1)*125)\r\n node2_y = Y_OFFSET+(row*v2_dist)\r\n color = get_color(weight)\r\n pygame.draw.line(weight_render, color=color,start_pos=(node1_x,node1_y), end_pos=(node2_x,node2_y))\r\n count += network_size[i+1]*network_size[i]\r\n # draw weights to screen\r\n screen.blit(weight_render, (0,0))\r\n \r\n network_data = agent.calculate_node_values(inputs)\r\n \r\n # draw nodes\r\n for i, layer in enumerate(network_size):\r\n v_dist = (500-Y_OFFSET)//layer\r\n node_data = network_data[i]\r\n for node in range(layer):\r\n val = node_data[node][0]\r\n color = get_color(val)\r\n pygame.draw.circle(screen,radius=NODE_SIZE, center=(X_OFFSET+(i*125),Y_OFFSET+(node*v_dist)), color=color)\r\n \r\n\r\ndef main():\r\n \r\n agent,network_weights = load_network()\r\n print(\"Agent loaded.\")\r\n \r\n print(\"Connecting to server...\")\r\n # connect to server\r\n connection = krpc.connect(\r\n name='yolo',\r\n address='localhost',\r\n rpc_port=50000, stream_port=50001) #Default ports are 50000, 50001\r\n print(\"Successfully connected to server.\")\r\n\r\n print(\"Loading quick save...\")\r\n reset_trial(connection)\r\n print(\"Loaded quick save\")\r\n \r\n pygame.init()\r\n \r\n screen = pygame.display.set_mode((600,500))\r\n weight_render = pygame.Surface((600,500))\r\n weight_render.fill((100,100,100))\r\n render_weights = True\r\n \r\n running = True\r\n cycle_count = 0\r\n while running:\r\n cycle_count+=1;\r\n # get inputs to NN\r\n inputs = get_inputs(connection)\r\n \r\n # Calculate output of NN\r\n outputs = agent.calculate_output(inputs)\r\n \r\n # Use the output to control the agent\r\n control = connection.space_center.active_vessel.control\r\n control.throttle = (outputs[0][0]+1)/2\r\n if outputs[1][0] > 0:\r\n control.sas_mode = connection.space_center.SASMode.retrograde\r\n else:\r\n control.sas_mode = connection.space_center.SASMode.stability_assist\r\n\r\n \r\n #print(outputs[0][0],(outputs[0][0]+1)/2)\r\n\r\n if str(connection.space_center.active_vessel.situation) == \"VesselSituation.landed\":\r\n # reset throttle if the lander has touched the ground\r\n control.throttle = 0.0\r\n print(\"Landed\")\r\n running = False\r\n # Every Kth cycle, display network\r\n if RENDER_NETWORK and cycle_count%10==0:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n screen.fill((255,255,255))\r\n render_network(agent,screen, inputs, 
weight_render,render_weights,network_weights)\r\n            render_weights = False\r\n            pygame.display.update()\r\n    connection.close()\r\nif __name__ == \"__main__\":\r\n    main()","sub_path":"lander.py","file_name":"lander.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"91467819","text":"import psycopg2\nimport sys\nimport subprocess\n\n\ndef open_and_split(file):\n    \"\"\"Opens the file 'pathway_table.txt' and loads the whole\n    file into a list.\n    The contents of 'pathway_table.txt' are tab-separated ('\\t'),\n    so every line is split on tabs to separate the fields.\n    :param file: A txt file.\n    :return: inhoud: A list with the contents of the file\n    'pathway_table.txt'.\n    \"\"\"\n    inhoud = []\n    with open(file, 'r') as files:\n        for i in files.readlines():\n            inhoud.append(i.split('\\t'))\n    return inhoud\n\n\ndef clean_make_lists(data):\n    \"\"\"Builds a new list from the returned list 'inhoud_bestand'.\n    This is done with a for loop and by removing the newlines\n    ('\\n'). The resulting list is stored in a variable and\n    returned.\n    :param data: The 'inhoud_bestand' list, passed in under the\n    name 'data'.\n    :return: new_list: A list with the contents of the file\n    'pathway_table.txt'.\n    \"\"\"\n    new_list = []\n    for lijnen in data:\n        lijnen.remove('\\n')\n        for i in lijnen[1:len(lijnen)]:\n            listt = i.split(' ')\n            if listt[0][0:3] == \"oaa\":\n                new_list.append([lijnen[0], listt[0], ''.join(\n                    i + ' ' for i in listt[1: len(listt)])])\n            else:\n                new_list.append([lijnen[0], listt[1], ''.join(\n                    i + ' ' for i in listt[2: len(listt)])])\n    return new_list\n\n\ndef write_to_file(clean_path):\n    \"\"\"Uses subprocess to sort the contents of the file\n    'clean_pathways.txt' so that only unique values remain;\n    all duplicate values are removed. The deduplicated output\n    is kept and the old intermediate file is deleted.\n    :param clean_path: A list with the contents of the file\n    'pathway_table.txt'.\n    \"\"\"\n    with open('clean_pathways.txt', 'w+') as file:\n        for lines in clean_path:\n            for i in lines:\n                file.write(i)\n                file.write('\\t')\n            file.write('\\n')\n    subprocess.call(\n        'cat clean_pathways.txt |sort |'\n        ' uniq>clean_pathway.txt; rm clean_pathways.txt',\n        shell=True)\n\n\n
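# design note: the sort|uniq pipeline in write_to_file assumes a Unix shell; a portable\n# pure-Python equivalent would write '\\n'.join(sorted(set(lines))) to clean_pathway.txt instead\n\n\n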
def make_patways_files():\n    \"\"\"Calls three other functions to filter the information in\n    the file \"pathway_table.txt\".\n    After the information has been extracted from\n    \"pathway_table.txt\", a list is built; that list is filtered\n    further and then written out as a file.\n    \"\"\"\n    inhoud_bestand = open_and_split('pathway_table.txt')\n    clean_path = clean_make_lists(inhoud_bestand)\n    write_to_file(clean_path)\n\n\ndef Setup(host, db, user, password):\n    \"\"\"Logs in and opens a connection to the database so that\n    queries can be executed from the terminal.\n    :return: con: The psycopg2 connection to postgresql.\n    :return: cur: Cursor used to execute the queries.\n    \"\"\"\n    con = None\n    con = psycopg2.connect(\n        \"host='{}' dbname='{}' user='{}' password='{}'\"\n        \"\".format(host, db, user,\n                  password))\n    cur = con.cursor()\n    return con, cur\n\n\n
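# note: psycopg2.connect also accepts keyword arguments (host=..., dbname=..., user=..., password=...),\n# which avoids the quoting pitfalls of the hand-built DSN string in Setup\n\n\n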
def info_all():\n    \"\"\"Creates the file 'alles_table_clean.txt'.\n    This is done by pulling information out of the file\n    'info_seq.txt' with a for loop. The information is separated\n    into four columns with tabs ('\\t'); adding the tabs makes it\n    easier to load the information into the database.\n\n    \"\"\"\n    with open('info_seq.txt', 'r') as file:\n        a = file.readlines()\n    with open('alles_table_clean.txt', 'w') as outfile:\n        for i in a:\n            outfile.write(\n                i.split(' ')[0].split('/')[1] + '\\t' + i.split(' ')[\n                    2] + '\\t' + i.split(' ')[\n                        len(i.split(' ')) - 2] + '\\t' +\n                i.split(' ')[len(i.split(' ')) - 1])\n\n\ndef get_parameters():\n    \"\"\"Reads the parameters passed on the command line:\n    the host, the database, the user and the user's password.\"\"\"\n    host = sys.argv[1]\n    db = sys.argv[2]\n    user = sys.argv[3]\n    password = sys.argv[4]\n    return host, db, user, password\n\n\ndef make_server_seq(con, cur):\n    \"\"\"Fills the table :Server_sequentie_c4:.\n    The matching file is read first and the data is then\n    inserted with SQL.\n    :param con: The psycopg2 connection to postgresql\n    :param cur: Cursor used to execute the query.\n    :return: None\n    \"\"\"\n    org_info_sql = \"\"\"\n    INSERT INTO Server_sequentie_c4(seq_id,org_seq) VALUES (%s,%s)\"\"\"\n    f = open(\"org_table_clean.txt\", \"r\")\n    for line in f.readlines():\n        data = line.strip().split('\\t')\n        data = [None if x == 'NONE' else str(x) for x in data]\n        cur.execute(org_info_sql, tuple(data[0:2]))\n    con.commit()\n\n\ndef make_Ncbi_mrna_c4(con, cur):\n    \"\"\"Fills the table :Ncbi_mrna_c4:.\n    The matching file is read first and the data is then\n    inserted with SQL.\n    :param con: The psycopg2 connection to postgresql\n    :param cur: Cursor used to execute the query.\n    :return: None\n    \"\"\"\n    org_info_sql = \"\"\"\n    INSERT INTO Ncbi_mrna_c4(ncbi_m_id,sequentie) VALUES (%s,%s)\"\"\"\n    f = open(\"mrna_table_clean.txt\", \"r\")\n    for line in f.readlines():\n        data = line.strip().split('\\t')\n        data = [None if x == 'NONE' else str(x) for x in data]\n        cur.execute(org_info_sql, tuple([data[0], data[2]]))\n    con.commit()\n\n\ndef make_Pathway_c4(con, cur):\n    \"\"\"Fills the table :Pathway_c4:.\n    The matching file is read first and the data is then\n    inserted with SQL.\n    :param con: The psycopg2 connection to postgresql\n    :param cur: Cursor used to execute the query.\n    :return: None\n    \"\"\"\n    org_info_sql = \"\"\"\n    INSERT INTO Pathway_c4(pathway_id,info_pathway) VALUES (%s,%s)\"\"\"\n    f = open(\"clean_pathway.txt\", \"r\")\n    for line in f.readlines():\n        data = line.strip().split('\\t')\n        data = [None if x == 'NONE' else str(x.strip()) for x in data]\n        try:\n            cur.execute(org_info_sql, tuple([data[1], data[2]]))\n        except psycopg2.IntegrityError:\n            con.rollback()\n        else:\n            con.commit()\n    con.commit()\n\n\ndef make_Ncbi_protein_c4(con, cur):\n    \"\"\"Fills the table :Ncbi_protein_c4:.\n    The matching file is read first and the data is then\n    inserted with SQL.\n    :param con: The psycopg2 connection to postgresql\n    :param cur: Cursor used to execute the query.\n    :return: None\n    \"\"\"\n    org_info_sql = \"\"\"\n    INSERT INTO Ncbi_protein_c4(ncbi_id,name_protein,ec_code,lengte,orginele_seq_aa) VALUES (%s,%s,%s,%s,%s)\"\"\"\n    f = open(\"eiwit_table_clean.txt\", \"r\")\n    for line in f.readlines():\n        data = line.strip().split('\\t')\n        data = [None if x == 'NONE' else str(x.strip()) for x in data]\n        cur.execute(org_info_sql, tuple(data))\n    con.commit()\n\n\ndef make_Ncbi_gene_c4(con, cur):\n    \"\"\"Fills the table :Ncbi_gene_c4:.\n    The matching file is read first and the data is then\n    inserted with SQL.\n    :param con: The psycopg2 connection to postgresql\n    :param cur: Cursor used to execute the query.\n    :return: None\"\"\"\n\n    org_info_sql = \"\"\"\n    INSERT INTO Ncbi_gene_c4(ncbi_g_id,name,length,chromosome,location,seq,exons) VALUES (%s,%s,%s,%s,%s,%s,%s)\"\"\"\n    f = open(\"ncbi_table_clean.txt\", \"r\")\n    for line in f.readlines():\n\n        data = line.strip().split('\\t')[\n            0:len(line.strip().split('\\t')) - 1]\n        clean = []\n        for info in data:\n            if info != \"\":\n                try:\n                    clean.append(abs(int(info)))\n                except ValueError:\n                    clean.append(info)\n\n        clean = [None if x == 'NONE' else str(x) for x in clean]\n\n        cur.execute(org_info_sql, tuple(clean))\n    con.commit()\n\n\n
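# as in make_Pathway_c4, the IntegrityError handling below rolls back duplicate keys so that\n# re-running the load simply skips rows that already exist\n\n\n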
def prot_gen_c4(con, cur):\n    \"\"\"Fills the table :prot_gen_c4:.\n    The matching file is read first and the data is then\n    inserted with SQL.\n    :param con: The psycopg2 connection to postgresql\n    :param cur: Cursor used to execute the query.\n    :return: None\n    \"\"\"\n    org_info_sql = \"\"\"\n    INSERT INTO prot_gen_c4(ncbi_prot_id,ncbi_gene_id) VALUES (%s,%s)\"\"\"\n\n    f = open(\"alles_table_clean.txt\", 'r')\n    for line in f.readlines():\n        data = line.strip().split('\\t')\n\n        data = [None if x == 'NONE' else str(x) for x in data]\n        try:\n            cur.execute(org_info_sql, tuple([data[1], data[2]]))\n        except psycopg2.IntegrityError:\n            con.rollback()\n        else:\n            con.commit()\n\n    con.commit()\n\n\ndef Prot_path_c4(con, cur):\n    \"\"\"Fills the table :Prot_path_c4:.\n    The matching file is read first and the data is then\n    inserted with SQL.\n    :param con: The psycopg2 connection to postgresql\n    :param cur: Cursor used to execute the query.\n    :return: None\n    \"\"\"\n    org_info_sql = \"\"\"\n    INSERT INTO Prot_path_c4(ncbi_id,pathway_id) VALUES (%s,%s)\"\"\"\n\n    f = open(\"clean_pathway.txt\", 'r')\n\n    for line in f.readlines():\n        data = line.strip().split('\\t')\n        data = [None if x == 'NONE' else str(x) for x in data]\n        cur.execute(org_info_sql,\n                    tuple([data[0].replace(\"oaa\", \"\"), data[1]]))\n\n    con.commit()\n\n\ndef pipelinerun():\n    \"\"\"This is the main function of the program. The individual\n    functions are executed step by step in the right order, so\n    that no errors occur and the KEYs are bound to the correct\n    identifier.\n    The information is only added after all the tables have been\n    created.\n    Once the program has finished, all tables should be filled\n    and the database is ready for use.\n    \"\"\"\n    make_patways_files()\n    info_all()\n    try:\n        host, db, user, password = get_parameters()\n    except IndexError:\n        print('no db values')\n        host = 'localhost'\n        db = 'project'\n        user = 'user'\n        password = 'password'\n        print('using defaults')\n\n    con, cur = Setup(host, db, user, password)\n    print(\"connected\")\n    con.commit()\n\n    # create the tables\n    make_server_seq(con, cur)\n    print(\"first done\")\n    make_Ncbi_gene_c4(con, cur)\n    print(\"second done\")\n\n    make_Ncbi_mrna_c4(con, cur)\n    make_Ncbi_protein_c4(con, cur)\n    print(\"linking\")\n\n    prot_gen_c4(con, cur)\n    make_Pathway_c4(con, cur)\n    Prot_path_c4(con, cur)\n\n    print(\"linking done\")\n\n    con.close()\n\n\nif __name__ == '__main__':\n    pipelinerun()\n","sub_path":"herkansing2018/db_tabels_andfill_v2.py","file_name":"db_tabels_andfill_v2.py","file_ext":"py","file_size_in_byte":11004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"10260769","text":"# a pandas Series is an ordered, fixed-length dict-like structure made of data plus an index\n# creation\nimport pandas as pd\n# aSer=pd.Series([1,2,'a'])  # gets a default integer index\n# bSer=pd.Series(['apple','peach','lemon'],index=[1,2,3])\n# # inspect index and values: bSer.index, bSer.values\n# aSer*2  # multiplies every element by 2; access elements with aSer['a']\n# print(bSer)\n# data alignment\ndata={'AXP':'86.40','CSCO':'122.64','BA':'99.44'}\nsindex=['AXP','CSCO','BA','AAPL']\naser=pd.Series(data,index=sindex)  # keys present in sindex line up one-to-one; missing ones become NaN (no matching data)\n# print(pd.isnull(aser))  # check which values in aser are null\nbser=pd.Series(data)\ncser=bser+aser\nprint(cser)","sub_path":"python_code/capture data/强大的数据结构和Python数据库/变长字典Serice.py","file_name":"变长字典Serice.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"195511995","text":"import math\r\nimport zipfile\r\nfrom PIL import Image, ImageOps, ImageDraw\r\nimport pytesseract\r\nimport cv2 as cv\r\nimport numpy as np\r\n\r\npytesseract.pytesseract.tesseract_cmd = 'C:/Program Files/Tesseract-OCR/tesseract.exe'\r\n\r\n# loading the face detection classifier\r\nface_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\nparsed_img_src = {}\r\n\r\nwith zipfile.ZipFile('small_img.zip', 'r') as archive:\r\n    for entry in archive.infolist():\r\n        with archive.open(entry) as file:\r\n            img = Image.open(file).convert('RGB')\r\n            parsed_img_src[entry.filename] = {'pil_img':img}\r\n\r\n\r\nfor img_name in parsed_img_src.keys():\r\n    text = pytesseract.image_to_string(parsed_img_src[img_name]['pil_img'])\r\n    parsed_img_src[img_name]['text'] = text\r\n\r\n\r\nfor img_name in parsed_img_src.keys():\r\n    open_cv_image = np.array(parsed_img_src[img_name]['pil_img']) \r\n    img_g = cv.cvtColor(open_cv_image, cv.COLOR_BGR2GRAY)\r\n    faces_bounding_boxes = face_cascade.detectMultiScale(img_g, 1.3, 5)\r\n    parsed_img_src[img_name]['faces'] = []\r\n    for x,y,w,h in faces_bounding_boxes:\r\n        face = parsed_img_src[img_name]['pil_img'].crop((x,y,x+w,y+h))\r\n        parsed_img_src[img_name]['faces'].append(face)\r\n\r\n\r\nfor img_name in parsed_img_src.keys():\r\n    for face in parsed_img_src[img_name]['faces']:\r\n        face.thumbnail((100,100),Image.ANTIALIAS)\r\n\r\n\r\ndef search(keyword):\r\n    for img_name in 
parsed_img_src:\r\n if (keyword in parsed_img_src[img_name]['text']):\r\n if(len(parsed_img_src[img_name]['faces']) != 0):\r\n print(\"Result found in file {}\".format(img_name))\r\n h = math.ceil(len(parsed_img_src[img_name]['faces'])/5)\r\n contact_sheet=Image.new('RGB',(500, 100*h))\r\n xc = 0\r\n yc = 0\r\n for img in parsed_img_src[img_name]['faces']:\r\n contact_sheet.paste(img, (xc, yc))\r\n if xc + 100 == contact_sheet.width:\r\n xc = 0\r\n yc += 100\r\n else:\r\n xc += 100\r\n \r\n contact_sheet.show() \r\n else:\r\n print(\"Result found in file {} \\nBut there were no faces in that file\\n\\n\".format(img_name))\r\n return\r\n\r\n\r\n\r\n\r\nsearch('Mark') ","sub_path":"Open CV Project/open_cv_project.py","file_name":"open_cv_project.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"470978479","text":"# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\nimport re\nfrom pathlib import Path, PurePath\nfrom textwrap import dedent\nfrom typing import List, Optional\n\nfrom pants.backend.python.dependency_inference import rules as dependency_inference_rules\nfrom pants.backend.python.rules import pex, pex_from_targets, pytest_runner, python_sources\nfrom pants.backend.python.rules.coverage import create_coverage_config\nfrom pants.backend.python.rules.pytest_runner import PythonTestFieldSet\nfrom pants.backend.python.target_types import PythonLibrary, PythonRequirementLibrary, PythonTests\nfrom pants.core.goals.test import TestDebugRequest, TestResult\nfrom pants.core.util_rules import source_files, stripped_source_files\nfrom pants.engine.addresses import Address\nfrom pants.engine.fs import DigestContents, FileContent\nfrom pants.engine.process import InteractiveRunner\nfrom pants.engine.rules import QueryRule\nfrom pants.option.options_bootstrapper import OptionsBootstrapper\nfrom pants.testutil.external_tool_test_base import ExternalToolTestBase\nfrom pants.testutil.option_util import create_options_bootstrapper\nfrom pants.testutil.python_interpreter_selection import skip_unless_python27_and_python3_present\n\n\nclass PytestRunnerIntegrationTest(ExternalToolTestBase):\n\n source_root = \"tests/python\"\n package = os.path.join(source_root, \"pants_test\")\n good_source = FileContent(path=\"test_good.py\", content=b\"def test():\\n pass\\n\")\n bad_source = FileContent(path=\"test_bad.py\", content=b\"def test():\\n assert False\\n\")\n py3_only_source = FileContent(path=\"test_py3.py\", content=b\"def test() -> None:\\n pass\\n\")\n library_source = FileContent(path=\"library.py\", content=b\"def add_two(x):\\n return x + 2\\n\")\n conftest_source = FileContent(\n path=\"conftest.py\",\n content=b\"def pytest_runtest_setup(item):\\n\" b\" print('In conftest!')\\n\",\n )\n\n def write_file(self, file_content: FileContent) -> None:\n self.create_file(\n relpath=PurePath(self.package, file_content.path).as_posix(),\n contents=file_content.content.decode(),\n )\n\n def create_python_library(\n self,\n source_files: List[FileContent],\n *,\n name: str = \"library\",\n dependencies: Optional[List[str]] = None,\n ) -> None:\n for source_file in source_files:\n self.write_file(source_file)\n source_globs = [PurePath(source_file.path).name for source_file in source_files] + [\n \"__init__.py\"\n ]\n self.add_to_build_file(\n self.package,\n dedent(\n f\"\"\"\\\n python_library(\n 
name={repr(name)},\n sources={source_globs},\n dependencies={[*(dependencies or ())]},\n )\n \"\"\"\n ),\n )\n self.create_file(os.path.join(self.package, \"__init__.py\"))\n\n def create_python_test_target(\n self,\n source_files: List[FileContent],\n *,\n dependencies: Optional[List[str]] = None,\n interpreter_constraints: Optional[str] = None,\n ) -> None:\n self.add_to_build_file(\n relpath=self.package,\n target=dedent(\n f\"\"\"\\\n python_tests(\n name='target',\n dependencies={dependencies or []},\n compatibility={[interpreter_constraints] if interpreter_constraints else []},\n )\n \"\"\"\n ),\n )\n for source_file in source_files:\n self.write_file(source_file)\n\n def setup_thirdparty_dep(self) -> None:\n self.add_to_build_file(\n relpath=\"3rdparty/python\",\n target=dedent(\n \"\"\"\\\n python_requirement_library(\n name='ordered-set',\n requirements=['ordered-set==3.1.1'],\n )\n \"\"\"\n ),\n )\n\n @classmethod\n def target_types(cls):\n return [PythonLibrary, PythonTests, PythonRequirementLibrary]\n\n @classmethod\n def rules(cls):\n return (\n *super().rules(),\n create_coverage_config,\n *pytest_runner.rules(),\n *python_sources.rules(),\n *pex.rules(),\n *pex_from_targets.rules(),\n *source_files.rules(),\n *stripped_source_files.rules(),\n *dependency_inference_rules.rules(), # For conftest detection.\n QueryRule(TestResult, (PythonTestFieldSet, OptionsBootstrapper)),\n QueryRule(TestDebugRequest, (PythonTestFieldSet, OptionsBootstrapper)),\n )\n\n def run_pytest(\n self,\n *,\n address: Optional[Address] = None,\n passthrough_args: Optional[str] = None,\n junit_xml_dir: Optional[str] = None,\n use_coverage: bool = False,\n execution_slot_var: Optional[str] = None,\n ) -> TestResult:\n args = [\n \"--backend-packages=pants.backend.python\",\n f\"--source-root-patterns={self.source_root}\",\n # pin to lower versions so that we can run Python 2 tests\n \"--pytest-version=pytest>=4.6.6,<4.7\",\n \"--pytest-pytest-plugins=['zipp==1.0.0', 'pytest-cov>=2.8.1,<2.9']\",\n ]\n if passthrough_args:\n args.append(f\"--pytest-args='{passthrough_args}'\")\n if junit_xml_dir:\n args.append(f\"--pytest-junit-xml-dir={junit_xml_dir}\")\n if use_coverage:\n args.append(\"--test-use-coverage\")\n if execution_slot_var:\n args.append(f\"--pytest-execution-slot-var={execution_slot_var}\")\n if not address:\n address = Address(self.package, target_name=\"target\")\n subjects = [\n PythonTestFieldSet.create(PythonTests({}, address=address)),\n create_options_bootstrapper(args=args),\n ]\n test_result = self.request_product(TestResult, subjects)\n debug_request = self.request_product(TestDebugRequest, subjects)\n if debug_request.process is not None:\n debug_result = InteractiveRunner(self.scheduler).run(debug_request.process)\n assert test_result.exit_code == debug_result.exit_code\n return test_result\n\n def test_single_passing_test(self) -> None:\n self.create_python_test_target([self.good_source])\n result = self.run_pytest()\n assert result.exit_code == 0\n assert f\"{self.package}/test_good.py .\" in result.stdout\n\n def test_single_failing_test(self) -> None:\n self.create_python_test_target([self.bad_source])\n result = self.run_pytest()\n assert result.exit_code == 1\n assert f\"{self.package}/test_bad.py F\" in result.stdout\n\n def test_mixed_sources(self) -> None:\n self.create_python_test_target([self.good_source, self.bad_source])\n result = self.run_pytest()\n assert result.exit_code == 1\n assert f\"{self.package}/test_good.py .\" in result.stdout\n assert 
f\"{self.package}/test_bad.py F\" in result.stdout\n\n def test_absolute_import(self) -> None:\n self.create_python_library([self.library_source])\n source = FileContent(\n path=\"test_absolute_import.py\",\n content=dedent(\n \"\"\"\\\n from pants_test.library import add_two\n\n def test():\n assert add_two(2) == 4\n \"\"\"\n ).encode(),\n )\n self.create_python_test_target([source], dependencies=[\":library\"])\n result = self.run_pytest()\n assert result.exit_code == 0\n assert f\"{self.package}/test_absolute_import.py .\" in result.stdout\n\n def test_relative_import(self) -> None:\n self.create_python_library([self.library_source])\n source = FileContent(\n path=\"test_relative_import.py\",\n content=dedent(\n \"\"\"\\\n from .library import add_two\n\n def test():\n assert add_two(2) == 4\n \"\"\"\n ).encode(),\n )\n self.create_python_test_target([source], dependencies=[\":library\"])\n result = self.run_pytest()\n assert result.exit_code == 0\n assert f\"{self.package}/test_relative_import.py .\" in result.stdout\n\n def test_transitive_dep(self) -> None:\n self.create_python_library([self.library_source])\n transitive_dep_fc = FileContent(\n path=\"transitive_dep.py\",\n content=dedent(\n \"\"\"\\\n from pants_test.library import add_two\n\n def add_four(x):\n return add_two(x) + 2\n \"\"\"\n ).encode(),\n )\n self.create_python_library(\n [transitive_dep_fc], name=\"transitive_dep\", dependencies=[\":library\"]\n )\n source = FileContent(\n path=\"test_transitive_dep.py\",\n content=dedent(\n \"\"\"\\\n from pants_test.transitive_dep import add_four\n\n def test():\n assert add_four(2) == 6\n \"\"\"\n ).encode(),\n )\n self.create_python_test_target([source], dependencies=[\":transitive_dep\"])\n result = self.run_pytest()\n assert result.exit_code == 0\n assert f\"{self.package}/test_transitive_dep.py .\" in result.stdout\n\n def test_thirdparty_dep(self) -> None:\n self.setup_thirdparty_dep()\n source = FileContent(\n path=\"test_3rdparty_dep.py\",\n content=dedent(\n \"\"\"\\\n from ordered_set import OrderedSet\n\n def test():\n assert OrderedSet((1, 2)) == OrderedSet([1, 2])\n \"\"\"\n ).encode(),\n )\n self.create_python_test_target([source], dependencies=[\"3rdparty/python:ordered-set\"])\n result = self.run_pytest()\n assert result.exit_code == 0\n assert f\"{self.package}/test_3rdparty_dep.py .\" in result.stdout\n\n def test_thirdparty_transitive_dep(self) -> None:\n self.setup_thirdparty_dep()\n library_fc = FileContent(\n path=\"library.py\",\n content=dedent(\n \"\"\"\\\n import string\n from ordered_set import OrderedSet\n\n alphabet = OrderedSet(string.ascii_lowercase)\n \"\"\"\n ).encode(),\n )\n self.create_python_library(\n [library_fc],\n dependencies=[\"3rdparty/python:ordered-set\"],\n )\n source = FileContent(\n path=\"test_3rdparty_transitive_dep.py\",\n content=dedent(\n \"\"\"\\\n from pants_test.library import alphabet\n\n def test():\n assert 'a' in alphabet and 'z' in alphabet\n \"\"\"\n ).encode(),\n )\n self.create_python_test_target([source], dependencies=[\":library\"])\n result = self.run_pytest()\n assert result.exit_code == 0\n assert f\"{self.package}/test_3rdparty_transitive_dep.py .\" in result.stdout\n\n @skip_unless_python27_and_python3_present\n def test_uses_correct_python_version(self) -> None:\n self.create_python_test_target(\n [self.py3_only_source], interpreter_constraints=\"CPython==2.7.*\"\n )\n py2_result = self.run_pytest()\n assert py2_result.exit_code == 2\n assert \"SyntaxError: invalid syntax\" in py2_result.stdout\n Path(\n 
self.build_root, self.package, \"BUILD\"\n ).unlink() # Cleanup in order to recreate the target\n self.create_python_test_target(\n [self.py3_only_source], interpreter_constraints=\"CPython>=3.6\"\n )\n py3_result = self.run_pytest()\n assert py3_result.exit_code == 0\n assert f\"{self.package}/test_py3.py .\" in py3_result.stdout\n\n def test_respects_passthrough_args(self) -> None:\n source = FileContent(\n path=\"test_config.py\",\n content=dedent(\n \"\"\"\\\n def test_run_me():\n pass\n\n def test_ignore_me():\n pass\n \"\"\"\n ).encode(),\n )\n self.create_python_test_target([source])\n result = self.run_pytest(passthrough_args=\"-k test_run_me\")\n assert result.exit_code == 0\n assert f\"{self.package}/test_config.py .\" in result.stdout\n assert \"collected 2 items / 1 deselected / 1 selected\" in result.stdout\n\n def test_junit(self) -> None:\n self.create_python_test_target([self.good_source])\n result = self.run_pytest(junit_xml_dir=\"dist/test-results\")\n assert result.exit_code == 0\n assert f\"{self.package}/test_good.py .\" in result.stdout\n assert result.xml_results is not None\n digest_contents = self.request_product(DigestContents, [result.xml_results])\n file = digest_contents[0]\n assert file.path.startswith(\"dist/test-results\")\n assert b\"pants_test.test_good\" in file.content\n\n def test_coverage(self) -> None:\n self.create_python_test_target([self.good_source])\n result = self.run_pytest(use_coverage=True)\n assert result.exit_code == 0\n assert f\"{self.package}/test_good.py .\" in result.stdout\n assert result.coverage_data is not None\n\n def test_conftest_handling(self) -> None:\n \"\"\"Tests that we a) inject a dependency on conftest.py and b) skip running directly on\n conftest.py.\"\"\"\n self.create_python_test_target([self.good_source])\n self.create_file(\n PurePath(self.source_root, self.conftest_source.path).as_posix(),\n self.conftest_source.content.decode(),\n )\n self.add_to_build_file(self.source_root, \"python_tests()\")\n\n result = self.run_pytest(passthrough_args=\"-s\")\n assert result.exit_code == 0\n assert f\"{self.package}/test_good.py In conftest!\\n.\" in result.stdout\n\n result = self.run_pytest(\n address=Address(self.source_root, relative_file_path=\"conftest.py\")\n )\n assert result.exit_code is None\n\n def test_execution_slot_variable(self) -> None:\n source = FileContent(\n path=\"test_concurrency_slot.py\",\n content=dedent(\n \"\"\"\\\n import os\n\n def test_fail_printing_slot_env_var():\n slot = os.getenv(\"SLOT\")\n print(f\"Value of slot is {slot}\")\n # Deliberately fail the test so the SLOT output gets printed to stdout\n assert 1 == 2\n \"\"\"\n ).encode(),\n )\n self.create_python_test_target([source])\n result = self.run_pytest(execution_slot_var=\"SLOT\")\n assert result.exit_code == 1\n assert re.search(r\"Value of slot is \\d+\", result.stdout)\n","sub_path":"src/python/pants/backend/python/rules/pytest_runner_integration_test.py","file_name":"pytest_runner_integration_test.py","file_ext":"py","file_size_in_byte":15047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"553495002","text":"import sys\n\n#check for proper user input\nif len(sys.argv) < 2:\n sys.exit(\"Usage: ceaser.py key\")\nkey = int(sys.argv[1])\nif key < 1:\n sys.exit(\"Error: Key must be positive and not zero\")\nkey = key % 26\n\n#get message\nprint(\"Message: \", end='')\nstring = input()\n\n#init counter\ni = 0\n\n#create empty to store new chars\nl = []\n\n#convert\nwhile i < 
len(string):\r\n    \r\n    #convert char to int + key\r\n    y = ord(string[i]) + key\r\n    \r\n    if string[i] == \" \":\r\n        l.append(\" \")\r\n    \r\n    #upper case\r\n    if string[i].isupper():\r\n        if y <= ord('Z'):\r\n            l.append(chr(y))\r\n        else:\r\n            #wrap around past 'Z' back to 'A'\r\n            l.append(chr(y - 26))\r\n    \r\n    #lower case \r\n    if string[i].islower():\r\n        if y <= ord('z'):\r\n            l.append(chr(y))\r\n        else:\r\n            #wrap around past 'z' back to 'a'\r\n            l.append(chr(y - 26))\r\n    i += 1\r\n\r\n#join list to new cipher string and print \r\ncipher = \"\".join(l)\r\nprint(\"cipher text: {}\".format(cipher))\r\n","sub_path":"cipherText.py","file_name":"cipherText.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"450119930","text":"from PyQt5.QtWidgets import QMainWindow\nfrom calcula import calc_inter\nfrom servicos import servicos\nfrom calcula.calculadora_ip_cont import main\nimport sys\n\n\nclass CalculadoraIp(QMainWindow, calc_inter.Ui_MainWindow):\n    def __init__(self, user, parent=None):\n        super().__init__(parent)\n        super().setupUi(self)\n\n        self.btncalcip.clicked.connect(self.realizaronsulta)\n        self.btnvoltar.clicked.connect(self.voltar)\n        self.btnsair.clicked.connect(self.saindo)\n        self.ip = self.inputip\n        self.cid = self.inputcidr\n        self.user = user\n\n    def voltar(self):\n        self.volta = servicos.Servicos(self.user)\n        self.volta.show()\n        self.hide()\n\n    def saindo(self):\n        sys.exit()\n\n    def realizaronsulta(self):\n\n        ip = self.ip.text()\n        cidr = self.cid.text()\n        calc = main.CalculandoTudo(ip, cidr)\n        try:\n            resutado = calc.verifica()\n            self.jaenlaresult.setText(str(resutado))\n\n        except Exception as Error:\n\n            self.jaenlaresult.setText(str(Error))\n\n\n","sub_path":"Archer Adm/calcula/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"617373361","text":"testString = \"\"\nnewString = \"\"\nsearchChar = \"\"\nreplaceChar = \"\"\n\n\nwhile testString == \"\": #Until they enter a string, ask for one\n    testString = input(\"Please enter some text to search : \")\n\nwhile len(searchChar) != 1: #Until they enter a character, ask for one\n    searchChar = input(\"Enter a character to replace for :\")\n\nwhile len(replaceChar) != 1: #Until they enter a character, ask for one\n    replaceChar = input(\"Enter a character to replace {0} with:\".format(searchChar))\n\nfor x in testString: #Loop through testString\n    if x == searchChar: #If the character is the character to be replaced\n        newString += replaceChar #Add the replacement character to the list\n    else:\n        newString += x #Else add the character\n\nprint(\"The new string is {0}\".format(newString)) #Show the user the string\n","sub_path":"linearSearch.py","file_name":"linearSearch.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"8678196","text":"import os\nimport csv\nimport pandas as pd\nimport numpy as np\n\n# Load allsongs data\nallsongs_df = pd.read_csv('allsongs_features_standardized.csv')\nallsongs_df.set_index('song_index', inplace=True)\n\n# Specify feature columns to consider in modeling\nfeature_cols = [\n    'duration_ms',\n    'acousticness',\n    'danceability',\n    'energy',\n    'instrumentalness',\n    'tempo',\n    'valence'\n]\n\n# Write a function to construct X and y that can be fed into modeling\ndef construct_Xy(playlist_id, ground_lst, holdout_lst, candidate_lst, allsongs_df=allsongs_df, feature_cols=feature_cols):\n
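    # each row of X is [playlist feature means (k) | playlist feature SDs (k) | candidate features (k)],\n    # i.e. X has shape (n_candidates, 3k) with k = len(feature_cols)\n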
    # Construct data matrix for candidate songs\n    candidate_array = allsongs_df.loc[candidate_lst, feature_cols].values # (n, k)\n    n = candidate_array.shape[0]\n\n    # Construct data matrix for ground playlist\n    ground_array = allsongs_df.loc[ground_lst, feature_cols].values # (m, k)\n    ground_mean = np.nanmean(ground_array, axis=0, keepdims=True) # (1, k)\n    ground_std = np.nanstd(ground_array, axis=0, keepdims=True) # (1, k)\n    ground_agg = np.hstack((ground_mean, ground_std)) # (1, 2k)\n    ground_agg_array = np.tile(ground_agg, (n, 1)) # (n x 2k)\n\n    # Construct X and y that can be fed into modeling\n    X = np.hstack((ground_agg_array, candidate_array)) # (n, 3k)\n    y = np.array([song in holdout_lst for song in candidate_lst]).astype(int) # (n,)\n    pid_sid = list(zip([playlist_id]*len(candidate_lst), candidate_lst)) # Save a list of tuples containing playlist ID and candidate song index\n\n    return X, y, pid_sid\n\n# # Construct a toy example\n# playlistID = '00-00'\n# ground = [100371, 16025, 69437, 977, 16871, 131105, 11670, 154829, 201290, 169335, 15408, 2475, 96625, 25774]\n# holdout = [97997, 25385, 15404, 463, 28096, 698]\n# candidate = [16429, 865879, 463, 64136, 135520, 1965, 10862, 483482, 32685, 91911, 368718, 13948, 10848]\n\n# # Test a toy example\n# X, y, pid_sid = construct_Xy(playlistID, ground, holdout, candidate)\n# print(X.shape)\n# print(y.shape)\n
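# with the toy example above, k=7 and there are 13 candidates, so X.shape == (13, 21) and\n# y.shape == (13,); only song 463 appears in both holdout and candidate, so y.sum() == 1\n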
\n# Load and prep data to transform\nsubsample_train_df = pd.read_csv('new_subsample_train.csv')\n#candidate_songs_df = pd.read_csv('candidate_songs_1of4.csv').iloc[:10000][['playlist_id', 'candidate_list']]; fpath = 'Xy_train_1of4.csv' # Process data prepared by Caroline\ncandidate_songs_df = pd.read_csv('candidate_songs_2of4.csv'); fpath = 'Xy_train_2of4.csv' # Process data prepared by Claire\n#candidate_songs_df = pd.read_csv('candidate_songs_3of4.csv').iloc[20000:30000][['playlist_id', 'candidate_list']]; fpath = 'Xy_train_3of4.csv' # Process data prepared by Caroline (on Paulina's behalf)\n#candidate_songs_df = pd.read_csv('candidate_songs_4of4.csv'); fpath = 'Xy_train_4of4.csv' # Process data prepared by Yoon\nsubsample_train_df = subsample_train_df.merge(candidate_songs_df, on='playlist_id', how='right')\nsubsample_train_df.fillna('[]', inplace=True)\nsubsample_train_df['song_list'] = subsample_train_df['song_list'].apply(eval)\nsubsample_train_df['ground_list'] = subsample_train_df['ground_list'].apply(eval)\nsubsample_train_df['holdout_list'] = subsample_train_df['holdout_list'].apply(eval)\nsubsample_train_df['candidate_list'] = subsample_train_df['candidate_list'].apply(eval).apply(list)\n\n# Specify column names to use for output file\noutput_cols = [\n    'playlist_id',\n    'candidate_song_index',\n    # Playlist feature averages\n    'duration_ms_playlist_avg',\n    'acousticness_playlist_avg',\n    'danceability_playlist_avg',\n    'energy_playlist_avg',\n    'instrumentalness_playlist_avg',\n    'tempo_playlist_avg',\n    'valence_playlist_avg',\n    # Playlist feature SDs\n    'duration_ms_playlist_sd',\n    'acousticness_playlist_sd',\n    'danceability_playlist_sd',\n    'energy_playlist_sd',\n    'instrumentalness_playlist_sd',\n    'tempo_playlist_sd',\n    'valence_playlist_sd',\n    # Candidate song feature values\n    'duration_ms_candidate',\n    'acousticness_candidate',\n    'danceability_candidate',\n    'energy_candidate',\n    'instrumentalness_candidate',\n    'tempo_candidate',\n    'valence_candidate',\n    # Whether candidate song is in playlist (i.e. true y value)\n    'd_candidate_in_playlist'\n]\n\n# Transform and save data\nif os.path.exists(fpath):\n    write_mode = 'at' # Append if already exists\nelse:\n    write_mode = 'wt' # Make a new file if not\nwith open(fpath, write_mode) as fp:\n    writer = csv.writer(fp)\n    if write_mode == 'wt':\n        writer.writerow(output_cols) # Write a header when making a new file\n    for i in range(subsample_train_df.shape[0]):\n        print(f'Processing row {i}')\n        playlist_row = subsample_train_df.iloc[i]\n        playlistID = playlist_row['playlist_id']\n        ground = playlist_row['ground_list']\n        holdout = playlist_row['holdout_list']\n        candidate = playlist_row['candidate_list']\n        X, y, pid_sid = construct_Xy(playlistID, ground, holdout, candidate)\n        lst_of_tup = list(zip(pid_sid, X, y))\n        lst_of_tup_flat = [(*pid_sid, *X, y) for pid_sid, X, y in lst_of_tup]\n        writer.writerows(lst_of_tup_flat)\n","sub_path":"Data/transform_data_for_modeling.py","file_name":"transform_data_for_modeling.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"25909753","text":"import random\n\n\nclass Crop:\n    # growing time (turns) / nutritional value\n    estado = None\n\n    def __init__(self, nome, tempo, nutricao):\n        self.nome = nome\n        self.tempo = tempo\n        self.nutricao = nutricao\n        self.estado = 0\n\n    def actualize(self):\n        self.estado += 1\n        if self.estado == self.tempo:\n            return self.nutricao\n        else:\n            return 0\n\n\ncropsTypes = [Crop('potato', 6, 2),\n              Crop('tomato', 9, 3),\n              Crop('rice', 3, 1)\n              ]\n\n\nclass Villagers:\n    def __init__(self):\n        self.hunger = 5\n\n    def eat(self, food):\n        self.hunger += food\n        if self.hunger > 5:\n            self.hunger = 5\n\n        if self.hunger < -5:\n            return -1\n        else:\n            return 0\n\n\nclass Village:\n    def __init__(self):\n        self.villagers = []\n        self.crop = cropsTypes[random.randint(0, 2)]\n        self.food = 100\n        self.crops = []\n        self.maxcrops = random.randint(5, 15)\n        for i in range(random.randint(1, 20)):\n            self.villagers.append(Villagers())\n\n    def next_turn(self):\n        # iterate over a copy so villagers can be removed safely\n        for i in list(self.villagers):\n            if self.food > 0:\n                self.food -= 1\n                if i.eat(1) == -1:\n                    self.villagers.remove(i)\n                    continue\n            elif i.eat(-1) == -1:\n                # an unfed villager starves once hunger drops below -5\n                self.villagers.remove(i)\n        if len(self.crops) < self.maxcrops:\n            # plant a fresh crop of the village's type while there is room\n            self.crops.append(Crop(self.crop.nome, self.crop.tempo, self.crop.nutricao))\n        for i in self.crops:\n            self.food += i.actualize()\n\n    def print_state(self):\n        print(f'POP: {len(self.villagers)}\\t')\n\n\ndef main():\n    turn = 1\n    v1 = Village()\n\n    while turn < 10:\n        v1.next_turn()\n        v1.print_state()\n        turn += 1\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"430666150","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport numpy as np\nimport donkeycar as dk\n#import math\n\nclass Lambda:\n    \"\"\"\n    Wraps a function into a donkey part.\n    \"\"\"\n    def __init__(self, f):\n        \"\"\"\n        Accepts the function to use.\n        \"\"\"\n        self.f = f\n\n    def run(self, *args, **kwargs):\n        return self.f(*args, **kwargs)\n\n    def shutdown(self):\n        return\n\nclass LookAt: \n    \"\"\"\n    I moved this code into the NCS part; don't work on it here.
\n    Takes the screen coordinates of the detected object (target) and the current (pan, tilt) values,\n    and returns new pan and tilt values towards the center of the detected object target, with some slop.\n    \"\"\"\n    def __init__(self, screen_center=(224,224), slop=50, p=0, i=0, d=0,look_for='person', debug=False):\n        self.target_center =(0,0)\n        self.screen_center = screen_center\n        self.debug = debug\n        self.slop = slop\n        self.look_for = look_for\n        # initialize gains\n        self.Kp = p\n        self.Ki = i\n        self.Kd = d\n\n        # The value the controller is trying to get the system to achieve.\n        self.target = 0\n\n        # initialize delta t variables\n        self.prev_tm = time.time()\n        self.prev_feedback = 0\n        self.error = None\n\n        # initialize the output\n        self.alpha = 0\n        self.diff = 0\n        self.change_amt = 0\n\n\n    def run(self, obj_list, pan_in, tilt_in):\n        #self.target_center=target_center\n        #(float(target_center[0]),float(target_center[1]))\n        self.obj_list = obj_list\n\n        self.pan_in = float(pan_in)\n        self.tilt_in = float(tilt_in)\n        self.pan = float(pan_in)\n        self.tilt = float(tilt_in)\n\n        \"\"\"\n        network_classifications = [\"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\",\n                                   \"cat\", \"chair\", \"cow\", \"diningtable\", \"dog\", \"horse\", \"motorbike\",\n                                   \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\",\"tvmonitor\"]\n\n        iterate over the objects and find the first instance of \"look_for\", e.g.:\n        val = default_val\n        for x in some_list:\n            if match(x):\n                val = x\n                break\n\n        #['person', 209.45312, 229.90625, 310.50378, 406.15082, 0.37283849716186523]\n        \"\"\"\n\n        for obj in self.obj_list:\n            print(obj)\n            print(obj[0])\n            if self.look_for in obj:\n                self.target_center=(obj[1],obj[2])\n                break\n\n        change_amt = 0.01\n        if self.screen_center and self.target_center:\n            #self.diff = int(abs((self.target_center[0] - self.screen_center[0]) + (self.target_center[1] - self.screen_center[1])))\n\n            sc=np.array(self.screen_center)\n            tc=np.array(self.target_center)\n\n            td = tc-sc\n            self.diff = np.sqrt(td.dot(td))\n            #def scale_number(unscaled, to_min, to_max, from_min, from_max):\n            #self.diff = scale_number(self.diff,0.002,0.1,0,200)\n            #(to_max-to_min)*(unscaled-from_min)/(from_max-from_min)+to_min\n\n            self.change_amt = self.diff * 0.0002\n\n            #self.change_amt = (0.1-0.001)*(self.diff-0)/(50-0)+0.001\n            #def map_range(x, X_min, X_max, Y_min, Y_max):\n            #self.change_amt = dk.util.data.map_range(self.diff,0,150,0.001,0.1)\n            #self.change_amt = change_amt\n\n            #if self.diff > self.slop: \n\n            if self.pan > -1 and self.pan < 1: \n\n                if self.target_center[0] > self.screen_center[0]:\n                    self.pan = self.pan + self.change_amt\n                else:\n                    self.pan = self.pan - self.change_amt\n\n            if self.tilt > -1 and self.tilt < 1: \n                if self.target_center[1] < self.screen_center[1]:\n                    self.tilt = self.tilt + self.change_amt\n                else:\n                    self.tilt = self.tilt - self.change_amt\n\n        if self.debug:\n            print('diff:', self.diff, \" change_amt: \", self.change_amt, \" screen center:\", self.screen_center, \" target center:\", self.target_center, \n                  \" pan_in:\", self.pan_in, \" tilt_in:\", self.tilt_in, \n                  \" pan:\", self.pan, \" tilt:\", self.tilt)\n\n        return self.pan, self.tilt\n\n\nclass PIDController:\n    \"\"\" Performs a PID computation and returns a control value.\n    This is based on the elapsed time (dt) and the current value of the process variable\n    (i.e. 
the thing we're measuring and trying to change).\n https://github.com/chrisspen/pid_controller/blob/master/pid_controller/pid.py\n \"\"\"\n\n def __init__(self, p=0, i=0, d=0, debug=False):\n\n # initialize gains\n self.Kp = p\n self.Ki = i\n self.Kd = d\n\n # The value the controller is trying to get the system to achieve.\n self.target = 0\n\n # initialize delta t variables\n self.prev_tm = time.time()\n self.prev_feedback = 0\n self.error = None\n\n # initialize the output\n self.alpha = 0\n\n # debug flag (set to True for console output)\n self.debug = debug\n\n def run(self, target_value, feedback):\n curr_tm = time.time()\n\n self.target = target_value\n error = self.error = self.target - feedback\n\n # Calculate time differential.\n dt = curr_tm - self.prev_tm\n\n # Initialize output variable.\n curr_alpha = 0\n\n # Add proportional component.\n curr_alpha += self.Kp * error\n\n # Add integral component.\n curr_alpha += self.Ki * (error * dt)\n\n # Add differential component (avoiding divide-by-zero).\n if dt > 0:\n curr_alpha += self.Kd * ((feedback - self.prev_feedback) / float(dt))\n\n # Maintain memory for next loop.\n self.prev_tm = curr_tm\n self.prev_feedback = feedback\n\n # Update the output\n self.alpha = curr_alpha\n\n if (self.debug):\n print('PID target value:', round(target_value, 4))\n print('PID feedback value:', round(feedback, 4))\n print('PID output:', round(curr_alpha, 4))\n\n return curr_alpha\n","sub_path":"donkeycar/parts/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"478732652","text":"import pickle\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, BatchNormalization,\\\n Dropout, MaxPooling2D, Flatten\nfrom keras.callbacks import ModelCheckpoint\nfrom sklearn.svm import LinearSVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix\n\nimport matplotlib.pyplot as plt\nfrom utils import *\n\n\ndef svm(Xtrain, ytrain, Xtest, ytest):\n ''' implement SVM classifier on 4 cell groups'''\n # reshape tensor\n Xtrain = np.reshape(Xtrain, (len(Xtrain), -1))\n Xtest = np.reshape(Xtest, (len(Xtest), -1))\n ytrain = np.apply_along_axis(lambda x: np.where(x == max(x))[0], axis=1, arr=ytrain)\n ytest = np.apply_along_axis(lambda x: np.where(x == max(x))[0], axis=1, arr=ytest)\n\n classifier = LinearSVC()\n classifier.fit(Xtrain, ytrain)\n return classifier.score(Xtest, ytest)\n\n\ndef knn(Xtrain, ytrain, Xtest, ytest):\n ''' implement kNN classifier on 4 cell groups'''\n # reshape tensor\n Xtrain = np.reshape(Xtrain, (len(Xtrain), -1))\n Xtest = np.reshape(Xtest, (len(Xtest), -1))\n ytrain = np.apply_along_axis(lambda x: np.where(x == max(x))[0], axis=1, arr=ytrain)\n ytest = np.apply_along_axis(lambda x: np.where(x == max(x))[0], axis=1, arr=ytest)\n\n classifier = KNeighborsClassifier()\n classifier.fit(Xtrain, ytrain)\n return classifier.score(Xtest, ytest)\n\ndef cnn_model():\n model = Sequential()\n f, stride = 32, 3\n model.add(Conv2D(f, (stride, stride), activation='relu'))\n model.add(BatchNormalization())\n m_stride = 2\n model.add(MaxPooling2D((m_stride, m_stride)))\n\n f, stride = 64, 3\n model.add(Conv2D(f, (stride, stride), activation='relu'))\n model.add(BatchNormalization())\n m_stride = 2\n model.add(MaxPooling2D((m_stride, m_stride)))\n\n f, stride = 128, 3\n model.add(Conv2D(f, (stride, stride), activation='relu'))\n 
model.add(BatchNormalization())\n m_stride = 2\n model.add(MaxPooling2D((m_stride, m_stride)))\n\n f, stride = 64, 3\n model.add(Conv2D(f, (stride, stride), activation='relu'))\n model.add(BatchNormalization())\n m_stride = 2\n model.add(MaxPooling2D((m_stride, m_stride)))\n\n d = 100\n cat = 4\n model.add(Flatten())\n model.add(Dense(d, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(cat, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n return model\n\ndef cnn_kernel():\n # implement CNN model from kernel 2\n model = Sequential()\n model.add(Conv2D(filters=16, kernel_size=5, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.2))\n\n model.add(Conv2D(filters=8, kernel_size=5, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.2))\n\n model.add(Conv2D(filters=4, kernel_size=5, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.2))\n\n model.add(Conv2D(filters=4, kernel_size=5, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.2))\n\n model.add(Flatten())\n model.add(Dense(32, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(16, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(8, activation='relu'))\n model.add(Dropout(0.2))\n cat = 4\n model.add(Dense(cat, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n return model\n\ndef main():\n # get data\n X_train, y_train = get_data('../dataset2-master/images/TRAIN/')\n X_test, y_test = get_data('../dataset2-master/images/TEST/')\n # X_train, y_train = get_data('../dataset-master/augmented/TRAIN/')\n # X_test, y_test = get_data('../dataset-master/augmented/TEST/')\n #acc = svm(X_train, y_train, X_test, y_test)\n #acc = knn(X_train, y_train, X_test, y_test)\n #print(acc)\n # build model\n model = cnn_model() # cnn 1\n #model = cnn_kernel() # cnn 2\n filepath = \"../results/weight_tr5.hdf5\"\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc',\n verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint]\n history = model.fit(X_train, y_train, epochs=100, batch_size=1024,\n callbacks = callbacks_list, validation_data=(X_test, y_test))\n print(model.summary())\n score = model.evaluate(X_test, y_test, verbose=0)\n print(\"Prediction score:\", score)\n y_pred = model.predict(X_test)\n y_pred_classes = np.argmax(y_pred, axis=1)\n y_true = np.argmax(y_test, axis=1)\n confusion_mtx = confusion_matrix(y_true, y_pred_classes)\n with open(\"./train_hist_original\", \"wb\") as fname:\n pickle.dump(history.history, fname)\n np.savetxt(\"confusion_matrix_original.txt\", confusion_mtx, fmt=\"%.4f\")\n # plot results\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"333563520","text":"# encoding: utf8\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('engagement_opportunity', '0005_auto_20140522_2004'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='engagementopportunity',\n name='provider_action_type',\n field=models.PositiveSmallIntegerField(choices=[(1, 'Tweet'), (2, 'Reddit Comment'), (3, 'Reddit Self Post'), (4, 'Reddit Link Post')]),\n ),\n 
]\n","sub_path":"src/aggregates/engagement_opportunity/migrations/0006_auto_20140523_0032.py","file_name":"0006_auto_20140523_0032.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"435207810","text":"import cv2\nimport yaml\nimport numpy as np\n\n\nclass CameraIntrinsics:\n \"\"\" Camera intrinsics\n\n Attributes\n ----------\n cam_id : str\n Camera ID\n camera_model : str\n Camera model\n distortion_model : str\n Distortion model\n distortion_coeffs : str\n Distortion coefficients\n intrinsics : np.array\n Camera intrinsics\n resolution : np.array\n Camera resolution\n\n \"\"\"\n def __init__(self, filepath):\n self.camera_model = None\n self.distortion_model = None\n self.distortion_coeffs = None\n self.intrinsics = None\n self.resolution = None\n self.K_new = None\n\n self.load(filepath)\n\n def load(self, filepath):\n \"\"\" Load camera intrinsics\n\n `filepath` is expected to point towards a yaml file produced by\n Kalibr's camera calibration process, the output is expected to have the\n following format:\n\n [Kalibr]: https://github.com/ethz-asl/kalibr\n\n ```\n cam0:\n cam_overlaps: [1]\n camera_model: pinhole\n distortion_coeffs: [k1, k2, k3, k4]\n distortion_model: equidistant\n intrinsics: [fx, fy, cx, cy]\n resolution: [px, py]\n rostopic: \"...\"\n cam1:\n T_cn_cnm1:\n - [1, 0, 0, 0]\n - [0, 1, 0, 0]\n - [0, 0, 1, 0]\n - [0, 0, 0, 1]\n cam_overlaps: [0]\n camera_model: pinhole\n distortion_coeffs: [k1, k2, k3, k4]\n distortion_model: equidistant\n intrinsics: [fx, fy, cx, cy]\n resolution: [px, py]\n rostopic: \"...\"\n ```\n\n Parameters\n ----------\n cam_id : int\n Camera ID\n filepath : str\n Path to camera intrinsics file\n\n \"\"\"\n intrinsics_file = open(filepath, \"r\")\n intrinsics_txt = intrinsics_file.read()\n intrinsics_file.close()\n intrinsics = yaml.load(intrinsics_txt)\n\n self.camera_model = intrinsics[\"camera_model\"]\n self.distortion_model = intrinsics[\"distortion_model\"]\n self.distortion_coeffs = intrinsics[\"distortion_coeffs\"]\n self.intrinsics = intrinsics[\"intrinsics\"]\n self.resolution = intrinsics[\"resolution\"]\n\n def K(self):\n \"\"\" Form camera intrinsics matrix K \"\"\"\n fx, fy, cx, cy = self.intrinsics\n K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])\n return K\n\n def D(self):\n \"\"\" Form distortion coefficients vector D \"\"\"\n return np.array(self.distortion_coeffs)\n\n def calc_Knew(self):\n \"\"\" Calculate new camera intrinsics matrix K for equidistant distortion\n model \"\"\"\n # Pre-check\n if self.distortion_model != \"equidistant\":\n raise RuntimeError(\"Only supports equidistant at the moment!\")\n\n # Get distortion model and form camera intrinsics matrix K\n distortion_coeffs = np.array(self.distortion_coeffs)\n K = self.K()\n\n D = distortion_coeffs # (k1, k2, k3, k4)\n img_size = (self.resolution[0], self.resolution[1])\n R = np.eye(3)\n balance = 0.0\n\n # Calculate Knew\n K_new = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(\n K,\n D,\n img_size,\n R,\n balance=balance\n )\n return K_new\n\n def undistort_points(self, points):\n \"\"\" Undistort points\n\n Parameters\n ----------\n points : np.array\n Points to undistort in pixel coordinates\n\n Return\n ----------\n points : np.array\n Undistorted points in ideal coordinates\n\n \"\"\"\n # Get distortion model and form camera intrinsics matrix K\n distortion_coeffs = np.array(self.distortion_coeffs)\n K = self.K()\n\n # Undistort points\n 
if self.distortion_model == \"radtan\":\n            # Distortion coefficients (k1, k2, r1, r2)\n            points = cv2.undistortPoints(points, K, distortion_coeffs)\n        elif self.distortion_model == \"equidistant\":\n            # Distortion coefficients (k1, k2, k3, k4)\n            points = cv2.fisheye.undistortPoints(points, K, distortion_coeffs)\n\n        return points\n\n    def undistort_image(self, image):\n        # Get distortion model and form camera intrinsics matrix K\n        distortion_coeffs = np.array(self.distortion_coeffs)\n        K = self.K()\n\n        # Undistort image\n        if self.distortion_model == \"radtan\":\n            D = distortion_coeffs # (k1, k2, r1, r2)\n            # fixed: assign the result to undistorted_image and keep the original\n            # camera matrix, so the return below is defined for this branch too\n            undistorted_image = cv2.undistort(image, K, distortion_coeffs)\n            K_new = K\n\n        elif self.distortion_model == \"equidistant\":\n            D = distortion_coeffs # (k1, k2, k3, k4)\n            img_size = (image.shape[1], image.shape[0])\n            R = np.eye(3)\n            balance = 0.0\n\n            K_new = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(\n                K,\n                D,\n                img_size,\n                R,\n                balance=balance\n            )\n            undistorted_image = cv2.fisheye.undistortImage(image,\n                                                           K,\n                                                           D,\n                                                           None,\n                                                           K_new)\n\n        self.K_new = K_new\n        return undistorted_image, K_new\n\n    def __str__(self):\n        \"\"\" CameraIntrinsics to string \"\"\"\n        # note: cam_id is never set in __init__, so it is omitted here\n        s = \"camera_model: \" + self.camera_model + \"\\n\"\n        s += \"distortion_model: \" + self.distortion_model + \"\\n\"\n        s += \"distortion_coeffs: \" + str(self.distortion_coeffs) + \"\\n\"\n        s += \"intrinsics: \" + str(self.intrinsics) + \"\\n\"\n        s += \"resolution: \" + str(self.resolution)\n        return s\n","sub_path":"prototype/calibration/camera_intrinsics.py","file_name":"camera_intrinsics.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"247825352","text":"'''\nInput: an integer\nReturns: an integer\n'''\n\n# recursion to find all possible sums: create an array of 1s of length n, then find all possible \n# contractions of the 1s; 
store the number of possible combinations of subsets in a dictionary\n\n\n\n\ndef eating_cookies(n):\n    # Your code here\n\n    lookup = {0:1, 1:1, 2:2, 3:4}\n\n    for i in range(n+1):\n\n        if i in lookup:\n\n            continue\n\n        else:\n\n            lookup[i] = lookup[i-1] + lookup[i-2] + lookup[i-3]\n\n    return lookup[n]\n\n\nif __name__ == \"__main__\":\n    # Use the main function here to test out your implementation\n    num_cookies = 5\n\n    print(f\"There are {eating_cookies(num_cookies)} ways for Cookie Monster to eat {num_cookies} cookies\")\n","sub_path":"eating_cookies/eating_cookies.py","file_name":"eating_cookies.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"654323052","text":"import numpy as np\nimport mapsfunction as mf\nimport parameters as par\nimport scipy.ndimage.morphology as mph\nimport matplotlib.pyplot as plt\nfrom os.path import isfile\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\n#from scipy.optimize import curve_fit\n#from scipy.stats import linregress\n#from mpi4py import MPI\n\ndef reconstLogNorm(z, pxlen, thres, N_part, R_mu_real, R_sigma_real):\n    z_obj_list, z_labeled = mf.identObj(z, thres)\n    mf.plotThres(z, z_labeled, pxlen, 'found ' + str(len(z_obj_list)) + ' of ' + str(N_part) + ' particles, thres=' + str(thres)+'nm')\n    \n    R_list = [(np.max(np.shape(obj_i)))*pxlen/2 for obj_i in z_obj_list]\n    \n    R_mean = np.mean(R_list)\n    R_std = np.std(R_list)\n    \n    R_mu = np.log(R_mean / np.sqrt(1 + R_std**2 / R_mean**2)) # recalculated gaussian\n    R_sigma = np.sqrt(np.log(1 + (R_std/R_mean)**2)) # recalculated gaussian\n    \n    x = np.linspace(R_mean - 3*R_std, R_mean + 3*R_std, 1000)\n    pdf = 1 / (x * R_sigma * np.sqrt(2*np.pi)) * np.exp(-(np.log(x) - R_mu)**2 / (2*R_sigma**2))\n    pdf_real = 1 / (x * R_sigma_real * np.sqrt(2*np.pi)) * np.exp(-(np.log(x) - R_mu_real)**2 / (2*R_sigma_real**2))\n    \n    plt.figure()\n    plt.hist(R_list, bins=12, density=True, edgecolor='black', linewidth=2, color='grey', alpha=0.5)\n    plt.plot(x, pdf, color='r', linewidth=3.5, label='empiric distribution (R_mu = {0:.3f}, R_sigma = {1:.3f})'.format(R_mu, R_sigma))\n    plt.plot(x, pdf_real, color='green', linewidth=3.5, label='real distribution (R_mu = {0:.3f}, R_sigma = {1:.3f})'.format(R_mu_real, R_sigma_real))\n    plt.xlabel(r'$R_{part} [nm]$')\n    plt.ylabel('frequency')\n    plt.title('found ' + str(len(z_obj_list)) + ' of ' + str(N_part) + ' particles, thres=' + str(thres)+'nm')\n    plt.legend(loc=1)\n    plt.tight_layout()\n\n    return R_mean, R_std, R_mu, R_sigma\n    \ndef partNum(z, pxlen, R_mu, R_sigma):\n    A = (np.shape(z)[1]-1)*(np.shape(z)[0]-1) * pxlen**2\n    V = par.V(z, pxlen)\n    \n    V_mean = 4/3 * np.pi * np.exp(3*R_mu + 3**2*R_sigma**2/2)\n    \n    N_particles = V / V_mean\n    eff_cov = N_particles / A\n    \n    return N_particles, eff_cov\n\ndef C_gauss_prof_semilog(lw, L_corr): return -lw[0]/L_corr + 2*np.log(lw[1]) # fixed: numpy has no np.ln, the natural log is np.log\ndef G_gauss_prof(lLw, alfa):\n    l_xy,L,w=lLw\n    return 2*w**2 *( 1-np.exp(- (l_xy/L)**(2*alfa)) )\n\ndef partDep(Npx, pxlen, step_sim, N_part_min, N_part_max, N_part_step, R_mu, R_sigma, firstmap='', usefile=False, savefile=False):\n    N_part = np.linspace(np.log10(N_part_min), np.log10(N_part_max), N_part_step)\n    N_part = np.round(10**N_part)\n    N_part.astype(int, copy=False)\n    \n#    N_est = np.array([]) #2+1D only\n    V_est = np.array([])\n    rms_est = np.array([])\n    h_est = np.array([])\n    h_top = np.array([])\n    \n    C_true=[]\n    G_true=[]\n    C2_true=[]\n    G2_true=[]\n    C_list=[]\n    G_list=[]\n\n    
for i in range(step_sim):\n if firstmap=='': #initialize map\n #z = np.zeros(Npx) #1+1D\n z=mf.genFlat(Npx)\n N_prec=0\n V_real=0\n else:\n z = np.loadtxt(firstmap)\n dotpos=firstmap.find('.')\n start=dotpos-1\n for i in range(dotpos-1):\n if firstmap[start]=='_': break\n start-=1\n N_prec=int(firstmap[start+1:dotpos])\n \n out=open(firstmap)\n head=out.readline()\n out.close()\n start=head.find('V=')+2\n V_real=float(head[start:head.find(';', start)])\n\n for N in N_part:\n print('Sim.',i+1,'; N=',str(N)[:len(str(N))-2], end=' ')\n if usefile and isfile('maps/lognorm_'+str(Npx)+'_'+str(pxlen)+'_'+str(N)[:len(str(N))-2]+'.dat'):\n print('map from file ...', end=' ')\n z= np.loadtxt('maps/lognorm_'+str(Npx)+'_'+str(pxlen)+'_'+str(N)[:len(str(N))-2]+'.dat')\n out=open('maps/lognorm_'+str(Npx)+'_'+str(pxlen)+'_'+str(N)[:len(str(N))-2]+'.dat')\n head=out.readline()\n out.close()\n start=head.find('V=')+2\n V_real=float(head[start:head.find(';', start)])\n else:\n #print('generating map ...', end=' ')\n #z, R_part_real = mf.genLogNormSolidSph(z,pxlen,int(N-N_prec),R_mu,R_sigma)\n #z=mf.genLattice1d(z,pxlen,int(N-N_prec)) #1+1D\n #V_real += (N-N_prec)* pxlen**2 #1+1D\n z=mf.genLattice2d(z,pxlen,int(N-N_prec))\n V_real += (N-N_prec)* pxlen**3\n #V_real += 4/3*np.pi * np.sum(R_part_real**3)\n if savefile: np.savetxt('maps/lognorm_'+str(Npx)+'_'+str(pxlen)+'_'+str(N)[:len(str(N))-2]+'.dat', z, header='V='+str(V_real)+'; Npx,pxlen,Npart in filename')\n #if savefile: np.savetxt('matrice.mat', z)\n N_prec=N\n \n #print('computing parameters ...')\n #N_est=np.append(N_est, partNum(z,pxlen,R_mu,R_sigma)[0]/N)\n V_est=np.append(V_est, par.V(z,pxlen)/V_real)\n rms_est=np.append(rms_est, np.std(z))\n h_est=np.append(h_est, np.mean(z))\n h_top=np.append(h_top, np.amax(z))\n \n #print('computing correlations ...')\n l,C=par.C_profile(z, pxlen, 1)\n C_list.append(C)\n #C_list.append(par.C_1d(z)) #1+1D\n \n l,C=par.G_profile(z, pxlen, 1)\n G_list.append(C)\n #G_list.append(par.G_1d(z)) #1+1D\n print('',end='\\r')\n #print()\n \n if i==0: \n for el in C_list:\n C_true.append(el)\n C2_true.append(el**2)\n for el in G_list:\n G_true.append(el)\n G2_true.append(el**2)\n else:\n for i in range(len(C_true)):\n C_true[i]=C_true[i]+C_list[i]\n C2_true[i]=C2_true[i]+C_list[i]**2\n for i in range(len(G_true)):\n G_true[i]=G_true[i]+G_list[i]\n G2_true[i]=G2_true[i]+G_list[i]**2\n C_list.clear()\n G_list.clear()\n\n \n\n filename=['V_relVsN.dat', 'rmsVsN.dat', 'hVsN.dat', 'maxhVsN.dat']\n est=[V_est, rms_est, h_est, h_top]\n \n for j in range(len(est)):\n err=np.array([])\n for i in range(len(N_part)):\n if step_sim==1: err=np.append(err, 0)\n else: err=np.append(err, np.std(est[j][i::len(N_part)]))\n est[j][i]=np.mean(est[j][i::len(N_part)])\n\n \n np.savetxt(filename[j], np.array([ est[j][:len(N_part)], N_part, err ]),\n header=str(Npx) + ' ' + str(pxlen) + '\\n' +\n r'$N_{px}$ $L_{px}$')\n #header=str(R_mu) + ' ' + str(R_sigma) + ' ' + str(Npx) + ' ' + str(pxlen) + '\\n' +\n #r'$\\mu_R$ $\\sigma_R$ $N_{px}$ $L_{px}$')\n print('data saved in '+ filename[j] +' and in folder maps/')\n \n C_true=np.array(C_true)/step_sim\n G_true=np.array(G_true)/step_sim\n C2_true=np.array(C2_true)/step_sim - C_true**2\n G2_true=np.array(G2_true)/step_sim - G_true**2\n# np.savetxt('x.dat', l)\n np.savetxt('C.dat', C_true)\n np.savetxt('G.dat', G_true)\n np.savetxt('C2.dat', C2_true)\n np.savetxt('G2.dat', G2_true)\n print('correlations saved in files C,G,C2,G2')\n\ndef partDep_mpi(Npx, pxlen, step_sim, N_part_min, N_part_max, 
N_part_step, R_mu, R_sigma, firstmap='', usefile=False, savefile=False):\n N_part = np.linspace(np.log10(N_part_min), np.log10(N_part_max), N_part_step)\n N_part = np.round(10**N_part)\n N_part.astype(int, copy=False)\n \n N_est = np.array([])\n V_est = np.array([])\n rms_est = np.array([])\n h_est = np.array([])\n h_top = np.array([])\n \n# L_corr_est = np.array([])\n# L_corr_err = np.array([])\n# alfa_est=np.array([])\n# alfa_err=np.array([])\n \n C_true=[]\n G_true=[]\n C2_true=[]\n G2_true=[]\n C_list=[]\n G_list=[]\n comm=MPI.COMM_WORLD\n size_mpi=comm.Get_size()\n for i in range(step_sim/size_mpi):\n if firstmap=='': #initialize map\n z = mf.genFlat(Npx)\n N_prec=0\n V_real=0\n else:\n z = np.loadtxt(firstmap)\n dotpos=firstmap.find('.')\n start=dotpos-1\n for i in range(dotpos-1):\n if firstmap[start]=='_': break\n start-=1\n N_prec=int(firstmap[start+1:dotpos])\n \n out=open(firstmap)\n head=out.readline()\n out.close()\n start=head.find('V=')+2\n V_real=float(head[start:head.find(';', start)])\n\n for N in N_part:\n print('Sim.',i+1,'; N=',str(N)[:len(str(N))-2], end=' ')\n if usefile and isfile('maps/lognorm_'+str(Npx)+'_'+str(pxlen)+'_'+str(N)[:len(str(N))-2]+'.dat'):\n print('map from file ...', end=' ')\n z= np.loadtxt('maps/lognorm_'+str(Npx)+'_'+str(pxlen)+'_'+str(N)[:len(str(N))-2]+'.dat')\n out=open('maps/lognorm_'+str(Npx)+'_'+str(pxlen)+'_'+str(N)[:len(str(N))-2]+'.dat')\n head=out.readline()\n out.close()\n start=head.find('V=')+2\n V_real=float(head[start:head.find(';', start)])\n else:\n print('generating map ...', end=' ')\n z, R_part_real = mf.genLogNormSolidSph(z,pxlen,int(N-N_prec),R_mu,R_sigma)\n mf.plotfalsecol(z,pxlen)\n V_real += np.sum(4/3 * np.pi * R_part_real**3)\n if savefile: np.savetxt('maps/lognorm_'+str(Npx)+'_'+str(pxlen)+'_'+str(N)[:len(str(N))-2]+'.dat', z, header='V='+str(V_real)+'; Npx,pxlen,Npart in filename')\n N_prec=N\n \n print('computing parameters ...')\n N_est=np.append(N_est, partNum(z,pxlen,R_mu,R_sigma)[0]/N)\n V_est=np.append(V_est, par.V(z,pxlen)/V_real)\n rms_est=np.append(rms_est, np.std(z))\n h_est=np.append(h_est, np.mean(z))\n h_top=np.append(h_top, np.amax(z))\n \n #print('computing correlations ...')\n l,C=par.C_profile(z, 2, 800) \n C_list.append(C)\n \n l,C=par.G_profile(z, 2, 800)\n G_list.append(C)\n \n if i==0: \n for el in C_list:\n C_true.append(el)\n C2_true.append(el**2)\n for el in G_list:\n G_true.append(el)\n G2_true.append(el**2)\n else:\n for i in range(len(C_true)):\n C_true[i]=C_true[i]+C_list[i]\n C2_true[i]=C2_true[i]+C_list[i]**2\n for i in range(len(G_true)):\n G_true[i]=G_true[i]+G_list[i]\n G2_true[i]=G2_true[i]+G_list[i]**2\n C_list.clear()\n G_list.clear()\n\n est=[N_est, V_est, rms_est, h_est, h_top]\n rank=comm.Get_rank()\n if rank!=0: comm.Send(est, dest=0)\n if rank==0:\n rec=[]\n for i in range(1, size_mpi):\n comm.Recv(rec, source=i)\n for k in range(len(est)):\n est[k]=np.append(est[k], rec[k])\n rec.clear()\n \n C_true=np.array(C_true)/(step_sim/size_mpi)\n G_true=np.array(G_true)/(step_sim/size_mpi)\n C2_true=np.array(C2_true)/(step_sim/size_mpi) - C_true**2\n G2_true=np.array(G2_true)/(step_sim/size_mpi) - G_true**2\n corr=[C_true, G_true, C2_true, G2_true]\n \n if rank!=0: comm.Send(corr, dest=0)\n if rank==0:\n for i in range(1, size_mpi):\n comm.Recv(rec, source=i)\n for k in range(4):\n corr[k]=np.append(corr[k], rec[k])\n rec.clear()\n corr[0]=np.array(corr[0])/size_mpi\n corr[1]=np.array(corr[1])/size_mpi\n corr[2]=np.array(corr[2])/size_mpi - corr[0]**2\n 
corr[3]=np.array(corr[3])/size_mpi - corr[1]**2\n \n filename=['N_relVsN.dat', 'V_relVsN.dat', 'rmsVsN.dat', 'hVsN.dat', 'maxhVsN.dat']\n \n for j in range(len(est)):\n err=np.array([])\n for i in range(len(N_part)):\n if step_sim==1: err=np.append(err, 0)\n else: err=np.append(err, np.std(est[j][i::len(N_part)]))\n est[j][i]=np.mean(est[j][i::len(N_part)])\n \n \n np.savetxt(filename[j], np.array([ est[j][:len(N_part)], N_part, err ]),\n header=str(R_mu) + ' ' + str(R_sigma) + ' ' + str(Npx) + ' ' + str(pxlen) + '\\n' +\n r'$\\mu _R$ $\\sigma _R$ $N_{px}$ $L_{px}$')\n print('data saved in '+ filename[j] +' and in folder maps/')\n \n np.savetxt('correlations.dat', np.array([ l*pxlen, corr[0], corr[1], corr[2], corr[3]]), fmt='%s')\n \ndef plotTipDep(z, pxlen, h, R_mu, R_sigma, N_part_real):\n R_tip = np.linspace(0.01, 20, 10)\n N_part_est = []\n for R in R_tip:\n tip = mf.genParabolicTip(pxlen,h,r=R) \n img = mph.grey_dilation(z, structure=-tip)\n N_part_est.append(partNum(img, pxlen, R_mu, R_sigma)[0])\n print(R)\n \n plt.figure()\n plt.plot(R_tip, np.array(N_part_est) / N_part_real, color='r', marker='o')\n plt.xlabel(r'$R_{tip} [nm]$')\n plt.ylabel(r'$N_{part,est} / N_{part,real}$')\n plt.title(r'$N_{part,real} = $' + str(N_part_real) + r'$, \\mu_R = $' + str(R_mu) + r'$nm, \\sigma_R = $' + str(R_sigma) + r'$nm$')\n plt.grid()\n\ndef plotpar_fromfile(filename, xlab, ylab, xscale='linear', yscale='linear', a=0, b=0):\n est, Ntrue, err, wl = np.loadtxt(filename)\n \n out=open(filename)\n line1=out.readline()\n line2=out.readline()\n param=np.fromstring(line1[2:], sep=' ')\n name=line2[2:].split()\n out.close()\n \n plt.figure()\n if filename.find('N_rel')!=-1:\n est=est/Ntrue\n err=err/Ntrue\n print('last value:', est[-1])\n plt.errorbar(Ntrue, est, yerr=err)\n plt.xscale(xscale)\n plt.yscale(yscale)\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.grid()\n plt.title(name[0]+'='+str(round(param[0], 2))+', '+name[1]+'='+str(round(param[1], 2))+', '+\n name[2]+'='+str(param[2])+', '+name[3]+'='+str(param[3])+', '+name[4]+'='+str(param[4])+', '+\n name[5]+'='+str(param[5]) )\n if aa and not(passedA):\n a=i\n passedA=True\n \n if Ntrue[i]>b: break\n else: b_ind=i\n \n x=Ntrue[a:b_ind].reshape(-1,1)\n y= est[a:b_ind].reshape(-1,1)\n x_log=np.log10(x)\n y_log=np.log10(y)\n model = LinearRegression().fit(x_log,y_log)\n plt.plot(x,10 ** model.predict(x_log), color='red')\n print('coefficient:',model.coef_[0][0])\n print('mean squared error:', mean_squared_error(y_log, model.predict(x_log)) )\n \n plt.figure()\n plt.title('wavelength in direction x')\n plt.xscale(xscale)\n plt.yscale(yscale)\n plt.xlabel(xlab)\n plt.ylabel(r'$ \\lambda _x [L_{px}] $')\n plt.plot(Ntrue, wl)\n plt.grid()\n plt.show()","sub_path":"ballistic_depo/part_num.py","file_name":"part_num.py","file_ext":"py","file_size_in_byte":15077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"116301268","text":"from src.core import Script, Parameter\nfrom PySide.QtCore import Signal, QThread\nfrom src.instruments import SpectrumAnalyzer, CryoStation\nfrom src.scripts import KeysightGetSpectrum\nimport numpy as np\nimport time\n\n\nclass KeysightSpectrumVsPower(Script, QThread):\n\n # NOTE THAT THE ORDER OF Script and QThread IS IMPORTANT!!\n _DEFAULT_SETTINGS = Parameter([\n Parameter('path', 'Z:/Lab/Cantilever/Measurements/----data_tmp_default----', str, 'path for data'),\n Parameter('tag', 'dummy_tag', str, 'tag for data'),\n Parameter('save', True, bool, 'save data on/off'),\n 
Parameter('power_out_min',-45.0, float, 'output power (dBm) min'),\n Parameter('power_out_step',5.0, float, 'output power (dBm) step'),\n Parameter('power_out_max',-15.0, float, 'output power (dBm) max'),\n Parameter('wait_time',2.0, float, 'wait time in seconds')\n ])\n\n _INSTRUMENTS = {\n 'cryo_station' : CryoStation\n }\n\n _SCRIPTS = {\n 'get_spectrum' : KeysightGetSpectrum\n }\n updateProgress = Signal(int)\n def __init__(self, instruments, scripts, name = None, settings = None, log_output = None):\n \"\"\"\n Example of a script that emits a QT signal for the gui\n Args:\n name (optional): name of script, if empty same as class name\n settings (optional): settings for this script, if empty same as default settings\n \"\"\"\n Script.__init__(self, name, settings = settings,scripts =scripts, instruments = instruments, log_output = log_output)\n QThread.__init__(self)\n def _function(self):\n \"\"\"\n This is the actual function that will be executed. It uses only information that is provided in the settings property\n will be overwritten in the __init__\n \"\"\"\n def calc_progress(power):\n min, max =self.settings['power_out_min'], self.settings['power_out_max']\n\n progress = ( power-min ) / ( max-min ) * 100.0\n\n return progress\n\n power_values = [float(power) for power in np.arange(self.settings['power_out_min'], self.settings['power_out_max'], self.settings['power_out_step'])]\n\n stage_1_temp = []\n stage_2_temp = []\n platform_temp = []\n times = []\n spectrum = []\n uwave_power = []\n\n spectrum_analyzer = self.scripts['get_spectrum'].instruments['spectrum_analyzer']\n\n initial_power = spectrum_analyzer.settings['output_power']\n print('initial_power', initial_power)\n\n self.save(save_data=False, save_instrumets=True, save_log=False, save_settings=True)\n\n for power in power_values:\n\n self.log('current u-wave power : {:0.2f} dBm'.format(power))\n uwave_power.append(power)\n times.append(time.strftime('%Y_%m_%d_%H_%M_%S'))\n stage_1_temp.append(self.instruments['cryo_station'].platform_temp)\n stage_2_temp.append(self.instruments['cryo_station'].stage_1_temp)\n platform_temp.append(self.instruments['cryo_station'].stage_2_temp)\n\n # set power and wait to thermalized\n spectrum_analyzer.mode = 'TrackingGenerator'\n spectrum_analyzer.output_power = power\n time.sleep(self.settings['wait_time']) #since the spectrum analyzer takes a full second =)\n\n self.scripts['get_spectrum'].update({'output_power': power})\n self.scripts['get_spectrum'].run()\n\n freq = self.scripts['get_spectrum'].data['frequency']\n transmission = self.scripts['get_spectrum'].data['spectrum']\n\n spectrum.append(transmission)\n\n data = {\n 'stage_1_temp' : stage_1_temp,\n 'stage_2_temp' : stage_2_temp,\n 'platform_temp' : platform_temp,\n 'times' : times,\n 'spectrum' : spectrum,\n 'frequency' : freq,\n 'uwave_power' : uwave_power\n }\n\n self.data = data\n\n self.save(save_data=True, save_instrumets=False, save_log=False, save_settings=False)\n\n progress = calc_progress(power)\n self.updateProgress.emit(progress)\n\n\n self.save(save_data=False, save_instrumets=False, save_log=True, save_settings=False)\n\n spectrum_analyzer.output_power = initial_power\n # send 100 to signal that script is finished\n self.updateProgress.emit(100)\n\n\n def plot(self, axes):\n\n 
self.scripts['get_spectrum'].plot(axes)\n","sub_path":"src/scripts/keysight_spectra_vs_power.py","file_name":"keysight_spectra_vs_power.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"322393521","text":"# Copyright 2016-present CERN – European Organization for Nuclear Research\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import List\nfrom matplotlib.rcsetup import cycler\nimport pandas as pd\nfrom qf_lib.analysis.common.abstract_document import AbstractDocument\nfrom qf_lib.common.utils.dateutils.relative_delta import RelativeDelta\nfrom qf_lib.common.utils.error_handling import ErrorHandling\nfrom qf_lib.containers.series.qf_series import QFSeries\nfrom qf_lib.documents_utils.document_exporting.element.chart import ChartElement\nfrom qf_lib.documents_utils.document_exporting.element.new_page import NewPageElement\nfrom qf_lib.documents_utils.document_exporting.element.paragraph import ParagraphElement\nfrom qf_lib.documents_utils.document_exporting.pdf_exporter import PDFExporter\nfrom qf_lib.settings import Settings\nimport matplotlib as plt\nfrom datetime import datetime\n\n\n@ErrorHandling.class_error_logging()\nclass FactorComparisonSheet(AbstractDocument):\n \"\"\"Creates a PDF report\n\n Parameters\n ----------\n settings: Settings\n settings of the project\n pdf_exporter: PDFExporter\n tool that creates the pdf with the result\n factors_series: list\n list of timeseries with factors' indices\n benchmark_series: QFSeries\n timeseries of the benchmark\n title: str\n title of the document\n \"\"\"\n def __init__(self, settings: Settings, pdf_exporter: PDFExporter, factors_series: List[QFSeries],\n benchmark_series: QFSeries, title: str = \"Factor Comparison\"):\n super().__init__(settings, pdf_exporter)\n self.factors_series = factors_series\n self.benchmark_series = benchmark_series\n self.title = title\n\n def build_document(self):\n \"\"\"Creates a document with charts\"\"\"\n self._add_header()\n\n end_date = pd.concat(self.factors_series, axis=1).index.max()\n start_date = end_date - RelativeDelta(years=1)\n\n all_series_one_year = [self.benchmark_series.loc[start_date:]] + \\\n [series.loc[start_date:] for series in self.factors_series]\n\n self._add_perf_chart_for_factor(series_list=all_series_one_year,\n title=\"Factors - 1 Year\")\n\n all_series = [self.benchmark_series] + self.factors_series\n self._add_perf_chart_for_factor(series_list=all_series,\n title=\"Factors - Full History\")\n\n for series in self.factors_series:\n self.document.add_element(NewPageElement())\n self._add_header()\n self._add_perf_chart_for_factor(series_list=[series.loc[start_date:],\n self.benchmark_series.loc[start_date:]],\n title=\"{} - 1 Year\".format(series.name))\n self._add_relative_performance_chart(\n series.loc[start_date:], self.benchmark_series.loc[start_date:],\n chart_title=\"Relative Performance\", legend_subtitle=\"Factor - Benchmark\")\n\n 
self._add_perf_chart_for_factor(series_list=[series, self.benchmark_series],\n title=\"{} - Full History\".format(series.name))\n self.document.add_element(ParagraphElement(\"\\n\"))\n self._add_relative_performance_chart(\n series, self.benchmark_series,\n chart_title=\"Relative Performance\", legend_subtitle=\"Factor - Benchmark\")\n\n def save(self, report_dir: str = \"\"):\n \"\"\"Saves document to the file\"\"\"\n plt.style.use(['tearsheet'])\n # Change the color map for the plots to use 10 different colors\n hex_colors = [plt.colors.rgb2hex(c) for c in plt.cm.tab10(range(10))]\n plt.rcParams['axes.prop_cycle'] = cycler(color=hex_colors)\n\n file_name = \"%Y_%m_%d-%H%M {}.pdf\".format(self.title)\n file_name = datetime.now().strftime(file_name)\n\n if not file_name.endswith(\".pdf\"):\n file_name = \"{}.pdf\".format(file_name)\n\n return self.pdf_exporter.generate([self.document], report_dir, file_name)\n\n def _add_perf_chart_for_factor(self, series_list: List[QFSeries], title: str = \"Factor Index Performance\"):\n \"\"\" Add performance chart for factor\n\n Parameters\n ----------\n series_list: List[QFSeries]\n list of compared series\n title: str\n chart title\n \"\"\"\n self.document.add_element(ChartElement(self._get_perf_chart(series_list, is_large_chart=True,\n title=title),\n figsize=self.full_image_size, dpi=self.dpi))\n","sub_path":"qf_lib/analysis/tearsheets/factor_comparison_sheet.py","file_name":"factor_comparison_sheet.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"159927693","text":"from __future__ import print_function\nimport json\nimport tempfile\nimport os\nfrom fabric.api import *\nfrom fabric.utils import *\nfrom fabric.contrib import *\nfrom lib.apt import *\nfrom lib.puppet import *\nfrom lib.pin import *\nfrom lib.ping import *\nfrom lib.systemd import *\nfrom lib.kubernetes import *\nfrom lib.etcdTools import *\nfrom time import sleep\n\n\nclass FabricException (Exception):\n pass\n\n\nclass FindBin(object):\n\n def __init__(self, myfile):\n self.status = True\n self.paths = ['/bin', '/usr/bin', '/usr/sbin', '/sbin', '/opt/bin']\n self.occurrence = 0\n self.error = ''\n self.path = self.which(myfile)\n\n def which(self, myfile):\n for path in self.paths:\n with hide('output', 'running', 'stderr'):\n commandout = run(\"file %s/%s\" % (path, myfile))\n if 'cannot open' not in commandout:\n res = path\n self.occurrence = self.occurrence + 1\n if self.occurrence > 1:\n self.status = False\n self.error = \"too many occurrences of: \", myfile\n return res\n\n\nclass StackData(object):\n\n def __init__(self):\n self.kubepackages = json.loads(json.dumps({\n 'kubelet': {\n 'is_service': True,\n 'pin_priority': '1001'\n },\n 'kubectl': {\n 'is_service': False,\n 'pin_priority': '1001',\n 'only_for_master': True\n }\n }))\n\n self.repos = json.loads(json.dumps({'apt': {'kubernetes_apt_repository': {\n 'repo': 'deb http://apt.kubernetes.io/ kubernetes-xenial main',\n 'pin_pin': 'origin apt.kubernetes.io',\n 'pin_priority': '500',\n 'explain': 'Explanation: apt: kubernetes_apt_repository',\n 'package': '*'\n }\n },\n 'git': {'kubernetes_git_repository': {\n 'branch': '1.5.2'\n }\n }\n }))\n\n self.path = json.loads(json.dumps({'pin': '/etc/apt/preferences.d',\n 'source': '/etc/apt/sources.list.d',\n 'policy': '/etc/kubernetes/policy',\n 'manifest': '/etc/kubernetes/manifests',\n 'systemd': '/etc/systemd/system'\n }))\n\n def getpath(self, thing):\n 
return(self.path.get(thing))\n\nclass SetGit(object):\n\n def __init__(self, local_dir, branch):\n '''\n It's supposed the local_dir exists!\n '''\n cmd = 'cd %(local_dir)s && \\\n sudo git pull && \\\n sudo git checkout %(branch)s && \\\n sudo git branch --set-upstream-to=origin/%(branch)s %(branch)s && \\\n sudo git pull' % {'local_dir': local_dir,\n 'branch': branch}\n run(cmd)\n\n\nclass Collectd(object):\n\n def __init__(self, version):\n files.sed('/etc/systemd/system/collectdcontainer.service',\n 'collectd-next:5.+',\n version,\n use_sudo=True,\n backup='.bak')\n\n\nclass Kubernetes(object):\n\n def __init__(self, what):\n '''\n To use this class you need to setup some environ variables:\n - KUBECTL_STRING with your kubectl envrinon.\n - KUBE_PRODUCTION_DNS a comma separated list with the DNSs used by your kubernetes cluster.\n export KUBE_PRODUCTION_DNS = '10.10.10.10, 10.10.10.20'\n '''\n try:\n self.kubectl = os.environ['KUBECTL_STRING']\n except Exception as e:\n print('Something goes wrong sourcing KUBECTL_STRING ERROR %s' % e)\n print('''\n please set the KUBECTL_STRING environment string\n as something like:\n export KUBECTL_STRING='kubectl --context production'\n export KUBECTL_STRING='kubectl --context qa'\n export KUBECTL_STRING='kubectl --context sre'\n ''')\n return\n\n self.dns_addresses = os.environ.get('KUBE_PRODUCTION_DNS', None)\n self.domain = os.environ.get('KUBE_PRODUCTION_DOMAIN', None)\n\n self.data = StackData()\n\n if what == 'master':\n self.is_master = True\n else:\n self.is_master = False\n print('Is it a master?: %s' % self.is_master)\n\n def there_is_etcd(self, host):\n etcdTools = EtcdTools()\n # print(self.dns_addresses)\n if etcdTools.is_etcd_hosted_on_host(host, self.domain, self.dns_addresses):\n print('This node should hosts etcd according to the SRV record')\n return True\n else:\n print('This node is not present in the ETCD SRV record')\n return False\n\n def uncordonServer(self, host):\n if not self.is_master:\n if kubeManage.uncordon(host):\n print('Node uncordoned')\n else:\n print('Error during uncordoning')\n print(\"I'll exit before make errors! Please check the node: %s\" % host)\n sys.exit()\n else:\n print('Uncordon not needed')\n\n def restartDocker(self, host):\n kubeManage = KubeManage(self.kubectl)\n kubeManage.drain(host)\n systemd = Systemd()\n if kubeManage.is_node_drained(host):\n systemd.stop('kubelet')\n systemd.restart('docker')\n systemd.start('kubelet')\n kubeManage.uncordonServer(host, self.is_master)\n return False\n\n def reboot(self, host):\n kubeManage = KubeManage(self.kubectl)\n kubeManage.drain(host)\n ping = Ping()\n etcdTools = EtcdTools()\n if kubeManage.is_node_drained(host):\n with settings(abort_exception=FabricException):\n try:\n print('Reboot the server... I will lost the connection')\n reboot()\n except:\n print('Lost connection, server rebooting')\n if ping.check_until_down(host):\n if not self.is_master:\n if kubeManage.uncordon(host):\n print('Node uncordoned')\n else:\n print('Error during uncordoning')\n print(\"I'll exit before make errors! Please check the node: %s\" % host)\n sys.exit()\n else:\n print('Uncordon not needed')\n if ping.check_until_up(host):\n if etcdTools.is_etcd_hosted_on_host(host, self.domain, self.dns_addresses):\n wait = 60\n else:\n wait = 30\n print('Host UP, sleep for: %s second waiting for all services.' % wait)\n sleep(wait)\n print('Assuming host Up&Running')\n return True\n else:\n print(\"I'll exit before make errors! 
Please check the node: %s\" % host)\n sys.exit()\n return False\n\n def getpods(self, namespace, eldest='0s'):\n '''\n Get all pods for a namespaces, this function is intended only for debug\n '''\n kubeManage = KubeManage(self.kubectl)\n pods = kubeManage.get_pod_for_namespace(namespace, eldest)\n for pod in pods:\n print(' %s %s %s %s %s' % (pod.name,\n pod.ready,\n pod.status,\n pod.restarts,\n pod.age))\n\n def deletepods(self, namespace, sleep=20, parallel=3, eldest='0s'):\n '''\n delete all pods in a namespace, in set of 'parallel'.\n Deletions will be performed serialized but k8s will works in parallel.\n '''\n kubeManage = KubeManage(self.kubectl)\n # namespaces = kubeManage.get_namespaces()\n pods = kubeManage.get_pod_for_namespace(namespace, eldest)\n num_of_pods = len(pods)\n parallel = int(parallel)\n print('Starting to delete %s pods, parallelism %s' % (len(pods), parallel))\n while pods:\n occurrence = 0\n while occurrence != parallel:\n try:\n pod = pods.pop()\n num_of_pods -= 1\n print('deleting pod %s, ramaining %s pods' % (pod.name, num_of_pods))\n kubeManage.delete_pod(pod.name, namespace)\n except Exception as e:\n print('%s - This is the last run, after I will exit' % e)\n break\n occurrence += 1\n terminating = kubeManage.get_num_of_terminating_pod(namespace, 1)\n creating = kubeManage.get_num_of_creating_pod(namespace, 1)\n while not(terminating <= 0) or not(creating <= 0):\n terminating = kubeManage.get_num_of_terminating_pod(namespace, 1)\n creating = kubeManage.get_num_of_creating_pod(namespace, 1)\n print(' - ', 'searching for other pods', sep=\"\\n\")\n print('end all')\n\n def run_puppet_task(self, host):\n puppet = Puppet()\n systemd = Systemd()\n kubeManage = KubeManage(self.kubectl)\n kubeManage.drain(host)\n systemd = Systemd()\n if kubeManage.is_node_drained(host):\n puppet_run_result = puppet.run()\n if puppet_run_result is True:\n if systemd.is_masked('puppet'):\n systemd.unmask('puppet')\n systemd.start('puppet')\n self.reboot(host)\n else:\n print('Puppet exit with: %s', puppet_run_result)\n\n\n def upgrade(self, package_version, host, pin_suffix=None):\n kubeManage = KubeManage(self.kubectl)\n # kubeManage.drain(host)\n kubepackages = self.data.kubepackages\n pin = Pin()\n apt = Apt()\n systemd = Systemd()\n pinning_path = self.data.getpath('pin')\n if kubeManage.is_node_drained(host):\n\n for package in kubepackages:\n is_service = kubepackages.get(package).get('is_service')\n if is_service:\n systemd.stop(package)\n\n for package in kubepackages:\n pin.put(pinning_path, package, package_version)\n\n apt.update()\n\n for package in kubepackages:\n\n if not kubepackages.get(package).get('only_for_master'):\n apt.install(package)\n elif self.is_master:\n apt.install(package)\n\n try:\n print('configure git')\n SetGit(self.data.path.get('manifest'),\n package_version)\n except Exception as e:\n abort('Something goes wrong with git ERROR: %s' % e)\n\n for package in kubepackages:\n is_service = kubepackages.get(package).get('is_service')\n if is_service:\n if systemd.start(package):\n if not self.is_master:\n if kubeManage.uncordon(host):\n return True\n else:\n return True\n return False\n","sub_path":"kubernetes_task.py","file_name":"kubernetes_task.py","file_ext":"py","file_size_in_byte":11898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"219955610","text":"import fileinput as fi\nimport re\nimport itertools as it\nimport functools as ft\nimport string\nimport collections as cs\nimport 
collections.abc as abc\nimport math\nimport sys\nimport heapq\n\nimport typing\n\n# findall, search, parse\nfrom parse import *\nimport more_itertools as mit\n# import z3\n# import numpy as np\n# import lark\n# import regex\n# import intervaltree as itree\nfrom bidict import bidict\nimport tqdm\n\n# print(sys.getrecursionlimit())\nsys.setrecursionlimit(6500)\n\n# Debug logging\nDEBUG = False\ndef gprint(*args, **kwargs):\n if DEBUG: print(*args, **kwargs)\n\npositionT = tuple[int,int]\ndef ortho(y: int, x: int, shape: positionT) -> abc.Iterator[positionT]:\n \"\"\"Returns all orthagonaly adjacent points, respecting boundary conditions\"\"\"\n sy, sx = shape\n if 0 < x: yield (y, x-1)\n if x < sx-1: yield (y, x+1)\n if 0 < y: yield (y-1, x)\n if y < sy-1: yield (y+1, x)\n\ndef adj(y: int, x: int, shape: positionT) -> abc.Iterator[positionT]:\n \"\"\"Returns all points around a point, given the shape of the array\"\"\"\n sy, sx = shape\n for dy,dx in it.product([-1,0,1], [-1,0,1]):\n if dy == 0 and dx == 0:\n continue\n\n py = y + dy\n px = x + dx\n\n if 0 <= px < sx and 0 <= py < sy:\n yield (py,px)\n\n\n# Input parsing\nINPUT = \"\".join(fi.input()).rstrip()\ngroups = INPUT.split(\"\\n\\n\")\nlines = list(INPUT.splitlines())\nnumbers = [list(map(int, re.findall(\"-?[0-9]+\", line))) for line in lines]\npos_numbers = [list(map(int, re.findall(\"[0-9]+\", line))) for line in lines]\ngrid = [[c for c in line] for line in lines]\ngsz = (len(grid), len(grid[0]))\n\ndef solve():\n nums = []\n\n # orig_spot -> now_spot\n stored = bidict()\n for (i,line) in enumerate(lines):\n nums.append(int(line))\n stored[i] = i\n\n # LOOPS = 1\n # DEC_KEY = 1\n LOOPS = 10\n DEC_KEY = 811589153\n nums = [x*DEC_KEY for x in nums]\n orig_nums = tuple(nums)\n \n zero_index = nums.index(0)\n N = len(orig_nums)\n for TT in range(LOOPS):\n # zero_pos_cur = stored[zero_index]\n # now_lineup = [orig_nums[stored.inverse[(i + zero_pos_cur) % N]] for i in range(0, len(orig_nums))]\n # # now_lineup = [orig_nums[stored.inverse[(i + zero_index) % N]] for i in range(0, len(orig_nums))]\n # print(\"BEFORE {}:\".format(TT), now_lineup)\n for orig_spot, num in tqdm.tqdm(enumerate(orig_nums), total=len(orig_nums)):\n # for orig_spot, num in enumerate(orig_nums):\n # now_lineup = [orig_nums[stored.inverse[i]] for i in range(0, len(orig_nums))]\n # gprint(now_lineup)\n # REAL_BEFORE_SPOT = stored[orig_spot]\n # ONUM = num\n \n if num < 0:\n wnum = -(-num % (N-1))\n else:\n wnum = num % (N-1)\n \n # print(num, wnum)\n num = wnum\n\n if num < 0:\n rem = num \n while rem < 0:\n if stored[orig_spot] == 1: # special case\n target = N-1\n my_orig = stored.pop(orig_spot)\n for i in range(my_orig+1, target+1):\n adx = stored.inverse[i]\n stored[adx] -= 1\n\n stored[orig_spot] = target\n\n elif stored[orig_spot] == 0: # special case\n cur_spot = stored.pop(orig_spot)\n target_orig = stored.inverse.pop(N-1)\n stored[target_orig] = cur_spot\n stored[orig_spot] = N-1\n\n else:\n cur_spot = stored.pop(orig_spot)\n target_orig = stored.inverse.pop(cur_spot-1)\n stored[target_orig] = cur_spot\n stored[orig_spot] = cur_spot-1\n\n \n rem += 1\n elif num == 0:\n pass\n else:\n rem = num % N\n while 0 < rem:\n if stored[orig_spot] == N-1:\n cur_spot = stored.pop(orig_spot)\n for i in reversed(range(1, cur_spot)):\n adx = stored.inverse[i]\n stored[adx] += 1\n\n stored[orig_spot] = 1\n\n else:\n cur_spot = stored.pop(orig_spot)\n target_orig = stored.inverse.pop(cur_spot+1)\n stored[target_orig] = cur_spot\n stored[orig_spot] = cur_spot+1\n # 
now_lineup = [orig_nums[stored.inverse[i]] for i in range(0, len(orig_nums))]\n # print(now_lineup)\n\n\n\n rem -= 1\n # REAL_AFTER_SPOT = stored[orig_spot]\n\n # print(ONUM, REAL_BEFORE_SPOT, REAL_AFTER_SPOT)\n\n # now_lineup = [orig_nums[stored.inverse[i]] for i in range(0, len(orig_nums))]\n # gprint(now_lineup)\n # gprint(\"\")\n\n # now_lineup = [orig_nums[stored.inverse[i]] for i in range(0, len(orig_nums))]\n # zero_pos_cur = stored[zero_index]\n # now_lineup = [orig_nums[stored.inverse[(i + zero_pos_cur) % N]] for i in range(0, len(orig_nums))]\n # print(\"AFTER {}:\".format(TT), now_lineup)\n # print(\"\")\n\n # now_lineup = [orig_nums[stored.inverse[i]] for i in range(0, len(orig_nums))]\n # print(now_lineup)\n pwd = orig_nums.index(0)\n cur_idx_zero = stored[pwd]\n # print(cur_idx_zero)\n # print(orig_nums[stored.inverse[(cur_idx_zero + 1000) % len(orig_nums)]])\n # print(orig_nums[stored.inverse[(cur_idx_zero + 2000) % len(orig_nums)]])\n # print(orig_nums[stored.inverse[(cur_idx_zero + 3000) % len(orig_nums)]])\n ans = sum(orig_nums[stored.inverse[(cur_idx_zero + i) % len(orig_nums)]] for i in [1000, 2000, 3000])\n return ans\n\n\n\n\n\nprint(solve())\n","sub_path":"2022/20/y2022_d20_p02.py","file_name":"y2022_d20_p02.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"85180014","text":"import os\nimport sys\n\nfrom sqlalchemy import engine_from_config\n\nfrom pyramid.paster import (\n get_appsettings,\n setup_logging,\n )\n\nfrom blog.model import (\n DBSession,\n Base,\n User,\n Group,\n Post\n )\n\n\ndef insert_while_creating(session):\n me = User('Alexander', '1')\n session.add(me)\n he = User('Noname', '2')\n session.add(he)\n my_group = Group(me, 'first group')\n my_group.users.append(me)\n my_group.users.append(he)\n session.add(my_group)\n my_group2 = Group(he, 'cool group')\n my_group2.users.append(me)\n my_group2.users.append(he)\n session.add(my_group2)\n p = Post('Post', 'my post')\n session.add(p)\n p2 = Post('Post2', 'his post in group')\n session.add(p2)\n me.posts.append(p)\n he.posts.append(p2)\n my_group.posts.append(p2)\n p3 = Post('Post3', 'my post in group')\n session.add(p3)\n me.posts.append(p3)\n my_group.posts.append(p3)\n p4 = Post('Post4', 'my post 4')\n session.add(p4)\n me.posts.append(p4)\n session.commit()\n\n\ndef usage(argv):\n cmd = os.path.basename(argv[0])\n print('usage: %s \\n'\n '(example: \"%s development.ini\")' % (cmd, cmd))\n sys.exit(1)\n\n\nif __name__ == '__main__':\n argv = sys.argv\n if len(argv) != 2:\n usage(argv)\n config_uri = argv[1]\n setup_logging(config_uri)\n settings = get_appsettings(config_uri)\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n insert_while_creating(DBSession())\n","sub_path":"initialize_db.py","file_name":"initialize_db.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"561597843","text":"import sys\ninput = sys.stdin.readline\n\ndef find(parent,x):\n if parent[x] != x:\n parent[x] = find(parent,parent[x])\n return parent[x]\n\ndef union(parent,a,b):\n a = find(parent,a)\n b = find(parent,b)\n if a < b :\n parent[b] = a\n else:\n parent[a] = b\n\nn,m = map(int,input().split())\nparent = [0] *(n+1)\nfor i in range(0,n+1):\n parent[i] = i\n\nedges = []\nresult = 0\nmax_cost = 0\n\nfor _ in range(m):\n 
a,b,cost = map(int,input().split())\n    edges.append((cost,a,b))\n\nedges.sort()\n\nfor edge in edges:\n    cost,a,b = edge\n    if find(parent,a) != find(parent,b):\n        union(parent,a,b)\n        result+=cost \n        max_cost = cost\n\nprint(result-max_cost) # the city is split into two villages (two spanning trees), so drop the largest-cost edge from the MST to get the minimum maintenance cost\n","sub_path":"Graph/10-3_Plan to divide the city.py","file_name":"10-3_Plan to divide the city.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"618524173","text":"# -*- encoding: utf-8 -*-\n#\n# Author: Endre Karlson \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\nTest InvoiceState\n\"\"\"\n\nimport logging\n\nfrom billingstack.tests.api.v2 import V2Test\n\nLOG = logging.getLogger(__name__)\n\n\nclass TestInvoiceState(V2Test):\n    __test__ = True\n    path = \"invoice_states\"\n\n    def setUp(self):\n        super(TestInvoiceState, self).setUp()\n        self.start_storage('biller')\n        self.start_service('biller')\n\n    def test_create_invoice_state(self):\n        fixture = self.get_fixture('invoice_state')\n\n        resp = self.post(self.path, fixture)\n\n        self.assertData(fixture, resp.json)\n\n    def test_list_invoice_states(self):\n        self.create_invoice_state()\n\n        resp = self.get(self.path)\n\n        self.assertLen(1, resp.json)\n\n    def test_get_invoice_state(self):\n        _, state = self.create_invoice_state()\n\n        url = self.item_path(state['name'])\n        resp = self.get(url)\n\n        self.assertData(resp.json, state)\n\n    def test_update_invoice_state(self):\n        _, state = self.create_invoice_state()\n\n        url = self.item_path(state['name'])\n        resp = self.patch_(url, state)\n\n        self.assertData(resp.json, state)\n\n    def test_delete_invoice_state(self):\n        _, state = self.create_invoice_state()\n\n        url = self.item_path(state['name'])\n        self.delete(url)\n\n        data = self.services.biller.list_invoice_states(self.admin_ctxt)\n        self.assertLen(0, data)\n","sub_path":"billingstack/tests/api/v2/test_invoice_state.py","file_name":"test_invoice_state.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"583275307","text":"from __future__ import print_function\nfrom configparser import ConfigParser\nimport sys\n\nclass config(object):\n\tdef __init__(self):\n\t\tconfig = ConfigParser()\n\t\tconfig.read(\"conf/DNS.ini\")\n\t\tself.th_cnt = config.getint('settings', 'thread_cnt')\n\t\tself.th_proc = config.getint('settings', 'thread_proc')\n\t\tself.th_wait = config.getint('settings', 'thread_wait')\n\t\tself.id = config.get('virtual_host', 'id')\n\t\tself.pw = config.get('virtual_host', 'pw')\n\t\tself.host = config.get('queue', 'host')\n\t\tself.port = config.getint('queue', 'port')\n\t\tself.get_queue = config.get('queue', 'get_queue')\n\t\tself.push_queue = config.get('queue', 'push_queue')\n\t\tself.name = config.get('queue', 'name')\n\t\tself.http_timeout = config.getint('network', 'http_timeout')\n\t\tself.dns_timeout = config.getint('network', 'dns_timeout')\n\n\tdef 
__del__(self):\n\t\tpass\n\n\tdef lprint(self, *args, **kwargs):\n\t\tprint(*args, file=sys.stderr, **kwargs)","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479019240","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ndef cos_sim(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n\n\nif __name__ == '__main__':\n csv = 'pixiv.csv'\n df = pd.read_csv(csv).fillna('null')\n\n tag_list = []\n for i,row in df.iterrows():\n tags = []\n for j in range(10):\n tag = row['tag'+str(j)]\n if tag != 'null':\n tags.append(tag)\n tag_str = ' '.join(tags)\n tag_list.append(tag_str)\n\n df['tag_str'] = tag_list\n\n test = ['艦これ','かわいい']\n test_str = ' '.join(test)\n\n vectorizer = TfidfVectorizer(use_idf=True)\n vecs = vectorizer.fit_transform(np.append(df['tag_str'].values,test_str))\n\n df['tag_vec'] = list(vecs.toarray()[:-1])\n test_vec = vecs.toarray()[-1]\n\n distance_list = []\n for i, row in df.iterrows():\n vec1 = test_vec\n vec2 = row['tag_vec']\n distance = cos_sim(vec1,vec2)\n distance_list.append(distance)\n\n df['distance'] = distance_list\n df = df.sort_values(by='distance',ascending=False)\n df = df[df['distance']<1.0][:10]\n\n all_tags = []\n for i, row in df.iterrows():\n tags = row['tag_str'].split(' ')\n for tag in tags:\n if tag not in all_tags and tag not in test:\n all_tags.append(tag)\n\n print(all_tags)","sub_path":"autotag.py","file_name":"autotag.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"491945089","text":"import tkinter as tk\nimport Aluno as al \nimport Curso as cr \nimport Grade as gr \nimport Disciplina as dic \nimport Historico as hist\nfrom tkinter import messagebox \n\nclass LimitePrincipal():\n def __init__(self, root, controle):\n self.controle = controle\n self.root = root\n self.root.geometry('600x450')\n self.menubar = tk.Menu(self.root) \n self.alunoMenu = tk.Menu(self.menubar)\n self.discipMenu = tk.Menu(self.menubar)\n self.gradeMenu = tk.Menu(self.menubar)\n self.cursoMenu = tk.Menu(self.menubar)\n self.historicoMenu = tk.Menu(self.menubar)\n self.sairMenu = tk.Menu(self.menubar) \n \n self.alunoMenu.add_command(label=\"Insere\", command=self.controle.insereAlunos)\n self.alunoMenu.add_command(label=\"Mostra\", command=self.controle.mostraAlunos)\n self.alunoMenu.add_command(label=\"Consulta\", command=self.controle.consultaAlunos)\n self.menubar.add_cascade(label=\"Aluno\", menu=self.alunoMenu)\n\n self.discipMenu.add_command(label=\"Insere\", command=self.controle.insereDisciplinas)\n self.discipMenu.add_command(label=\"Mostra\", command=self.controle.mostraDisciplinas)\n self.discipMenu.add_command(label=\"Consulta\", command=self.controle.consultaDisciplinas) \n self.menubar.add_cascade(label=\"Disciplina\", menu=self.discipMenu)\n\n self.gradeMenu.add_command(label=\"Insere\", command=self.controle.insereGrade)\n self.gradeMenu.add_command(label=\"Mostra\", command=self.controle.mostraGrade)\n self.gradeMenu.add_command(label=\"Consulta\", command=self.controle.consultaGrade) \n self.menubar.add_cascade(label=\"Grade\", menu=self.gradeMenu)\n\n self.cursoMenu.add_command(label=\"Insere\", command=self.controle.insereCursos)\n self.cursoMenu.add_command(label=\"Mostra\", command=self.controle.mostraCursos)\n 
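    # every entity menu (Aluno, Disciplina, Grade, Curso, Historico) exposes the same Insere/Mostra/Consulta actions, delegating to the controller
    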
self.cursoMenu.add_command(label=\"Consulta\", command=self.controle.consultaCursos) \n self.menubar.add_cascade(label=\"Curso\", menu=self.cursoMenu)\n\n self.historicoMenu.add_command(label=\"Insere\", command=self.controle.insereHistoricos)\n self.historicoMenu.add_command(label=\"Mostra\", command=self.controle.mostraHistoricos)\n self.historicoMenu.add_command(label=\"Consulta\", command=self.controle.consultaHistoricos) \n self.menubar.add_cascade(label=\"Histórico\", menu=self.historicoMenu)\n\n self.sairMenu.add_command(label=\"Salva\", command=self.controle.salvaDados)\n self.menubar.add_cascade(label=\"Sair\", menu=self.sairMenu)\n\n self.root.config(menu=self.menubar)\n \nclass ControlePrincipal(): \n def __init__(self):\n self.root = tk.Tk()\n\n self.ctrlAluno = al.CtrlAluno()\n self.ctrlDisciplina = dic.CtrlDisciplina()\n self.ctrlGrade = gr.CtrlGrade(self)\n self.ctrlCurso = cr.CtrlCurso(self)\n self.ctrlHistorico = hist.CtrlHistorico(self)\n \n self.limite = LimitePrincipal(self.root, self) \n\n self.root.title(\"Sistema Acadêmico YS\")\n self.root.mainloop()\n\n ############################################### \n def insereAlunos(self):\n self.ctrlAluno.insereAlunos(self.root)\n\n def mostraAlunos(self):\n self.ctrlAluno.mostraAlunos()\n \n def consultaAlunos(self):\n self.ctrlAluno.consultaAlunos(self.root)\n\n ###############################################\n def insereDisciplinas(self):\n self.ctrlDisciplina.insereDisciplinas(self.root)\n\n def mostraDisciplinas(self):\n self.ctrlDisciplina.mostraDisciplinas()\n\n def consultaDisciplinas(self):\n self.ctrlDisciplina.consultaDisciplinas(self.root)\n\n ###############################################\n def insereGrade(self):\n self.ctrlGrade.insereGrade(self.root)\n\n def mostraGrade(self):\n self.ctrlGrade.mostraGrade()\n \n def consultaGrade(self):\n self.ctrlGrade.consultaGrade(self.root)\n\n ###############################################\n def insereCursos(self):\n self.ctrlCurso.insereCursos(self.root)\n\n def mostraCursos(self):\n self.ctrlCurso.mostraCursos()\n \n def consultaCursos(self):\n self.ctrlCurso.consultaCursos(self.root)\n \n ###############################################\n def insereHistoricos(self):\n self.ctrlHistorico.insereHistoricos(self.root)\n\n def mostraHistoricos(self):\n self.ctrlHistorico.mostraHistoricos()\n \n def consultaHistoricos(self):\n self.ctrlHistorico.consultaHistoricos(self.root)\n\n ###############################################\n def salvaDados(self):\n self.ctrlAluno.salvaAlunos()\n self.ctrlDisciplina.salvaDisciplinas()\n self.ctrlGrade.salvaGrades()\n self.ctrlCurso.salvaCursos()\n self.ctrlHistorico.salvaHistoricos()\n messagebox.showinfo('Backup', 'Arquivos salvos com sucesso!')\n self.root.destroy()\n \nif __name__ == '__main__':\n c = ControlePrincipal()","sub_path":"TRABALHO FINAL/Trabalho Final Outra Versão/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"3390571","text":"\"\"\"\nCount and Say\n\nThe count-and-say sequence is the sequence of integers beginning as follows:\n1, 11, 21, 1211, 111221, ...\n\n1 is read off as \"one 1\" or 11.\n11 is read off as \"two 1s\" or 21.\n21 is read off as \"one 2, then one 1\" or 1211.\nGiven an integer n, generate the nth sequence.\n\nNote: The sequence of integers will be represented as a string.\n\nSubscribe to see which companies asked this question\n\nHide Tags String\nHide Similar Problems (M) Encode 
and Decode Strings\n\n\n\"\"\"\n\nimport unittest\n\nclass Solution:\n # @param {int} n the nth\n # @return {string} the nth sequence\n def countAndSay(self, n):\n # Write your code here\n if n <= 0:\n return \"\"\n seq = \"1\"\n for i in range(1, n):\n tmp = []\n curchar = seq[0]\n cnt = 1\n for char in seq[1:]:\n if char == curchar:\n cnt += 1\n else:\n tmp.append(str(cnt))\n tmp.append(str(curchar))\n curchar = char\n cnt = 1\n tmp.append(str(cnt))\n tmp.append(str(curchar))\n new_seq = \"\".join(tmp)\n seq = new_seq\n return seq\n\nclass Solution1(object):\n def countAndSay(self, n): # 39ms, 97.92%\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n if n < 1:\n return \"\"\n current = \"1\"\n idx = 1\n while idx < n:\n next = \"\"\n char = current[0]\n cnt = 1\n for i in range(1,len(current)):\n if char == current[i]:\n cnt += 1\n if char != current[i]:\n next += str(cnt)+char\n char = current[i]\n cnt = 1\n next += str(cnt)+char\n current = next\n idx +=1\n return current\n\n\n\n\n\n\nclass SolutionTester(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_case1(self): # self test wrong, then correct it\n n = 5\n answer = \"111221\"\n result = self.sol.countAndSay(n)\n self.assertEqual(answer, result)\n\n\ndef main():\n suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"freq/count_and_say.py","file_name":"count_and_say.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"494585283","text":"#!/usr/bin/env python3\n\"\"\"\n记账系统\n\"\"\"\nimport time\ndef shouru():\n #时间,余额\n ru=int(input(\"收入金额:\"))\n with open('/root/project/day05/qianbao.txt','rb') as fobj:\n yue=int(fobj.read())+ru\n mytime = time.localtime()\n print(\"%s-%s-%s\"&(mytime.tm_year,mytime.tm_mon,mytime.tm_mday))\n # zhangdan_list=\"%-10s %-10s %-10s %-10s %s\"%(mytime,ru,0,yue,shuoming)\n # with open('zhangdan.txt','wb') as fobj:\n # fobj.writelines(zhangdan_list)\n\ndef zhichu():\n print(\"zhichu\")\n\ndef chakan():\n print(\"chakan\")\n\ndef menu_list():\n mscmd={\"0\":shouru,\"1\":zhichu,\"2\":chakan}\n info=\"\"\"收入指出软件\n[0] shouru\n[1] zhichu\n[2] chakan\n\"\"\"\n choice=input(info).strip()[0]\n if choice not in '0/1/2':\n print(\"value error!\")\n exit(1)\n mscmd[choice]()\n\nif __name__ == '__main__':\n menu_list()","sub_path":"STEP05/project/python/day05/jizhang.py","file_name":"jizhang.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"458569254","text":"print(\"--> PCA Utilizando sklearn <--\")\nimport numpy as np\nfrom sklearn.decomposition import PCA\n\nX = np.array([\n [2.5, 2.4],\n [0.5, 0.7],\n [2.2, 2.9],\n [1.9, 2.2],\n [3.1, 3.0],\n [2.3, 2.7],\n [2.0, 1.6],\n [1.0, 1.1],\n [1.5, 1.6],\n [1.1, 0.9]\n])\n\nX = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\npca = PCA(n_components=2)\npca.fit(X) \nprint(pca.explained_variance_ratio_) \nprint(pca.singular_values_)\npca_transform = pca.fit_transform(X)\nprint(pca_transform)\n","sub_path":"sklearn_model.py","file_name":"sklearn_model.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"450165322","text":"# -*- coding: UTF-8 -*-\r\nfrom DNS_Resolver import DNSReslover\r\nimport requests\r\nfrom lxml import 
etree\r\nimport re\r\nimport Queue\r\nimport threading\r\n\r\n\r\nclass Crawler(threading.Thread):\r\n \"\"\"Class for crawler.\r\n It can grab the web information and extract the inner and the outer links\r\n and store the whole html file in database.\r\n You can call the run_crawler() function to run the crawler.\r\n Just use the attributes outer_link_set and inner_link_set getting link.\"\"\"\r\n q = Queue.Queue()\r\n dic = {}\r\n thread_lock = threading.RLock()\r\n\r\n def __init__(self, name):\r\n threading.Thread.__init__(self)\r\n self.name = name\r\n self.text = \"\"\r\n self.url = \"\"\r\n\r\n def run(self):\r\n \"\"\"This is for run a crawler.\"\"\"\r\n while True :\r\n if Crawler.q.empty():\r\n self.get_url()\r\n continue\r\n self.url = Crawler.q.get()\r\n response = self.get_web()\r\n while not response:\r\n self.url = Crawler.q.get()\r\n response = self.get_web()\r\n \r\n text = self.change_to_string(response)\r\n result = self.extract_link()\r\n outer_link = []\r\n inner_link = []\r\n self.handle_link( result, inner_link, outer_link)\r\n outer_link_set = set(outer_link)\r\n inner_link_set = set(inner_link)\r\n Crawler.dic[self.url] = inner_link_set\r\n\r\n def get_inner_links(self):\r\n \"\"\"The interface for inner links\"\"\"\r\n return Crawler.dic\r\n\r\n def get_url(self):\r\n \"\"\"This function is used for get url list from the left\"\"\"\r\n urls_dic = resolver.get_resolved_url_packet()\r\n for url in urls_dic.keys():\r\n if not urls_dic[url]:\r\n Crawler.dic[url] = 'fail'\r\n Crawler.q.put(urls_dic[url])\r\n\r\n def get_web(self):\r\n \"\"\"This function used for grab a web information and return a Response object.\"\"\"\r\n headers = {'User-Agent': 'XXX', 'Referer': 'XXX'}\r\n proxies = {'http': 'http://XX.XX.XX.XX:XXXX'}\r\n try:\r\n responses = requests.get(self.url)\r\n return responses\r\n except Exception:\r\n Crawler.dic[self.url] = \"fail\"\r\n return False\r\n\r\n def change_to_string(self, response):\r\n \"\"\"Change the Response object to string and return it\"\"\"\r\n html_text = etree.HTML(response.text)\r\n self.text = etree.tostring(html_text, pretty_print=True)\r\n\r\n domain_pattern = re.compile(r'http://[\\w+\\.]+|https://[\\w+\\.]+')\r\n http_domain = re.match(domain_pattern, self.url).group()\r\n split_pattern = re.compile('http://')\r\n domain_list = re.split(split_pattern, http_domain)\r\n change_pattern = re.compile('\\.')\r\n domain = re.sub(change_pattern, '_', domain_list[1])\r\n\r\n name = domain + '.txt'\r\n self.store_text(name)\r\n\r\n def store_text(self, name):\r\n \"\"\"This function is used for get the text of the web.\"\"\"\r\n Crawler.thread_lock.acquire()\r\n f = file(name, 'a+')\r\n f.write(self.text)\r\n f.close()\r\n Crawler.thread_lock.release()\r\n\r\n def extract_link(self):\r\n \"\"\"This function is used for extract all links from the web.\r\n Use the href attributes, Return the result.\"\"\"\r\n html_text = etree.HTML(self.text)\r\n results = html_text.xpath('//@href')\r\n return results\r\n\r\n def handle_link(self, results, inner_link_lists, outer_link_lists):\r\n \"\"\"The function is used for deal with the links.\r\n Distinct the inner links and outer links.\r\n For inner links, it should add the header and delete the tag#, remove .css and javascript link\"\"\"\r\n # distinct inner from outer link through the header http\r\n pattern = re.compile(r'http://|https://')\r\n # get the url domain to define the website\r\n domain_pattern = re.compile(r'http://[\\w+\\.]+|https://[\\w+\\.]+')\r\n domain = 
re.match(domain_pattern, self.url).group()\r\n # define the .css or javascript file\r\n useless_pattern = re.compile(r'/|javascript|\\S*.css')\r\n # define the tag#\r\n tag_pattern = re.compile(r'\\S*#\\S*')\r\n for element in results:\r\n match = re.match(pattern,element)\r\n if match: # begin with http\r\n test_domain = re.match(domain_pattern, element).group() # test the header for spcific definition\r\n if test_domain != domain:\r\n outer_link_lists.append(match.string)\r\n else: # same domain\r\n inner_link_lists.append(element)\r\n else: # not begin with http\r\n test_inner = re.match(useless_pattern,element) # test if it's a css or javascript file\r\n if not test_inner:\r\n test_tag = re.match(tag_pattern,element)\r\n if test_tag: # test if it's a page tag#\r\n pass\r\n else:\r\n link = domain + '/' + element\r\n inner_link_lists.append(link)\r\n else:\r\n pass\r\n\r\n\r\ndef run_crawler():\r\n thread1 = Crawler(\"thread1\")\r\n thread2 = Crawler(\"thread2\")\r\n thread3 = Crawler(\"thread3\")\r\n thread2.start()\r\n thread1.start()\r\n thread3.start()\r\n thread2.join()\r\n thread3.join()\r\n thread1.join()\r\n\r\nresolver = DNSReslover()\r\nrun_crawler()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"crawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"219379123","text":"import asyncio\nimport os\nimport urllib.parse\nfrom copy import copy\nfrom typing import AsyncGenerator, Awaitable\n\nimport psycopg2\nfrom psycopg2.extensions import cursor\nimport pytest\n\nimport lightbus\nimport lightbus.path\nfrom lightbus import TransactionalEventTransport\nfrom lightbus.path import BusPath\nfrom lightbus.transports.base import TransportRegistry\nfrom lightbus.transports.redis import StreamUse\nfrom lightbus.transports.transactional import DbApiConnection\n\nif False:\n import aiopg\n\n\n@pytest.fixture()\ndef cursor_factory():\n\n class ErrorThrowingCursor(cursor):\n\n def __init__(self, conn, *args, **kwargs):\n self.conn = conn\n super().__init__(conn, *args, **kwargs)\n\n def execute(self, query, vars=None):\n result = super().execute(query, vars)\n\n for notice in self.conn.notices:\n level, message = notice.split(\": \")\n\n command = query.upper().split(\" \")[0]\n if level == \"WARNING\":\n raise psycopg2.Warning(\n f\"Postgres issued a warning. The best way to check \"\n f\"what is going on is to set log_statement=all in postgres \"\n f\"and view the output (this is how the tests/docker-compose.yaml \"\n f\"file is setup already). 
However, it is POSSIBLE this query may have \"\n f\"been at fault: {message.strip()}\"\n )\n\n return result\n\n return ErrorThrowingCursor\n\n\n@pytest.fixture()\ndef pg_url():\n return os.environ.get(\"PG_URL\", \"postgres://postgres@localhost:5432/postgres\")\n\n\n@pytest.fixture()\ndef pg_kwargs(pg_url):\n parsed = urllib.parse.urlparse(pg_url)\n assert parsed.scheme == \"postgres\"\n return {\n \"dbname\": parsed.path.strip(\"/\") or \"postgres\",\n \"user\": parsed.username or \"postgres\",\n \"password\": parsed.password,\n \"host\": parsed.hostname,\n \"port\": parsed.port or 5432,\n }\n\n\n@pytest.yield_fixture()\nasync def aiopg_connection(pg_kwargs):\n import aiopg\n\n connection = await aiopg.connect(**pg_kwargs)\n yield connection\n connection.close()\n\n\n@pytest.yield_fixture()\ndef aiopg_connection_factory(pg_kwargs):\n import aiopg\n\n connections = []\n\n async def factory():\n connection = await aiopg.connect(**pg_kwargs)\n connections.append(connection)\n return connection\n\n yield factory\n\n for connection in connections:\n connection.close()\n\n\n@pytest.yield_fixture()\ndef psycopg2_connection(pg_kwargs):\n import psycopg2\n\n connection = psycopg2.connect(**pg_kwargs)\n yield connection\n connection.close()\n\n\n@pytest.fixture()\nasync def aiopg_cursor(aiopg_connection, cursor_factory):\n cursor = await aiopg_connection.cursor(cursor_factory=cursor_factory)\n await cursor.execute(\"BEGIN -- aiopg_cursor\")\n await cursor.execute(\"DROP TABLE IF EXISTS lightbus_processed_events\")\n await cursor.execute(\"DROP TABLE IF EXISTS lightbus_event_outbox\")\n await cursor.execute(\"COMMIT -- aiopg_cursor\")\n return cursor\n\n\n@pytest.fixture()\ndef dbapi_database(aiopg_connection, aiopg_cursor):\n return DbApiConnection(aiopg_connection, aiopg_cursor)\n\n\ndef verification_connection() -> Awaitable[\"aiopg.Connection\"]:\n import aiopg\n\n return aiopg.connect(**pg_kwargs(pg_url()))\n\n\n@pytest.fixture()\ndef get_outbox():\n\n async def inner():\n async with verification_connection() as connection:\n async with connection.cursor() as cursor:\n await cursor.execute(\"SELECT * FROM lightbus_event_outbox\")\n return await cursor.fetchall()\n\n return inner\n\n\n@pytest.fixture()\ndef get_processed_events():\n\n async def inner():\n async with verification_connection() as connection:\n async with connection.cursor() as cursor:\n await cursor.execute(\"SELECT * FROM lightbus_processed_events\")\n return await cursor.fetchall()\n\n return inner\n\n\n@pytest.fixture()\ndef messages_in_redis(redis_client):\n\n async def inner(api_name, event_name):\n return await redis_client.xrange(f\"{api_name}.{event_name}:stream\")\n\n return inner\n\n\n@pytest.fixture()\ndef transactional_bus_factory(dummy_bus: BusPath, new_redis_pool):\n pool = new_redis_pool(maxsize=10000)\n\n async def inner():\n transport = TransactionalEventTransport(\n child_transport=lightbus.RedisEventTransport(\n redis_pool=pool,\n consumer_group_prefix=\"test_cg\",\n consumer_name=\"test_consumer\",\n stream_use=StreamUse.PER_EVENT,\n )\n )\n config = dummy_bus.client.config\n transport_registry = TransportRegistry().load_config(config)\n transport_registry.set_event_transport(\"default\", transport)\n client = lightbus.BusClient(config=config, transport_registry=transport_registry)\n bus = lightbus.path.BusPath(name=\"\", parent=None, client=client)\n return bus\n\n return inner\n\n\n@pytest.fixture()\ndef transactional_bus(dummy_bus: BusPath, new_redis_pool, aiopg_connection, aiopg_cursor):\n transport = 
TransactionalEventTransport(\n child_transport=lightbus.RedisEventTransport(\n redis_pool=new_redis_pool(maxsize=10000),\n consumer_group_prefix=\"test_cg\",\n consumer_name=\"test_consumer\",\n stream_use=StreamUse.PER_EVENT,\n )\n )\n registry = dummy_bus.client.transport_registry\n registry.set_event_transport(\"default\", transport)\n\n database = DbApiConnection(aiopg_connection, aiopg_cursor)\n # Don't migrate here, that should be handled by the auto-migration\n\n return dummy_bus\n\n\n@pytest.yield_fixture()\nasync def test_table(aiopg_cursor):\n await aiopg_cursor.execute(\"BEGIN -- test_table (setup)\")\n await aiopg_cursor.execute(\"DROP TABLE IF EXISTS test_table\")\n await aiopg_cursor.execute(\"CREATE TABLE test_table (pk VARCHAR(100))\")\n await aiopg_cursor.execute(\"COMMIT -- test_table (setup)\")\n\n class TestTable(object):\n\n async def total_rows(self):\n await aiopg_cursor.execute(\"SELECT COUNT(*) FROM test_table\")\n return (await aiopg_cursor.fetchone())[0]\n\n yield TestTable()\n\n await aiopg_cursor.execute(\"BEGIN -- test_table (tear down)\")\n await aiopg_cursor.execute(\"DROP TABLE test_table\")\n await aiopg_cursor.execute(\"COMMIT -- test_table (tear down)\")\n","sub_path":"tests/transactional_transport/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":6462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"448518007","text":"from flask import jsonify, request, current_app, url_for, g\nfrom app.api_1_0.decorators import permission_required\nfrom ..models import Permission, Post\nfrom .errors import forbidden\nfrom app import db\nfrom . import api\n\n'''\nNote that the resources that were implemented enable a client to offer SUBSET of\nof functionality that is available through the web application. 
The list of supported\nresources could be expanded on need, such as expose followers...\n'''\n\n\n@api.route('/posts/')\ndef get_post(id):\n post = Post.query.get_or_404(id)\n return jsonify(post.to_json())\n\n\n@api.route('/posts/')\ndef get_posts():\n page = request.args.get('page', 1, type=int)\n pagination = Post.query.paginate(page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],\n error_out=False)\n posts = pagination.items\n prev = None\n if pagination.has_prev:\n prev = url_for('api.get_posts', page=page-1)\n next = None\n if pagination.has_next:\n next = url_for('api.get_posts', page=page+1)\n return jsonify({\n 'posts': [post.to_json() for post in posts],\n 'prev': prev,\n 'next': next,\n 'count': pagination.total\n })\n\n\n@api.route('/posts/', methods=['POST'])\n@permission_required(Permission.WRITE)\ndef new_post():\n '''\n Return a three-argument response, with the large argument as redirect link\n :return:\n '''\n # no need to do error catch here\n post = Post.from_json(request.json)\n # explicitly assign the author -> set author_id\n post.author = g.current_user\n db.session.add(post)\n db.session.commit()\n return jsonify(post.to_json()), 201, \\\n {'Location': url_for('api.get_post', id=post.id)} # url for the newly created resource\n\n\n@api.route('/posts/', methods=['PUT'])\n@permission_required(Permission.WRITE)\ndef edit_post(id):\n post = Post.query.get_or_404(id)\n if g.current_user != post.author and \\\n not g.current_user.can(Permission.ADMIN):\n return forbidden(\"insufficient permissions\")\n post.body = request.json.get('body', post.body)\n db.session.add(post)\n db.session.commit()\n return jsonify(post.to_json())\n","sub_path":"app/api_1_0/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"585717416","text":"from payment_cards.tests.common import HolviTestCase\nfrom payment_cards.models import Transaction\nfrom payment_cards.logic import (\n TransactionWebhookHandler, SettlementHandler\n)\n\n\nclass TestSettlementTransaction(HolviTestCase):\n def setUp(self):\n super(TestSettlementTransaction, self).setUp()\n self.init_data = {\n \"transaction_type\": Transaction.PRESENTMENT_TYPE,\n \"card_id\": self.card.id,\n \"transaction_id\": \"1234ZORRO\",\n \"merchant_name\": \"SNEAKERS R US\",\n \"merchant_country\": \"US\",\n \"merchant_mcc\": \"5139\",\n \"billing_amount\": 1,\n \"billing_currency\": self.default_currency,\n \"transaction_amount\": \"100.00\",\n \"transaction_currency\": \"USD\"\n }\n\n def create_presented_transactions(self, num):\n init_data = self.init_data.copy()\n\n for ind in range(num):\n for transaction_type in (\n Transaction.AUTHORISATION_TYPE,\n Transaction.PRESENTMENT_TYPE):\n init_data['transaction_id'] = str(ind)\n init_data['transaction_type'] = transaction_type\n if transaction_type == Transaction.PRESENTMENT_TYPE:\n init_data['settlement_amount'] = 1\n init_data['settlement_currency'] = self.default_currency\n TransactionWebhookHandler(**init_data).handle()\n\n def test_presented_transactions_settled(self):\n num_presented_transactions = 2\n self.create_presented_transactions(\n num_presented_transactions)\n self.assertEqual(\n Transaction.objects.filter(\n status=Transaction.PRESENTED).count(),\n num_presented_transactions\n )\n self.assertEqual(\n Transaction.objects.filter(\n status=Transaction.PAYED_TO_SCHEME).count(),\n 0\n )\n SettlementHandler().settle()\n self.assertEqual(\n 
Transaction.objects.filter(\n status=Transaction.PRESENTED).count(),\n 0\n )\n self.assertEqual(\n Transaction.objects.filter(\n status=Transaction.PAYED_TO_SCHEME).count(),\n num_presented_transactions\n )\n","sub_path":"payment_cards/tests/test_settlement.py","file_name":"test_settlement.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"583546758","text":"import os\nimport pytest\n\nfrom scrapers.commbuys_scraper import CommBuysScraper\n\n\ndef _abs_file(filename):\n \"\"\"Given a filename relative to the current file, returns the absolute filename\"\"\"\n absolute_current_dir = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(absolute_current_dir, filename)\n\n\ndef test_scrape_results_page():\n page_str = open(_abs_file('commbuys-results-page.html'), 'r').read()\n commbuys_scraper = CommBuysScraper()\n bid_ids = commbuys_scraper.scrape_results_page(page_str)\n assert len(bid_ids) == 25\n assert \"BD-17-1002-1003-001-14907\" in bid_ids\n assert \"BD-17-1022-DMH08-8210B-14821\" in bid_ids\n","sub_path":"bidwire/tests/test_commbuys_scraper.py","file_name":"test_commbuys_scraper.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"379067790","text":"import unittest\nfrom unittest.mock import patch, Mock\nfrom django.test import TestCase\nfrom ..forms import (\n ItemForm, ExistingListItemForm, NewListForm,\n EMPTY_ITEM_ERROR, DUPLICATED_ITEM_ERROR\n)\nfrom ..models import List, Item\n\n\nclass ItemFormTest(TestCase):\n\n def test_form_reders_item_text_input(self):\n form = ItemForm()\n self.assertIn('placeholder=\"Enter a to-do item\"', form.as_p())\n self.assertIn('class=\"form-control input-lg\"', form.as_p())\n\n def test_form_validation_for_blank_items(self):\n form = ItemForm(data={'text': ''})\n self.assertFalse(form.is_valid())\n self.assertEqual(form.errors['text'], [EMPTY_ITEM_ERROR])\n\n def test_form_save(self):\n lst = List.objects.create()\n form = ExistingListItemForm(for_list=lst, data={'text': 'text'})\n new_item = form.save()\n self.assertEqual(new_item, Item.objects.all()[0])\n\n\nclass NewListFormTest(unittest.TestCase):\n\n @patch('lists.forms.List.create_new')\n def test_save_creates_new_list_from_post_data_if_user_not_authenticated(\n self, mock_List_create_new):\n user = Mock(is_authenticated=False)\n form = NewListForm(data={'text': 'new item text'})\n form.is_valid()\n form.save(owner=user)\n mock_List_create_new.assert_called_once_with(\n first_item_text='new item text'\n )\n\n @patch('lists.forms.List.create_new')\n def test_save_creates_new_list_with_owner_if_user_authenticated(\n self, mock_List_create_new):\n user = Mock(is_authenticated=True)\n form = NewListForm(data={'text': 'new item text'})\n form.is_valid()\n form.save(owner=user)\n mock_List_create_new.assert_called_once_with(\n first_item_text='new item text', owner=user\n )\n\n @patch('lists.forms.List.create_new')\n def test_save_returns_new_list_object(self, mock_List_create_new):\n user = Mock(is_authenticated=True)\n form = NewListForm(data={'text': 'new item text'})\n form.is_valid()\n response = form.save(owner=user)\n self.assertEqual(response, mock_List_create_new.return_value)\n\n\nclass ExistingListItemFormTest(TestCase):\n\n def test_form_renders_item_text_input(self):\n lst = List.objects.create()\n form = ExistingListItemForm(for_list=lst)\n self.assertIn('placeholder=\"Enter a 
to-do item\"', form.as_p())\n\n def test_form_validation_for_blank_items(self):\n lst = List.objects.create()\n form = ExistingListItemForm(for_list=lst, data={'text': ''})\n self.assertFalse(form.is_valid())\n self.assertEqual(form.errors['text'], [EMPTY_ITEM_ERROR])\n\n def test_form_validation_for_duplicate_items(self):\n lst = List.objects.create()\n Item.objects.create(list=lst, text='no twins')\n form = ExistingListItemForm(for_list=lst, data={'text': 'no twins'})\n self.assertFalse(form.is_valid())\n self.assertEqual(form.errors['text'], [DUPLICATED_ITEM_ERROR])\n","sub_path":"lists/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"563439000","text":"import json\nimport os\nfrom flask import request, _request_ctx_stack\nfrom functools import wraps\nfrom jose import jwt\nfrom urllib.request import urlopen\n# from decouple import config\n\n\nAUTH0_DOMAIN = 'amack.us.auth0.com'\nALGORITHMS = ['RS256']\nAPI_AUDIENCE = 'competition'\n\n# AUTH0_DOMAIN = os.environ['AUTH0_DOMAIN']\n# ALGORITHMS = os.environ['ALGORITHMS']\n# API_AUDIENCE = os.environ['API_AUDIENCE']\n# https://amack.us.auth0.com/authorize?audience=competition&response_type=token&client_id=20AyTE5FtHiIWxCVLLT0OkcmW9PTNwsz&redirect_uri=https://127.0.0.1:3000/\n\n\nclass AuthError(Exception):\n \"\"\" A standardized way to communicate auth failure modes\n Params:\n * error: description of error\n * status_code [type: int]: the HTTP status code\n \"\"\"\n\n def __init__(self, error, status_code):\n self.error = error\n self.status_code = status_code\n\n\ndef get_token_auth_header():\n \"\"\" get token header from auth header\n\n Params:\n * none\n\n - Checks if the auth header has exactly 2 parts.\n Looking for bearer and it's token.\n - If not 2 parts, raise 401 error\n - Checks if bearer token is present\n - If not present, raise 401 error\n \"\"\"\n\n # check if authorization is in request header\n if 'Authorization' not in request.headers:\n raise AuthError({\n 'code': 'no_authorization',\n 'description': 'No authorization present in headers.'\n }, 401)\n\n auth_header = request.headers['Authorization']\n header_parts = auth_header.split(' ')\n\n # check for bearer token\n if len(header_parts) != 2:\n raise AuthError({\n 'code': 'header_parts',\n 'description': 'more or less than 2 parts in auth_header.'\n }, 401)\n elif header_parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'no_bearer',\n 'description': 'Bearer token not present.'\n }, 401)\n\n return header_parts[1]\n\n\ndef check_permissions(permission, payload):\n \"\"\" Checks to see if permissions are present in JWT\n Params:\n - permission [type: str]: Defines user's permission\n - example: get:drink-details\n - payload [type: dict]: The decoded JWT\n\n - Checks if permissions are present in payload\n - if not, raise 400 error\n - Checks if specific permission asked for is present\n - if not, raise 401 error\n \"\"\"\n\n # checks if permissions field is present in payload\n if 'permissions' not in payload:\n raise AuthError({\n 'code': 'invalid_claims',\n 'description': 'Permissions not included in JWT'\n }, 400)\n\n # checks if specific permission asked for is present\n if permission not in payload['permissions']:\n raise AuthError({\n 'code': 'unauthorized',\n 'description': 'Permissions not found. 
Unauthorized.',\n 'error': 401\n }, 401)\n return True\n raise Exception('Not Implemented')\n\n\ndef verify_decode_jwt(token):\n \"\"\" Checks that jwt is valid and return decoded token in a dict\n Params:\n - token [type: str]: JWT passed from authentication\n\n - Raises [code#, code]:\n - [401, 'token_expired]: Token is expired\n - [401, 'invalid_claims]: Incorrect claims\n - [400, 'invalid_header]: unable to parse authentication token\n - [400, 'invalid_header]: Unable to find appropriate key\n\n \"\"\"\n # get the public key from auth0\n jsonurl = urlopen(f'https://{AUTH0_DOMAIN}/.well-known/jwks.json')\n jwks = json.loads(jsonurl.read())\n\n # get the data in header\n unverified_header = jwt.get_unverified_header(token)\n\n # get key\n rsa_key = {}\n if 'kid' not in unverified_header:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization malformed.'\n }, 401)\n\n for key in jwks['keys']:\n if key['kid'] == unverified_header['kid']:\n rsa_key = {\n 'kty': key['kty'],\n 'kid': key['kid'],\n 'use': key['use'],\n 'n': key['n'],\n 'e': key['e']\n }\n if rsa_key:\n try:\n # use the key to validate the JWT\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer='https://' + AUTH0_DOMAIN + '/'\n )\n return payload\n # token is experpied\n except jwt.ExpiredSignatureError:\n raise AuthError({\n 'code': 'token_expired',\n 'description': 'Token expired.'\n }, 401)\n # user does not have valid claim\n except jwt.JWTClaimsError:\n raise AuthError({\n 'code': 'invalid_claims',\n 'description': \"\"\"Incorrect claims.\n Please check the audience and issuer.\"\"\"\n }, 401)\n # token has error\n except Exception:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Unable to parse authentication token.'\n }, 400)\n\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Unable to find the appropriate key.'\n }, 400)\n\n\ndef requires_auth(permission=''):\n \"\"\" Gets the access token, verifies and decodes token,\n then checks specific permissions\n\n - Params:\n - permission [type: str]: the permission to check token for.\n Defined by route\n \"\"\"\n def requires_auth_decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n token = get_token_auth_header()\n payload = verify_decode_jwt(token)\n check_permissions(permission, payload)\n return f(payload, *args, **kwargs)\n return wrapper\n return requires_auth_decorator\n","sub_path":"auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":5972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"240033049","text":"'''This problem was asked by Palantir.\n\nIn academia, the h-index is a metric used to calculate\n the impact of a researcher's papers. It is calculated as follows:\nA researcher has index h if at least h of her N papers \nhave h citations each. If there are multiple h satisfying \nthis formula, the maximum is chosen.\nFor example, suppose N = 5, and the respective citations \nof each paper are [4, 3, 0, 1, 5]. 
Then the h-index would\n be 3, since the researcher has 3 papers with at least 3 citations.\nGiven a list of paper citations of a researcher, calculate their h-index.\n\nh-index is found by returning the first\ncitation that is larger or equal to its \nindex.'''\ndef h_index(arr):\n\tarr.sort(reverse = True)\n\n\t#enumerate through the array and return index\n\tfor i, citation in enumerate(arr):\n\t\tif i >= citation:\n\t\t\treturn i\n# Tests\nprint(h_index([4, 3, 0, 1, 5]))\nprint (h_index([4, 1, 0, 1, 1]))\nprint (h_index([4, 4, 4, 5, 4]))","sub_path":"Solutions/day241.py","file_name":"day241.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"374753697","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nimport os\nimport random\nimport zipfile\n\nimport numpy as np\nimport urllib\nimport tensorflow as tf\n\n\n# Step 1: Download the data.\nurl = 'http://mattmahoney.net/dc/'\n\n\ndef maybe_download(filename, expected_bytes):\n \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename\n\n\nfilename = maybe_download('text8.zip', 31344016)\n\n\n# Read the data into a list of strings.\ndef read_data(filename):\n \"\"\"Extract the first file enclosed in a zip file as a list of words.\"\"\"\n with zipfile.ZipFile(filename) as f:\n data = tf.compat.as_str(f.read(f.namelist()[0])).split()\n return data\n\n\nwords = read_data(filename)\nprint('Data size %d' % len(words))\n\n# Step 2: Build the dictionary and replace rare words with UNK token.\nvocabulary_size = 50000\n\n\ndef build_dataset(words):\n \"\"\"Process raw inputs into a dataset.\"\"\"\n count = [['UNK', -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count = unk_count + 1\n data.append(index)\n count[0][1] = unk_count\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return data, count, dictionary, reverse_dictionary\n\n\ndata, count, dictionary, reverse_dictionary = build_dataset(words)\n\nprint('Most common words (+UNK)', count[:5])\nprint('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])\ndel words # Hint to reduce memory.\n\ndata_index = 0\n\n\n# Step 3: Function to generate a training batch for the skip-gram model.\ndef generate_batch(batch_size, skip_window):\n global data_index\n assert skip_window % 2 == 1\n span = 2 * skip_window + 1 # [ skip_window target skip_window ]\n\n batch = np.ndarray(shape=(batch_size, span-1), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n buffer = collections.deque(maxlen=span)\n for _ in range(span):\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n for i in range(batch_size):\n target = skip_window # target label at the center of the buffer\n 
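    # CBOW layout: each batch row stores the 2*skip_window context words; the center word is the label
    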
target_to_avoid = [skip_window] # we only need to know the words around a given word, not the word itself\n\n col_idx = 0\n for j in range(span):\n if j == span//2:\n continue\n\n batch[i, col_idx] = buffer[j] # [skip_window] => middle element\n col_idx += 1\n labels[i, 0] = buffer[target]\n\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n\n assert batch.shape[0] == batch_size and batch.shape[1] == span-1\n return batch, labels\n\n\n''\n\nnum_steps = 100001\n\nbatch_size = 128\nembedding_size = 128 # Dimension of the embedding vector\nskip_window = 1 # How many words to consider left and right\nnum_skips = 2 # How many lines to reuse an input to generate a label\n\nvalid_size = 16 # Random set of words to evaluate similarity on\nvalid_window = 100 # Only pick dev samples in the head of the distribution\n# pick 16 samples in 100\nvalid_examples = np.array(random.sample(range(valid_window), valid_size // 2))\nvalid_examples = np.append(valid_examples, random.sample(range(1000, 1000 + valid_window), valid_size // 2))\nnum_sampled = 64 # Number of negative examples to sample.\n\ngraph = tf.Graph()\n\nwith graph.as_default(), tf.device('/cpu:0'):\n\n # Input data.\n train_dataset = tf.compat.v1.placeholder(tf.int32, shape=[batch_size, 2 * skip_window])\n train_labels = tf.compat.v1.placeholder(tf.int32, shape=[batch_size, 1])\n valid_dataset = tf.compat.v1.constant(valid_examples, dtype=tf.int32)\n\n # Variables.\n # embedding, vector for each word in the vocabulary.\n embeddings = tf.Variable(tf.compat.v1.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n\n # Construct the variables for the softmax loss\n softmax_weights = tf.Variable(\n tf.compat.v1.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n embeds = None\n for i in range(2 * skip_window):\n embedding_i = tf.nn.embedding_lookup(embeddings, train_dataset[:, 1])\n print('embedding %d shape: %s' % (i, embedding_i.get_shape().as_list()))\n emb_x, emb_y = embedding_i.get_shape().as_list()\n if embeds is None:\n embeds = tf.reshape(embedding_i, [emb_x, emb_y, 1])\n else:\n embeds = tf.concat([embeds, tf.reshape(embedding_i, [emb_x, emb_y, 1])], 2)\n\n assert embeds.get_shape().as_list()[2] == 2 * skip_window\n print(\"Concat embedding size: %s\" % embeds.get_shape().as_list())\n avg_embed = tf.reduce_mean(embeds, 2, keepdims=False)\n print(\"Avg embedding size: %s\" % avg_embed.get_shape().as_list())\n\n # Compute the average softmax loss for the batch.\n loss = tf.reduce_mean(\n tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, train_labels, avg_embed, num_sampled,\n vocabulary_size))\n\n # Construct the Adagrad optimizer using a learning rate of 1.0.\n optimizer = tf.compat.v1.train.AdagradOptimizer(1.0).minimize(loss)\n\n # Compute the cosine similarity between mini-batch examples and all embeddings.\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))\n normalized_embeddings = embeddings / norm\n valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)\n similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))\n\n # Add variable initializer.\n init = tf.compat.v1.global_variables_initializer()\n\n# Step 5: Begin training.\n\nwith tf.compat.v1.Session(graph=graph) as session:\n # We must initialize all variables before we use them.\n init.run()\n print('Initialized')\n\n average_loss = 0\n for step in 
range(num_steps):\n batch_data, batch_labels = generate_batch(batch_size, skip_window)\n feed_dict = {train_dataset: batch_data, train_labels: batch_labels}\n\n # We perform one update step by evaluating the optimizer op (including it\n # in the list of returned values for session.run()\n _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)\n average_loss += loss_val\n\n if step % 2000 == 0:\n if step > 0:\n average_loss /= 2000\n # The average loss is an estimate of the loss over the last 2000 batches.\n print('Average loss at step ', step, ': ', average_loss)\n average_loss = 0\n\n # Note that this is expensive (~20% slowdown if computed every 500 steps)\n if step % 10000 == 0:\n sim = similarity.eval()\n for i in range(valid_size):\n valid_word = reverse_dictionary[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k + 1]\n log_str = 'Nearest to %s:' % valid_word\n for k in range(top_k):\n close_word = reverse_dictionary[nearest[k]]\n log_str = '%s %s,' % (log_str, close_word)\n print(log_str)\n final_embeddings = normalized_embeddings.eval()\n\n\n# Step 6: Visualize the embeddings.\n\n\ndef plot_with_labels(low_dim_embs, labels, filename='cbow_tsne.png'):\n assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'\n plt.figure(figsize=(18, 18)) # in inches\n for i, label in enumerate(labels):\n x, y = low_dim_embs[i, :]\n plt.scatter(x, y)\n plt.annotate(label,\n xy=(x, y),\n xytext=(5, 2),\n textcoords='offset points',\n ha='right',\n va='bottom')\n\n plt.savefig(filename)\n\n\ntry:\n # pylint: disable=g-import-not-at-top\n from sklearn.manifold import TSNE\n import matplotlib.pyplot as plt\n\n cbow_tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)\n plot_only = 500\n low_dim_embs = cbow_tsne.fit_transform(final_embeddings[:plot_only, :])\n labels = [reverse_dictionary[i] for i in range(plot_only)]\n plot_with_labels(low_dim_embs, labels)\n\nexcept ImportError:\n print('Please install sklearn, matplotlib, and scipy to show embeddings.')\n","sub_path":"CBOW.py","file_name":"CBOW.py","file_ext":"py","file_size_in_byte":9109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"6124858","text":"from django.urls import path, re_path\nfrom main import views\n\n\nurlpatterns = [\n path('', views.index),\n path('todo/', views.todo, name='todo'),\n path('todo/1/completed',views.todo_completed),\n path('done_task/', views.done_task, name=\"done_task\"),\n]\n","sub_path":"Lab_5/TODO/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"138411432","text":"import socket, threading\n\nclass ChatServer():\n\n def __init__(self, port, host = 'localhost', keyword = '¬'):\n self.port = port\n self.host = host\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.s.bind((self.host, self.port))\n except socket.error:\n print('Bind failed %s' % (socket.error))\n sys.exit()\n\n self.s.listen(5)\n\n def response(self, conn, addr):\n print('Client connected with ' + addr[0] + ':' + str(addr[1]))\n reply = ''\n while reply != '¬':\n data = conn.recv(1024)\n print('Cliente {}: '.format(str(addr[1])),data.decode())\n reply = input('... 
')\n conn.sendall(reply.encode()) \n \n print('Servidor cerrado')\n conn.close() \n quit()\n\n def start(self):\n print('[Esperando conexión ...]')\n\n while True:\n conn,addr = self.s.accept()\n threading.Thread(target=self.response, args=(conn, addr)).start()\n\n\n\nif __name__ == '__main__':\n PORT = 12220\n server = ChatServer(PORT)\n\n server.start()\n\n\n ","sub_path":"E2_socket_chat/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"619182922","text":"import rules\nimport pytest\nfrom utiltest import *\n\n\ndef test_hello_world():\n assert 1 == 1\n\n\ndef test_load_json_from_string():\n rule_path = get_rule_file('test_load_json_from_string_rules.json')\n content = str(read_file(rule_path))\n ruleset = rules.parse_string(content)\n assert len(ruleset.rules) == 1\n\n\ndef test_load_json_from_file():\n rule_path = get_rule_file('test_load_json_from_file_rules.json')\n ruleset = rules.parse_file(rule_path.as_posix())\n assert len(ruleset.rules) == 1\n\n\ndef test_load_json_with_include():\n rule_path = get_rule_file('test_load_json_with_include_rules_1.json')\n ruleset = rules.parse_file(rule_path.as_posix())\n assert len(ruleset.rules) == 1\n\n\n# Should only include like 2 files, if this takes longer than 10\n# seconds and \"runs as it should\" then we have bigger problems....\n@pytest.mark.timeout(10)\ndef test_circular_include():\n rule_path = get_rule_file('test_load_circular_import_1.json')\n ruleset = rules.parse_file(rule_path.as_posix())\n assert len(ruleset.rules) == 2\n\n\ndef test_only_towards_necessary():\n rule_path = get_rule_file('test_only_towards_necessary.json')\n ruleset = rules.parse_file(rule_path.as_posix())\n assert len(ruleset.rules) == 1\n\n rule = ruleset.rules[0]\n assert rule.towards == 'only-towards-necessary'\n assert rule.points\n assert rule.needs\n\n\ndef test_inspect_simple_rule():\n rule_path = get_rule_file('test_inspect_simple_rule.json')\n ruleset = rules.parse_file(rule_path.as_posix())\n assert len(ruleset.rules) == 1\n\n rule = ruleset.rules[0]\n assert rule.needs == \"inspect-need\"\n assert rule.towards == \"inspect-towards\"\n assert rule.points == 1\n assert rule.deadline == \"01-01-2017 08:00\"\n assert rule.name == \"Inspect Rule\"\n assert rule.late == \"halved\"\n\n\ndef test_inspect_complex_rule():\n rule_path = get_rule_file('test_inspect_complex_rule.json')\n ruleset = rules.parse_file(rule_path.as_posix())\n assert len(ruleset.rules) == 1\n\n need_expression = {\n \"OR\": [\n False,\n {\n \"positive\": {\n \"get\": \"some-other-goal\"\n }\n },\n {\n \">\": {\n \"lhs\": 4,\n \"rhs\": 2\n }\n }\n ]\n }\n\n value_expression = {\n \"+\": [\n 1,\n 2,\n 3,\n {\n \"*\": [\n 4,\n 2\n ]\n },\n {\n \"-\": [\n 4,\n 3,\n 1\n ]\n },\n {\n \"/\": [\n 42,\n 21\n ]\n },\n {\n \"get\": \"another-goal\"\n },\n {\n \"MAX\": [1, 2, 1]\n }\n ]\n }\n\n rule = ruleset.rules[0]\n assert rule.needs == need_expression\n assert rule.towards == \"inspect-complex-towards\"\n assert rule.points == value_expression\n","sub_path":"test/test_rules.py","file_name":"test_rules.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52721581","text":"############################# IMPORT LIBRARIES ############################\nimport numpy as np\nimport cv2\nimport math\nimport imgaug.augmenters as iaa\nimport re\nimport 
imutils\n\nimport torch\nfrom torch.autograd import Variable\n\n######################### Custom Vein Loss #########################\n####################################################################\n\nclass Vein_loss_class(torch.nn.Module):\n def __init__(self, cropped_fldr, bounding_box_folder, data_folder):\n super(Vein_loss_class, self).__init__()\n self.bounding_box_folder = bounding_box_folder\n self.cropped_fldr = cropped_fldr\n self.data_folder = data_folder\n self.height = 90\n self.width = 70\n self.th = 10\n self.thresh_h = 200 # 150 # \n self.thresh_l = 70 # 85 # \n\n def get_vein_img(self, save_vein_pic = True,\n save_bb = True):\n crop = []\n for sample in range(0, self.total_input): \n\n # Error removing for augmented data---------------------\n file, point, point_pred = str(self.img_name[sample]), self.output[sample], self.target[sample]\n if((file.find('_flrot_') != -1) | (file.find('_flrotVera_') != -1)):\n point1 = np.array(point[0:2])\n point2 = np.array(point[2:4])\n point_changed = []\n point_changed.append(point2)\n point_changed.append(point1)\n self.output[sample] = np.array(point_changed).reshape((1, 4))\n\n point1 = np.array(point_pred[0:2])\n point2 = np.array(point_pred[2:4])\n point_changed = []\n point_changed.append(point2)\n point_changed.append(point1)\n self.target[sample] = np.array(point_changed).reshape((1, 4))\n # -------------------------------------------------------\n \n top_left = self.output[sample, 0:2]\n top_right = self.output[sample, 2:4]\n \n # Find the angle to rotate the image\n angle = (180/np.pi) * (np.arctan((top_left[1] - top_right[1])/\n (top_left[0] - top_right[0])))\n \n # Rotate the image to cut rectangle from the images\n points_pred = (self.output[sample]).reshape((1, 2, 2))\n points_test = (self.target[sample]).reshape((1, 2, 2))\n img = cv2.imread(self.data_folder + self.img_name[sample])\n # image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n image = []\n image.append(img)\n image = np.array(image)\n image_rotated , keypoints_pred_rotated = iaa.Affine(rotate=-angle)(images=image, \n keypoints=points_pred)\n _ , keypoints_test_rotated = iaa.Affine(rotate=-angle)(images=image, \n keypoints=points_test)\n \n # Check if the image is fully rotated that left goes to the right side of hand\n if(keypoints_pred_rotated[0, 0, 0] > keypoints_pred_rotated[0, 1, 0]):\n # Again rotate the picture to 180 with the points\n image = image_rotated\n image_rotated , keypoints_pred_rotated = iaa.Affine(rotate=180)(images=image, \n keypoints=keypoints_pred_rotated)\n _ , keypoints_test_rotated = iaa.Affine(rotate=180)(images=image, \n keypoints=keypoints_test_rotated)\n\n image_rotated = image_rotated[0]\n keypoints_pred_rotated = keypoints_pred_rotated.reshape((2, 2))\n keypoints_test_rotated = keypoints_test_rotated.reshape((2, 2))\n \n # Rotated Points\n top_left = keypoints_pred_rotated[0]\n top_left[0] = top_left[0] - self.th\n top_right = keypoints_pred_rotated[1]\n top_right[0] = top_right[0] + self.th\n self.width = int(abs(top_right - top_left)[0])\n self.height = int(self.width * (90/80))\n centre = tuple([top_left[0] + int(self.width/2), top_left[1] + int(self.height/2)])\n\n # Crop the Vein Image\n cropped = cv2.getRectSubPix(image_rotated, (self.width, self.height), \n centre)\n crop.append(cropped)\n if(save_vein_pic):\n cv2.imwrite(self.cropped_fldr + self.img_name[sample], cropped)\n \n # Draw Predicted Troughs\n points = keypoints_pred_rotated.reshape((2, 2)) \n color = [(255, 255, 255), (0, 0, 0)] # Left - White, # Right - 
Black\n count = 0 \n for point in points: \n point = np.array(point).astype(int)\n cv2.circle(image_rotated, (point[0], point[1]), \n 5, color[count], -1)\n count += 1\n \n # Draw Actual Troughs\n points = keypoints_test_rotated.reshape((2, 2)) \n for point in points: \n point = np.array(point).astype(int)\n cv2.circle(image_rotated, (point[0], point[1]), \n 5, (255, 0, 0), -1)\n\n bottom_right = [int(top_left[0] + self.width) , int(top_left[1] + self.height)]\n\n # Draw Bounding Boxes and Save the image\n image_rotated = cv2.rectangle(image_rotated, tuple(top_left), tuple(bottom_right) , (0,0,0), 2)\n if(save_bb):\n cv2.imwrite(self.bounding_box_folder + self.img_name[sample], \n image_rotated)\n crop = np.array(crop)\n return crop\n \n def forward(self,target, output, input, img_name, ids):\n \n self.target = target.cpu().numpy()\n self.output = output.cpu().data.numpy()\n self.input = input.cpu().numpy()\n self.id = ids.cpu().numpy()\n self.id = np.array(self.id, dtype = 'int32')\n self.img_name = img_name\n self.total_input = len(self.id)\n\n vein_image = self.get_vein_img()\n vein_loss = 0\n # Calculate loss from extracted Vein Image\n loss_logger = []\n names = []\n for sample in range(0, self.total_input):\n image = cv2.cvtColor(vein_image[sample], cv2.COLOR_RGB2GRAY)\n accu = ((image <= self.thresh_h) & (image >= self.thresh_l))\n true = np.count_nonzero(accu)\n false = (accu.shape[0] * accu.shape[1]) - true\n loss = Variable(torch.tensor((false / (false + true))), requires_grad=True)\n vein_loss += loss\n loss_logger.append(loss)\n names.append(self.img_name[sample])\n\n self.loss_logger = loss_logger\n self.names = names\n\n vein_loss = vein_loss / self.total_input\n \n return vein_loss * 100\n\nif __name__ == \"__main__\":\n pass\n # main()\n","sub_path":"Train_only_Bosphorous/veinloss.py","file_name":"veinloss.py","file_ext":"py","file_size_in_byte":6948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"630478407","text":"# 0722 과제\r\n\r\n##############과제##############\r\n# keras04_homework.py\r\n# x_train = 1~100\r\n# y_trian = 501 ~ 600\r\n# x_test = 1001 ~ 1100\r\n# y_test = 1101 ~ 1200\r\n# 위 데이터를 이용하여 모델 만들기\r\n################################\r\n\r\n# 데이터\r\nimport numpy as np\r\n\r\nx_train = np.arange(1,101)\r\ny_train = np.arange(501,601)\r\nx_test = np.arange(1001,1101)\r\ny_test = np.arange(1101,1201)\r\n\r\n# x_train_mean = int(np.mean(x_train))\r\n# y_train_mean = int(np.mean(y_train))\r\n\r\n# x_test_mean = int(np.mean(x_test))\r\n# y_test_mean = int(np.mean(y_test))\r\n\r\n\r\n\r\n\r\n# input_data_x = np.array([x_train_mean, x_test_mean])\r\n# input_data_y = np.array([y_train_mean, y_test_mean])\r\n\r\n# print(input_data_x)\r\n# print(input_data_y)\r\n# print(x_train)\r\n# print(y_train)\r\n# print(x_test)\r\n# print(y_test)\r\n\r\n\r\n# 모델 구성\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\nmodel = Sequential()\r\n\r\nmodel.add(Dense(30,input_dim=1, activation='relu'))\r\nmodel.add(Dense(24))\r\nmodel.add(Dense(14))\r\nmodel.add(Dense(25))\r\nmodel.add(Dense(1))\r\n\r\n\r\n# 훈련\r\nmodel.compile(loss='mse',optimizer='adam', metrics=['accuracy'])\r\nmodel.fit(x_train, y_train,epochs=1000)\r\n\r\n# 평가\r\nlose, acc = model.evaluate(x_test, y_test, batch_size=1)\r\nprint('acc:', acc)\r\n\r\ny_predict = 
model.predict(x_test)\r\nprint(y_predict)\r\n","sub_path":"keras/0722/keras04_homework.py","file_name":"keras04_homework.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"591960416","text":"\"\"\"\nColumbia W4111 Intro to databases\nExample webserver\nTo run locally\n python server.py\nGo to http://localhost:8111 in your browser\nA debugger such as \"pdb\" may be helpful for debugging.\nRead about it online.\n\"\"\"\n\nimport os\nimport random\nfrom sqlalchemy import *\nfrom sqlalchemy.pool import NullPool\nfrom flask import Flask, request, render_template, g, redirect, Response\nfrom flask import Flask, flash, redirect, render_template, request, session, abort\nfrom flask import jsonify\nimport csv\nimport datetime\n\ntmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\napp = Flask(__name__, template_folder=tmpl_dir)\n\n\n\n# XXX: The Database URI should be in the format of:\n#\n# postgresql://USER:PASSWORD@/\n#\n# For example, if you had username ewu2493, password foobar, then the following line would be:\n#\n# DATABASEURI = \"postgresql://ewu2493:foobar@/postgres\"\n#\n# For your convenience, we already set it to the class database\n\n# Use the DB credentials you received by e-mail\nDB_USER = \"hy2574\"\nDB_PASSWORD = \"q74ml90c\"\n\nDB_SERVER = \"w4111.cisxo09blonu.us-east-1.rds.amazonaws.com\"\n\nDATABASEURI = \"postgresql://\"+DB_USER+\":\"+DB_PASSWORD+\"@\"+DB_SERVER+\"/w4111\"\n\n\n#\n# This line creates a database engine that knows how to connect to the URI above\n#\nengine = create_engine(DATABASEURI)\n\n\n# Here we create a test table and insert some values in it\nengine.execute(\"\"\"DROP TABLE IF EXISTS test;\"\"\")\nengine.execute(\"\"\"CREATE TABLE IF NOT EXISTS test (\n id serial,\n name text\n);\"\"\")\nengine.execute(\"\"\"INSERT INTO test(name) VALUES ('grace hopper'), ('alan turing'), ('ada lovelace');\"\"\")\n\nschools = []\nranks = []\nwith open('static/u_rank.csv', 'rb') as file:\n reader = csv.reader(file)\n header = reader.next()\n for row in reader:\n schools.append(row[0])\n ranks.append(row[1])\n\ns_rank = dict(zip(schools, ranks))\nprint(schools)\nprint(s_rank)\n\n@app.before_request\ndef before_request():\n \"\"\"\n This function is run at the beginning of every web request\n (every time you enter an address in the web browser).\n We use it to setup a database connection that can be used throughout the request\n The variable g is globally accessible\n \"\"\"\n try:\n g.conn = engine.connect()\n except:\n print(\"uh oh, problem connecting to database\")\n import traceback; traceback.print_exc()\n g.conn = None\n\n@app.teardown_request\ndef teardown_request(exception):\n \"\"\"\n At the end of the web request, this makes sure to close the database connection.\n If you don't the database could run out of memory!\n \"\"\"\n try:\n g.conn.close()\n except Exception as e:\n pass\n\n\n#\n# @app.route is a decorator around index() that means:\n# run index() whenever the user tries to access the \"/\" path using a GET request\n#\n# If you wanted the user to go to e.g., localhost:8111/foobar/ with POST or GET then you could use\n#\n# @app.route(\"/foobar/\", methods=[\"POST\", \"GET\"])\n#\n# PROTIP: (the trailing / in the path is important)\n#\n# see for routing: http://flask.pocoo.org/docs/0.10/quickstart/#routing\n# see for decorators: http://simeonfranklin.com/blog/2012/jul/1/python-decorators-in-12-steps/\n#\n@app.route('/')\ndef 
index():\n \"\"\"\n request is a special object that Flask provides to access web request information:\n request.method: \"GET\" or \"POST\"\n request.form: if the browser submitted a form, this contains the data in the form\n request.args: dictionary of URL arguments e.g., {a:1, b:2} for http://localhost?a=1&b=2\n See its API: http://flask.pocoo.org/docs/0.10/api/#incoming-request-data\n \"\"\"\n\n # DEBUG: this is debugging code to see what request looks like\n if not session.get('logged_in'):\n return render_template('login.html')\n print (request.args)\n\n #\n # example of a database query\n #\n cursor = g.conn.execute(\"SELECT name FROM test\")\n names = []\n for result in cursor:\n names.append(result['name']) # can also be accessed using result[0]\n cursor.close()\n\n #\n # Flask uses Jinja templates, which is an extension to HTML where you can\n # pass data to a template and dynamically generate HTML based on the data\n # (you can think of it as simple PHP)\n # documentation: https://realpython.com/blog/python/primer-on-jinja-templating/\n #\n # You can see an example template in templates/index.html\n #\n # context are the variables that are passed to the template.\n # for example, \"data\" key in the context variable defined below will be\n # accessible as a variable in index.html:\n #\n # # will print: [u'grace hopper', u'alan turing', u'ada lovelace']\n #
<div>{{data}}</div>\n    #\n    # # creates a <div> tag for each element in data\n    # # will print:\n    # #\n    # #   <div>grace hopper</div>\n    # #   <div>alan turing</div>\n    # #   <div>ada lovelace</div>\n    # #\n    # {% for n in data %}\n    #    <div>{{n}}</div>
\n # {% endfor %}\n #\n context = dict(data = names)\n\n\n #\n # render_template looks in the templates/ folder for files.\n # for example, the below file reads template/index.html\n #\n return render_template(\"index.html\", **context)\n\n@app.route('/test')\ndef test():\n context = dict(data=schools)\n return render_template(\"test.html\", **context)\n\n@app.route('/another')\ndef another():\n return render_template(\"anotherfile.html\")\n\n'''Completed'''\n@app.route('/register', methods= ['POST','GET'])\ndef register():\n if request.method == 'POST':\n cursor = g.conn.execute(\"SELECT user_account FROM user_affiliation Where user_account = '%s';\" % request.form['username'])\n cursor1 = g.conn.execute(\"SELECT email FROM user_affiliation Where email = '%s';\" % request.form['email'])\n rows = cursor.fetchall()\n rows1 = cursor1.fetchall()\n flag = False\n if rows or rows1:\n flag = True\n context = dict(flag=flag,schools = schools)\n cursor.close()\n return render_template(\"register.html\", **context)\n else:\n query = \"INSERT INTO user_affiliation (user_account, nickname, password, birthdate, email, gender, school_name, dept_name, degree, credit) VALUES (%s, %s, %s,%s, %s, %s, %s, %s, %s, %s);\"\n data = (request.form['username'], request.form['nickname'],request.form['password'], request.form['birthdate'],request.form['email'], request.form['gender'],request.form['school'],request.form['department'],request.form['degree'], 100)\n print(data)\n cursor = g.conn.execute(query, data)\n print(\"successfully registered\")\n session['logged_in'] = False\n cursor.close()\n context = dict(flag = True)\n return render_template('login.html', **context)\n else:\n context = dict(schools=schools)\n return render_template(\"register.html\", **context)\n\n@app.route('/get_dept', methods=[\"POST\"])\ndef get_dept():\n school = request.form[\"school\"]\n cursor = g.conn.execute(\"SELECT name from department_has where school_name = '%s';\" % school)\n depts = []\n for dept in cursor.fetchall():\n depts.append(dept[0])\n return jsonify(depts=depts)\n\n# Example of adding new data to the database\n# @app.route('/add', methods=['POST'])\n# def add():\n# name = request.form['name']\n# print(name)\n# cmd = 'INSERT INTO test(name) VALUES (:name1), (:name2)';\n# g.conn.execute(text(cmd), name1 = name, name2 = name);\n# return redirect('/')\n\n#TODO: add some notifications!!\n@app.route('/login', methods=['POST', 'GET'])\ndef do_admin_login():\n if request.method == 'POST':\n cursor = g.conn.execute(\"SELECT user_account, password FROM user_affiliation Where user_account = '%s';\" % request.form['username'])\n rows = cursor.fetchall()\n if not rows:\n flash(\"No such account exisits!\")\n else:\n password = rows[0][1]\n print(password)\n if password == request.form['password']:\n session['logged_in'] = True\n session['username'] = request.form['username']\n else:\n flash(\"Wrong password!\")\n cursor.close()\n return index()\n else:\n return render_template(\"login.html\")\n\n@app.route('/profile_navigate')\ndef profile_navigate():\n query = '''SELECT * from user_affiliation where user_account = '%s'; ''' % session[\"username\"]\n cursor = g.conn.execute(query)\n rows = cursor.fetchall()[0]\n print(rows)\n names = [\"\", \"nickname\", \"birthdate\", \"email\", \"degree\", \"credit\", \"gender\", \"\", \"school\", \"department\"]\n profile = {}\n for i in range(len(rows)):\n if i != 0 and i != 7:\n profile[names[i]] = rows[i]\n context = dict(profile=profile)\n return 
render_template(\"navigate_profile.html\", **context)\n\n@app.route('/set_profile', methods = ['POST','GET'])\ndef set_profile():\n if request.method =='GET':\n return render_template(\"set_profile.html\",schools=schools)\n else:\n print(\"What the fuck?\")\n query1 = \"SELECT email FROM user_affiliation Where email='%s' and user_account!='%s';\"\n cursor1 = g.conn.execute(query1 % (request.form['email'],session['username']))\n rows1 = cursor1.fetchall()\n if rows1:\n print(\"row is empty\")\n flag = True\n context = dict(flag=flag, schools=schools)\n cursor1.close()\n return render_template(\"set_profile.html\", **context)\n query = \"UPDATE user_affiliation SET nickname=%s, birthdate=%s, email=%s, gender=%s, school_name=%s, dept_name=%s, degree=%s WHERE user_account = %s;\"\n data = (request.form[\"nickname\"], request.form[\"birthdate\"], request.form[\"email\"], request.form[\"gender\"], request.form[\"school\"], request.form[\"department\"], request.form[\"degree\"], session['username'])\n print(\"right!!!!\")\n g.conn.execute(query, data)\n print(\"right!!!!agian!!\")\n modified_flag = True\n return render_template(\"notification.html\", modified_flag = modified_flag)\n\n@app.route(\"/set_password\", methods=['POST','GET'])\ndef set_passowrd():\n if request.method == 'POST':\n query = \"UPDATE user_affiliation SET password = '%s';\" % request.form[\"new_password\"]\n g.conn.execute(query)\n session['logged_in'] = False\n return redirect(\"/login\")\n else:\n return render_template(\"set_password.html\")\n\n'''Completed'''\n@app.route('/setPreference', methods=['POST', 'GET'])\ndef set_preference():\n if request.method == 'GET':\n cursor = g.conn.execute(\"SELECT * FROM preference_has Where user_account = '%s';\" % session['username'])\n rows = cursor.fetchall()\n preference = []\n if rows:\n preference = rows[0]\n print(preference)\n context = dict(preference = preference)\n return render_template('set_preference.html', **context)\n else:\n cursor = g.conn.execute(\"SELECT * FROM preference_has Where user_account = '%s';\" % session['username'])\n rows = cursor.fetchall()\n data = (request.form['gender'], request.form['same_school'], request.form['rank'], session['username'])\n if rows:\n query = \"UPDATE preference_has SET gender = %s, same_school=%s, rank = %s WHERE user_account = %s;\"\n g.conn.execute(query, data)\n else:\n query = \"INSERT INTO preference_has(gender, same_school, rank, user_account) VALUES (%s, %s, %s,%s);\"\n g.conn.execute(query, data)\n\n cursor1 = g.conn.execute(\"SELECT * FROM preference_has Where user_account = '%s';\" % session['username'])\n context = dict(preference=cursor1.fetchall()[0])\n return render_template('set_preference.html', **context)\n\n@app.route('/goal')\ndef goal():\n cursor = g.conn.execute(\"SELECT * FROM set_goal Where user_account = '%s';\" % session['username'])\n rows = cursor.fetchall()\n print(rows)\n goal_info = []\n for goal in rows:\n sections = []\n cursor1 = g.conn.execute(\"SELECT * FROM has_section WHERE user_account = '%s' and goal_id = %s;\" % (session['username'], goal[0]))\n for section in cursor1.fetchall():\n sections.append((section[0], section[1]))\n info = {}\n info['id'] = goal[0]\n info['time'] = goal[2]\n info['name'] = goal[3]\n info['sections'] = sections\n goal_info.append(info)\n context = dict(goal_info = goal_info)\n return render_template('goal.html',**context)\n\n@app.route('/delete_goal', methods=['POST'])\ndef delete_goal():\n cursor = g.conn.execute(\"SELECT * FROM match WHERE partner_1_id = %s and 
goal_1_id = %s or partner_2_id = %s and goal_2_id = %s;\",\n (session['username'], request.form['delete'],session['username'], request.form['delete']))\n if cursor.fetchall():\n g.conn.execute(\"UPDATE user_affiliation SET credit = credit - 10 WHERE user_account = '%s';\" % session['username'])\n g.conn.execute(\"DELETE FROM match WHERE partner_1_id = %s and goal_1_id = %s or partner_2_id = %s and goal_2_id = %s;\",\n (session['username'], request.form['delete'],session['username'], request.form['delete']))\n g.conn.execute(\"DELETE FROM set_goal WHERE user_account = '%s' and goal_id = %s;\" % (session['username'], request.form['delete']))\n return redirect(\"/goal\")\n\n@app.route('/create_goal', methods = ['POST', 'GET'])\ndef create_goal():\n if request.method == \"GET\":\n cursor = g.conn.execute(\"SELECT count(*),max(goal_id) from set_goal where user_account = '%s';\" % session['username'])\n info = cursor.fetchall()\n count = info[0][0]\n max_id = 0\n if count!=0:\n max_id = info[0][1]\n print(max_id)\n session[\"max_id\"] = max_id\n if count>=3:\n return render_template('notification.html')\n else:\n cursor = g.conn.execute(\"SELECT distinct test from test_section;\")\n tests = []\n for test in cursor.fetchall():\n tests.append(test[0])\n context = dict(tests = tests)\n return render_template('create_goal.html', **context)\n else:\n g.conn.execute(\"INSERT into set_goal(user_account, goal_id, name, completed_date) values ('%s', %s, '%s', '%s');\"\n % (session[\"username\"], session[\"max_id\"]+1, request.form[\"test\"], request.form[\"completed_date\"]))\n sections = []\n key_dict = request.form.to_dict()\n print(key_dict)\n print(len(key_dict))\n for i in range((len(key_dict)-2)/2):\n sections.append((key_dict[\"section\"+str(i+1)], key_dict[\"score\"+str(i+1)]))\n section_name = {}\n for section in sections:\n if section[0] in section_name:\n continue\n else:\n section_name[section[0]] = 1\n g.conn.execute(\"INSERT into has_section(name, scores, user_account, goal_id) values('%s', %s, '%s', %s);\"%\n (section[0],section[1], session[\"username\"], session[\"max_id\"]+1))\n return redirect(\"/goal\")\n\n@app.route('/get_section', methods=[\"POST\"])\ndef get_section():\n test = request.form[\"test\"]\n cursor = g.conn.execute(\"SELECT section from test_section where test = '%s';\" % test)\n sections = []\n for section in cursor.fetchall():\n sections.append(section[0])\n return jsonify(sections=sections)\n\n@app.route('/get_score', methods=[\"POST\"])\ndef get_score():\n test = request.form[\"test\"]\n section = request.form[\"section\"]\n print(test, section)\n cursor = g.conn.execute(\"SELECT score from test_section where test = '%s' and section = '%s';\" %(test, section))\n score = cursor.fetchall()[0][0]\n return jsonify(score=score)\n\n@app.route('/match', methods=['POST', 'GET'])\ndef match():\n def get_match_info():\n cursor = g.conn.execute(\"SELECT goal_id,name FROM set_goal Where user_account = '%s';\" % session['username'])\n rows = cursor.fetchall()\n print(rows)\n match_info = {}\n for goal in rows:\n query1 = \"SELECT partner_1_id, goal_1_id,time_start, time_end from match where partner_2_id=%s and goal_2_id = %s;\"\n query2 = \"SELECT partner_2_id, goal_2_id,time_start, time_end from match where partner_1_id=%s and goal_1_id = %s;\"\n data = (session['username'], goal[0])\n cursor1= g.conn.execute(query1, data)\n rows1 = cursor1.fetchall()\n cursor2= g.conn.execute(query2, data)\n rows2 = cursor2.fetchall()\n if not rows1 and not rows2:\n match_info[(goal[0],goal[1])] 
= []\n elif rows1:\n match_info[(goal[0],goal[1])] = list(rows1[0])\n else:\n match_info[(goal[0],goal[1])] = list(rows2[0])\n\n return match_info\n\n if request.method == 'GET':\n context = dict(match_info = get_match_info())\n return render_template(\"match.html\", **context)\n else:\n query = '''with gg(user_account, goal_id, name) \n AS(select g.user_account, g.goal_id, g.name from set_goal g \n where not exists \n (select * from match m \n where g.user_account = m.partner_1_id \n and g.goal_id = m.goal_1_id \n or g.user_account = m.partner_2_id \n and g.goal_id = m.goal_2_id ))\n select gg.user_account, gg.goal_id from gg, set_goal g \n where g.goal_id = %s and g.user_account = %s and gg.name=g.name and gg.user_account!=%s'''\n data = (request.form[\"match\"], session['username'],session['username'])\n cursor = g.conn.execute(query, data)\n rows = cursor.fetchall()\n if not rows:\n context = dict(match_info=get_match_info(), match_flag=True)\n return render_template(\"match.html\", **context)\n random.shuffle(rows)\n partner_id = rows[0][0]\n partner_goal = rows[0][1]\n start_time = str(datetime.date.today())\n end_time = str(datetime.date.today()+datetime.timedelta(days=7))\n query1 = '''Insert into match(partner_1_id, goal_1_id, partner_2_id,\n goal_2_id, time_start, time_end) values (%s, %s, %s, %s,%s,%s)'''\n data1 = (session['username'], request.form[\"match\"], partner_id, partner_goal,start_time,end_time)\n g.conn.execute(query1, data1)\n\n return redirect(\"/match\")\n\n@app.route('/un_match', methods = ['POST'])\ndef un_match():\n query = '''DELETE from match where partner_1_id = %s and goal_1_id = %s or \n partner_2_id = %s and goal_2_id = %s;'''\n data = (session['username'], request.form[\"un_match\"], session['username'], request.form[\"un_match\"])\n g.conn.execute(query, data)\n return redirect(\"/match\")\n\n@app.route('/check', methods=['POST'])\ndef check():\n query = '''SELECT * from user_affiliation where user_account = '%s'; ''' % request.form['check']\n cursor = g.conn.execute(query)\n rows = cursor.fetchall()[0]\n print(rows)\n names = [\"\", \"nickname\", \"birthdate\", \"email\", \"degree\", \"credit\", \"gender\",\"\", \"school\", \"department\"]\n profile = {}\n for i in range(len(rows)):\n if i!= 0 and i!=7:\n profile[names[i]] = rows[i]\n context = dict(profile = profile)\n return render_template(\"check.html\", **context)\n\n@app.route('/admin_login', methods=['POST'])\ndef admin_login():\n if request.method == 'POST':\n cursor = g.conn.execute(\"SELECT account, password FROM admin Where account = '%s';\" % request.form['username'])\n rows = cursor.fetchall()\n if not rows:\n print(\"No such account exisits!\")\n else:\n password = rows[0][1]\n print(password)\n if password == request.form['password']:\n session['logged_in'] = True\n session['admin'] = True\n session['username'] = request.form['username']\n cursor.close()\n return render_template(\"admin.html\")\n else:\n flash(\"Wrong password!\")\n return redirect(\"/login\")\n else:\n return render_template(\"login.html\")\n@app.route('/un_match_admin', methods=['POST'])\ndef un_match_admin():\n query = '''DELETE from match where partner_1_id = %s and goal_1_id = %s and partner_2_id = %s\n and goal_2_id = %s or partner_1_id = %s and goal_1_id = %s and\n partner_2_id = %s and goal_2_id = %s;'''\n data = (request.form['user1'], request.form['goal1'],request.form['user2'], request.form['goal2'],request.form['user2'], request.form['goal2'],request.form['user1'], request.form['goal1'], )\n 
g.conn.execute(query, data)\n return render_template(\"admin.html\")\n\n\n@app.route('/get_matches', methods=['POST'])\ndef get_matches():\n query = '''SELECT * from match where partner_1_id = %s and partner_2_id = %s\n or partner_1_id = %s and partner_2_id = %s;'''\n data = (request.form[\"user1\"], request.form[\"user2\"],request.form[\"user2\"], request.form[\"user1\"])\n cursor = g.conn.execute(query, data)\n rows = cursor.fetchall()\n if not rows:\n flag = True\n return render_template(\"admin.html\", flag = flag)\n else:\n return render_template(\"admin.html\", matches = rows)\n\n@app.route('/update_credit', methods=['POST'])\ndef update_credit():\n query='''UPDATE user_affiliation set credit= %s where user_account = %s;'''\n data = (request.form['credit'], request.form['user_account'])\n g.conn.execute(query, data)\n return render_template(\"admin.html\")\n\n'''Completed'''\n@app.route(\"/logout\")\ndef logout():\n session['logged_in'] = False\n session['admin'] = False\n return render_template('login.html')\n\n\nif __name__ == \"__main__\":\n import click\n\n @click.command()\n @click.option('--debug', is_flag=True)\n @click.option('--threaded', is_flag=True)\n @click.argument('HOST', default='0.0.0.0')\n @click.argument('PORT', default=8111, type=int)\n def run(debug, threaded, host, port):\n \"\"\"\n This function handles command line parameters.\n Run the server using\n python server.py\n Show the help text using\n python server.py --help\n \"\"\"\n\n HOST, PORT = host, port\n print (\"running on %s:%d\" % (HOST, PORT))\n app.secret_key = os.urandom(12)\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)\n\n\n run()","sub_path":"webserver/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":22059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"506982460","text":"import json\nimport glob\n\ndef run():\n output = \"install_autocomplete.json\"\n installInformation = json.load(open(\"install_information.json\", 'r'))\n\n with open(\"install_autocomplete.json\", 'w') as installAutocompleteOutputFile:\n packageNames = installInformation.keys();\n packageNames.sort()\n json.dump(packageNames, installAutocompleteOutputFile)\n\nif __name__ == '__main__':\n run()\n","sub_path":"Visual/install_autocomplete_gen.py","file_name":"install_autocomplete_gen.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"609932369","text":"from starkware.cairo.lang.compiler.ast.types import TypedIdentifier\nfrom starkware.cairo.lang.compiler.preprocessor.preprocessor_error import PreprocessorError\n\n\ndef assert_no_modifier(typed_identifier: TypedIdentifier):\n \"\"\"\n Throws a PreprocessorError if typed_identifier has a modifier.\n \"\"\"\n if typed_identifier.modifier is not None:\n raise PreprocessorError(\n f\"Unexpected modifier '{typed_identifier.modifier.format()}'.\",\n location=typed_identifier.modifier.location)\n","sub_path":"vendor/cairo-lang/src/starkware/cairo/lang/compiler/preprocessor/preprocessor_utils.py","file_name":"preprocessor_utils.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"289398995","text":"# Source: https://oj.leetcode.com/problems/reverse-string/\n# Author: renzongxian\n# Date: 2016-05-15\n\n\"\"\"\n\nWrite a function that takes a string as input and returns the string 
reversed.\n\nExample:\nGiven s = \"hello\", return \"olleh\".\n\n\"\"\"\n\nclass Solution(object):\n def reverseString(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n s_list = list(s)\n i = 0\n j = len(s_list)-1\n while i < j:\n s_list[i], s_list[j] = s_list[j], s_list[i]\n i += 1\n j -= 1\n return \"\".join(s_list)\n","sub_path":"src/reverseString.py","file_name":"reverseString.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"626318143","text":"'''\nProblema 05: Hallar la radicación de , donde a y n pertenecen\na números enteros positivos.\nAnálisis: Para la solución de este problema, se requiere que\nusuario ingrese el valor de a y n por teclado y el sistema\nrealice el cálculo respectivo y obtenga la radicación r.\nExpresión Algorítmica\nr = a ^ (1/n)\n'''\n\nvalor=int(input(\"base:\\n\"))\nraiz=float(input(\"raiz:\\n\"))\n\nradicacion= (valor ** (1/raiz))\n\nprint(f\"Expresión Algorítmica: {valor}^(1/{raiz})\")\n\nprint(f\"radicacion = {radicacion}\")\n","sub_path":"python-udemy/Practica1/Practica01_05.py","file_name":"Practica01_05.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"270619093","text":"import socketserver\nimport sys\nimport pickle\nimport base64\nimport threading\nfrom SecureWeenDataBase import DataBase\nfrom SecureWeenOperationCode import SecureWeenOperationCode\nfrom Crypto.Signature import PKCS1_v1_5\nfrom Crypto.Hash import SHA\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\n\n\nclass ServerHandler(socketserver.BaseRequestHandler, SecureWeenOperationCode):\n\tENC_SESSION_KEY_SIZE = 256\n\tsemaphore = threading.Semaphore(1)\n\tsocket_list = list()\n\n\t#클라이언트 요청 처리 함수\n\tdef handle(self):\n\n\t\tself.insertClient()\n\t\tprint(\"▶︎ [%s] 연결\" % self.client_address[0])\n\n\t\tself.db = DataBase()\n\t\tself.id = \"\"\n\n\t\tself.privateKey = RSA.generate(2048)\n\t\tself.publicKey = self.privateKey.publickey()\n\t\tself.client_publicKey = self.recvPublicKey()\n\t\tself.sendPublicKey()\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tdata = self.recvPickle()\n\n\t\t\t\tif data[0] == self.CODE_ID_CHECK:\n\t\t\t\t\tflag = self.db.check_id(data[1])\n\t\t\t\t\tself.sendPickle(self.CODE_ID_ANSWER, flag)\n\n\t\t\t\telif data[0] == self.CODE_USER_CHECK:\n\t\t\t\t\tflag = self.db.check_user(data[1][0], data[1][1])\n\t\t\t\t\tself.sendPickle(self.CODE_USER_ANSWER, flag)\n\n\t\t\t\telif data[0] == self.CODE_USER_LOGIN:\n\t\t\t\t\tself.id = data[1]\n\t\t\t\t\tprint(\"▶︎ [%s] 로그인 (아이디 : %s)\" % (self.client_address[0], self.id))\n\n\t\t\t\telif data[0] == self.CODE_USER_LOGOUT:\n\t\t\t\t\tprint(\"▶︎ [%s] 로그아웃 (아이디 : %s)\" % (self.client_address[0], self.id))\n\t\t\t\t\tself.id = \"\"\n\n\t\t\t\telif data[0] == self.CODE_RESIST_REQUEST:\n\t\t\t\t\tflag = self.db.insert_user(data[1][0], data[1][1], data[1][2], data[1][3], data[1][4], data[1][5])\n\t\t\t\t\tself.sendPickle(self.CODE_RESIST_ANSWER, flag)\n\n\t\t\t\telif data[0] == self.CODE_MESSAGE_SEND_REQUEST:\n\t\t\t\t\tself.db.insert_msg(data[1][0], data[1][1], data[1][2])\n\t\t\t\t\tself.relayClient(data[1][1], data[1][2])\n\n\t\t\t\telif data[0] == self.CODE_MESSAGE_READ_REQUEST:\n\t\t\t\t\trows = self.db.select_msg(data[1][0], data[1][1])\n\t\t\t\t\tself.sendPickle(self.CODE_MESSAGE_READ_ANSWER, rows)\n\n\t\t\t\telif data[0] == 
self.CODE_COUPLE_CANCEL:\n\t\t\t\t\tself.db.change_couple(data[1][0], \"\", \"\", self.USER_COUPLE_NONE)\n\t\t\t\t\tself.db.change_couple(data[1][2], \"\", \"\", self.USER_COUPLE_NONE)\n\n\t\t\t\telif data[0] == self.CODE_COUPLE_REQUEST:\n\t\t\t\t\tself.db.change_couple(data[1][0], data[1][2], data[1][3], self.USER_COUPLE_SEND)\n\t\t\t\t\tself.db.change_couple(data[1][2], data[1][0], data[1][1], self.USER_COUPLE_RECEIVE)\n\n\t\t\t\telif data[0] == self.CODE_COUPLE_ACCEPT:\n\t\t\t\t\tself.db.change_couple(data[1][0], data[1][2], data[1][3], self.USER_COUPLE_EXIST)\n\t\t\t\t\tself.db.change_couple(data[1][2], data[1][0], data[1][1], self.USER_COUPLE_EXIST)\n\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\t\t\texcept Exception as e:\n\t\t\t\tself.deleteClient()\n\t\t\t\tif self.id != \"\":\n\t\t\t\t\tprint(\"▶︎ [%s] 로그아웃 (아이디 : %s)\" % (self.client_address[0], self.id))\n\t\t\t\tprint(\"▶︎ [%s] 연결종료\" % self.client_address[0])\n\t\t\t\tsys.exit()\n\n\t# pickle 데이터 전송 함수\n\tdef sendPickle(self, op_code, data):\n\t\tlist_data = list()\n\t\tlist_data.append(op_code)\n\t\tlist_data.append(data)\n\t\tpickle_data = pickle.dumps(list_data)\n\n\t\tmyhash = SHA.new(pickle_data)\n\t\tsignature = PKCS1_v1_5.new(self.privateKey)\n\t\tsignature = signature.sign(myhash)\n\t\tpickle_data = signature + pickle_data\n\n\t\tsessionKey = Random.new().read(32) # 256 bit\n\t\tiv = Random.new().read(16) # 128 bit\n\t\tobj = AES.new(sessionKey, AES.MODE_CFB, iv)\n\n\t\tcipher_pickle_data = iv + obj.encrypt(pickle_data)\n\n\t\tcipherRsa = PKCS1_OAEP.new(self.client_publicKey)\n\t\tenc_sessionKey = cipherRsa.encrypt(sessionKey)\n\n\t\tsend_data = enc_sessionKey + cipher_pickle_data\n\n\t\tsend_data = base64.b64encode(send_data)\n\n\t\tself.sendSize(len(send_data))\n\t\tself.request.sendall(send_data)\n\n\t# pickle 데이터 수신 함수\n\tdef recvPickle(self):\n\t\tsize = self.recvSize()\n\t\trecv_size = 0\n\t\tdata = b\"\"\n\n\t\twhile recv_size < size:\n\t\t\trecv_data = self.request.recv(1024)\n\t\t\tif not recv_data:\n\t\t\t\traise Exception\n\t\t\trecv_size += len(recv_data)\n\t\t\tdata += recv_data\n\n\t\tdata = base64.b64decode(data)\n\n\t\tcipherRsa = PKCS1_OAEP.new(self.privateKey)\n\t\tsessionKey = cipherRsa.decrypt(data[:self.ENC_SESSION_KEY_SIZE])\n\t\tcipher_pickle_data = data[self.ENC_SESSION_KEY_SIZE:]\n\n\t\tiv = cipher_pickle_data[:16]\n\t\tobj = AES.new(sessionKey, AES.MODE_CFB, iv)\n\t\tsig_pickle_data = obj.decrypt(cipher_pickle_data[16:])\n\n\t\tcipherRsa = PKCS1_v1_5.new(self.client_publicKey)\n\n\t\tmyhash = SHA.new(sig_pickle_data[256:])\n\n\t\tresult = cipherRsa.verify(myhash, sig_pickle_data[:256])\n\n\t\tif not result:\n\t\t\tprint(\"서명값이 올바르지 않습니다. 
프로그램을 종료합니다.\")\n\t\t\tsys.exit()\n\n\t\tpickle_data = pickle.loads(sig_pickle_data[256:])\n\n\t\treturn pickle_data\n\n\t# 전송할 데이터 크기 전송 함수\n\tdef sendSize(self, size):\n\t\tself.request.sendall(size.to_bytes(4, \"little\"))\n\n\t# 전송할 데이터 크기 수신 함수\n\tdef recvSize(self):\n\t\tsize = self.request.recv(4)\n\t\tif not size:\n\t\t\traise Exception\n\t\tsize = int.from_bytes(size, \"little\")\n\n\t\treturn size\n\n\t# 공개키 전송 함수\n\tdef sendPublicKey(self):\n\t\tlist_data = list()\n\t\tlist_data.append(self.CODE_PUBLICKEY_ANSWER)\n\t\tlist_data.append(self.publicKey.exportKey(\"PEM\"))\n\t\tpickle_data = pickle.dumps(list_data)\n\t\tself.sendSize(len(pickle_data))\n\t\tself.request.sendall(pickle_data)\n\n\t# 공개키 수신 함수\n\tdef recvPublicKey(self):\n\t\tsize = self.recvSize()\n\t\trecv_size = 0\n\t\tdata = b\"\"\n\t\twhile recv_size < size:\n\t\t\trecv_data = self.request.recv(1024)\n\t\t\trecv_size += len(recv_data)\n\t\t\tdata += recv_data\n\t\tpickle_data = pickle.loads(data)\n\t\tpickle_data[1] = RSA.importKey(pickle_data[1])\n\t\treturn pickle_data[1]\n\n\t# 클라이언트 저장 함수\n\tdef insertClient(self):\n\t\tServerHandler.semaphore.acquire()\n\t\tServerHandler.socket_list.append(self)\n\t\tServerHandler.semaphore.release()\n\n\t# 클라이언트 삭제 함수\n\tdef deleteClient(self):\n\t\tServerHandler.semaphore.acquire()\n\t\tServerHandler.socket_list.remove(self)\n\t\tServerHandler.semaphore.release()\n\n\t# 클라이언트 메시지 전달 함수\n\tdef relayClient(self, recevieId, content):\n\t\tServerHandler.semaphore.acquire()\n\t\tfor obj in ServerHandler.socket_list:\n\t\t\tif obj.id == recevieId:\n\t\t\t\tobj.sendPickle(self.CODE_MESSAGE_SEND_ANSWER, content)\n\t\t\t\tbreak\n\t\tServerHandler.semaphore.release()\n\n\nif __name__ == \"__main__\":\n\tHOST = \"127.0.0.1\"\n\tPORT = 9999\n\tLISTEN = 5\n\n\t# 주소 재사용 가능 설정\n\tsocketserver.TCPServer.allow_reuse_address = True\n\tserver_socket = socketserver.ThreadingTCPServer((HOST, PORT), ServerHandler)\n\ttry:\n\t\tserver_socket.serve_forever()\n\texcept KeyboardInterrupt:\n\t\tserver_socket.shutdown()\n\t\tserver_socket.server_close()\n","sub_path":"SecureWeenSever.py","file_name":"SecureWeenSever.py","file_ext":"py","file_size_in_byte":6606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"145776889","text":"# encoding: utf-8\n# -*- coding: utf-8 -*-\n# author = ‘LW’\n\"\"\"\n基本类型\n应用类型\n\n\"\"\"\ng_a = 1\ng_b = [1]\ng_c = [1]\n\n\ndef test_1(a, b, c):\n a = 2 # 简单类型,新的local变量\n b = [1, 2] # 重新赋值,改变了b的引用地址,b引用了新的local变量\n c.append(3) # 未改变引用地址\n print('inner:', a, b, c)\n\n\ntest_1(g_a, g_b, g_c)\nprint('outer:', g_a, g_b, g_c)\n","sub_path":"studysrc/day05/func_params_2.py","file_name":"func_params_2.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"239845890","text":"# Configure matplotlib to output 2x resolution for retina-style displays\nfrom IPython.display import set_matplotlib_formats\nset_matplotlib_formats('retina')\n\n#Configure default plot size\nplt.rcParams['figure.figsize'] = [15, 5]\n\n#Set random seeds for reproducibility\nnp.random.seed(42)\nrandom.seed(42)\n\n#Helper function to report progress of file download\ndef progress_report_hook(count, block_size, total_size):\n mb = int(count * block_size // 1e6)\n if count % 500 == 0:\n sys.stdout.write(\"\\r{} MB downloaded\".format(mb))\n sys.stdout.flush() \n \n#Functions to convert a pandas series to an object\ndef series_to_obj(ts, cat=None):\n obj = {\"start\": 
str(ts.index[0]), \"target\": list(ts)}\n if cat is not None:\n obj[\"cat\"] = cat\n return obj\n\ndef series_to_jsonline(ts, cat=None):\n return json.dumps(series_to_obj(ts, cat))\n\n#Helper class to request predictions from the DeepAR model\nclass DeepARPredictor(sagemaker.predictor.RealTimePredictor):\n\n def set_prediction_parameters(self, freq, prediction_length):\n \"\"\"Set the time frequency and prediction length parameters. This method **must** be called\n before being able to use `predict`.\n \n Parameters:\n freq -- string indicating the time frequency\n prediction_length -- integer, number of predicted time points\n \n Return value: none.\n \"\"\"\n self.freq = freq\n self.prediction_length = prediction_length\n \n def predict(self, ts, cat=None, encoding=\"utf-8\", num_samples=100, quantiles=[\"0.1\", \"0.5\", \"0.9\"]):\n \"\"\"Requests the prediction of for the time series listed in `ts`, each with the (optional)\n corresponding category listed in `cat`.\n \n Parameters:\n ts -- list of `pandas.Series` objects, the time series to predict\n cat -- list of integers (default: None)\n encoding -- string, encoding to use for the request (default: \"utf-8\")\n num_samples -- integer, number of samples to compute at prediction time (default: 100)\n quantiles -- list of strings specifying the quantiles to compute (default: [\"0.1\", \"0.5\", \"0.9\"])\n \n Return value: list of `pandas.DataFrame` objects, each containing the predictions\n \"\"\"\n prediction_times = [x.index[-1]+1 for x in ts]\n req = self.__encode_request(ts, cat, encoding, num_samples, quantiles)\n res = super(DeepARPredictor, self).predict(req)\n return self.__decode_response(res, prediction_times, encoding)\n \n def __encode_request(self, ts, cat, encoding, num_samples, quantiles):\n instances = [series_to_obj(ts[k], cat[k] if cat else None) for k in range(len(ts))]\n configuration = {\"num_samples\": num_samples, \"output_types\": [\"quantiles\"], \"quantiles\": quantiles}\n http_request_data = {\"instances\": instances, \"configuration\": configuration}\n return json.dumps(http_request_data).encode(encoding)\n \n def __decode_response(self, response, prediction_times, encoding):\n response_data = json.loads(response.decode(encoding))\n list_of_df = []\n for k in range(len(prediction_times)):\n prediction_index = pd.DatetimeIndex(start=prediction_times[k], freq=self.freq, periods=self.prediction_length)\n list_of_df.append(pd.DataFrame(data=response_data['predictions'][k]['quantiles'], index=prediction_index))\n return list_of_df","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"42840235","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport logging\nfrom concurrent import futures\n\nlogger = logging.getLogger(__name__)\n\n\ndef _init_model_func(model_dir, model_name, input_shape):\n from policy.resnet_trading_model import ResnetTradingModel\n # build model and save to model_dir\n model = ResnetTradingModel(\n name=model_name,\n model_dir=model_dir,\n load_model=False,\n input_shape=input_shape,\n )\n model_file_name = model.save_model(model_dir, model_name)\n return model_file_name\n\n\ndef _improve_func(\n model_dir, data_dir, model_name, input_shape, steps_per_epoch, batch_size, buffer_size\n):\n from common.sim_dataset import SimDataSet\n from policy.resnet_trading_model import ResnetTradingModel\n # load `state of the art` model\n model = 
ResnetTradingModel(\n name=model_name,\n model_dir=model_dir,\n load_model=True,\n input_shape=input_shape,\n )\n # load train data\n sim_ds = SimDataSet(data_dir=data_dir, pool_size=buffer_size)\n current_model_file_name = None\n while True:\n # training forever\n model.fit_generator(\n generator=sim_ds.generator(batch_size=batch_size),\n steps_per_epoch=steps_per_epoch,\n )\n # checkpoint: save model in model_dir\n current_model_file_name = model.save_model(model_dir, model_name)\n return current_model_file_name\n\n\nclass PolicyIterator(object):\n\n def __init__(self, data_dir, model_dir, input_shape, data_buffer_size=10000):\n self._input_shape = input_shape\n self._model_dir = model_dir\n self._data_dir = data_dir\n self._data_buffer_size = data_buffer_size\n\n def init_model(self, model_name):\n with futures.ProcessPoolExecutor(max_workers=1) as executor:\n f = executor.submit(\n _init_model_func, self._model_dir, model_name, self._input_shape\n )\n res = f.result()\n if not res:\n logger.error('init_model error:{e}'.format(e=f.exception()))\n return None\n return res\n\n def improve(self, model_name, batch_size=2048, steps_per_epoch=100):\n with futures.ProcessPoolExecutor(max_workers=1) as executor:\n f = executor.submit(\n _improve_func, self._model_dir, self._data_dir, model_name, self._input_shape,\n steps_per_epoch, batch_size, self._data_buffer_size,\n )\n new_model_file_name = f.result()\n if not new_model_file_name:\n logger.error('improve_model error:{e}'.format(e=f.exception()))\n return None\n return new_model_file_name\n","sub_path":"micro_market/pipeline/policy_iterator.py","file_name":"policy_iterator.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"123089649","text":"import torch\nimport torchvision\nmodel_path = r'd:\\dataset\\UA-DETRAC\\model_9.pth'\ncheckpoint = torch.load(\n model_path, map_location='cpu')\nmodel = torchvision.models.detection.fasterrcnn_resnet50_fpn(\n num_classes=5, pretrained=False)\nmodel.load_state_dict(checkpoint['model'])\nmodel.eval()\nx = torch.rand(1, 3, 720, 1280)\ntorch.onnx.export(model, args=x, f=\"carmodel2.onnx\",\n do_constant_folding=True, verbose=True, opset_version=11)\n","sub_path":"exportoonx.py","file_name":"exportoonx.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"293686697","text":"n=int(input(\"Digite um número inteiro: \"))\ncont=0\n\nwhile(n>0):\n\n #Devolve o último digito\n adjacente=n%10\n \n #Retira o ultimo digito, devolvendo o resto dos números\n n=n//10\n\n novo=n%10\n\n #testa se o número anterior é igual ao atual\n if(adjacente==novo):\n cont+=1\n \nif(cont>0):\n print(\"sim\")\nelse:\n print(\"não\")\n","sub_path":"númeroAdjacente.py","file_name":"númeroAdjacente.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"238656140","text":"# -*- coding: utf-8 -*- \n\"\"\"\nCreated on Mon Jun 24 14:30:13 2019\n\n@author: kumar.shivam(kumar.shivam@xceedance.com)\n\"\"\"\n\nfrom location_mapping import mapping\nfrom location_pre_process import pre_process\nfrom logger import logging_process\nfrom file_helper import filehelper\nfrom constants import constants\nfrom generic_mapping import genericmapping\nfrom account_mapping import mapping_account\nfrom account_pre_process import AIR_base_file\nimport time\n\n\ndef main():\n 
\"\"\"\n This is the main script..\n Logger is initiated for logging purpose.\n Calling all the instances here in this block.\n \"\"\"\n start_time = time.time()\n print(\"Process started!\") \n logger, logging = logging_process()\n print(\"Loggging process started...\")\n AIR_location_file, database, server = pre_process().read_sql_data(logger)\n print(\"Starting to convert CEDE %s on %s to OED file...\" %(database, server))\n print(\"AIR location data prepared for mapping...\")\n filehelper_obj = filehelper()\n OED_location_file = filehelper_obj.OED_location_file_preprocess(logger)\n print(\"OED blank location file prepared...\")\n mapping_obj = mapping()\n OED_location_file_direct_mapped = mapping_obj.direct_mapping(OED_location_file,AIR_location_file,logger)\n print(\"Direct mapping is done for OED location file...\")\n OED_location_file_value_mapped = mapping_obj.value_mapping(OED_location_file_direct_mapped,AIR_location_file,logger)\n print(\"Value mapping is done for location...\")\n OED_location_file_final = mapping_obj.conditional_mapping(OED_location_file_value_mapped,AIR_location_file,logger)\n print(\"Conditional mapping done for location file...\")\n filehelper_obj.output_write(OED_location_file_final,constants.OP_LOCATION,logger)\n\n OED_account_file_blank = filehelper().OED_account_file_blank(logger)\n print(\"OED blank account file prepared...\")\n AIR_account_file = AIR_base_file().AIR_account_read(logger) \n print(\"AIR account data prepared for mapping...\")\n genericmapping_obj = genericmapping()\n OED_direct_mapped = genericmapping_obj.direct_mapping(OED_account_file_blank, AIR_account_file, constants.ACCOUNT_DIRECT_MAPPING_JSON, logger)\n print(\"OED account direct mapping done...\")\n OED_file_value_mapped = genericmapping_obj.peril_mapper(OED_direct_mapped, AIR_account_file,[constants.OED_ACC_PERIL_COL,constants.OED_POL_PERIL_COV,constants.OED_POL_PERIL,constants.OED_COND_PERIL],logger)\n print(\"OED account peril mapping done...\")\n OED_conditional_mapped = mapping_account().conditional_mapping(OED_file_value_mapped, AIR_account_file,logger)\n print(\"OED conditional mapping done...\")\n filehelper_obj.output_write(OED_conditional_mapped, constants.OP_ACCOUNT , logger)\n logging.shutdown()\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"src/AIR_OED_conversion.py","file_name":"AIR_OED_conversion.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"592247829","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import Spinbox\nfrom tkinter import scrolledtext\nwin = tk.Tk()\nwin.title(\"Tabbed Widgets\")\n\ntabControl = ttk.Notebook(win) # creates tabl\n\ntab1 = ttk.Frame(tabControl)\ntabControl.add(tab1, text = \"Tab 1\")\ntab2 = ttk.Frame(tabControl)\ntabControl.add(tab2, text = \"Tab 2\")\n\ntabControl.pack(expand = 1, fill = \"both\") # pack to make visible\n\n'''\nhierarchy, tab -> labelframe -> labelInside\n'''\n# make a label Frame\nmighty = ttk.LabelFrame(tab1, text = \"Mighty Python\")\nmighty.grid(column = 0, row = 0, padx = 8, pady = 20)\n\n# label using mighty as parent\nmy_label = ttk.Label(mighty, text = \"Enter a name:\")\nmy_label.grid(column = 0, row = 0, padx = 20, pady = 20, sticky = 'W')\nval = tk.StringVar()\ntk.Entry(mighty, textvariable = val).grid(column = 1, row = 0, padx = 10, pady = 10, sticky = 'W')\n\n# spinbox callback\ndef _spin():\n value = spin.get()\n 
print(value)\n scrol.insert(tk.INSERT, value + '\\n')\n\n# adding a spinbox Widget\nspin = Spinbox(mighty, from_ = 0, to = 10, width = 5, bd = 8, command = _spin)\nspin.grid(column = 0, row = 2, padx = 10, pady = 10) # bd-border width\n\nscrol = scrolledtext.ScrolledText(mighty, height = 10, width = 40, wrap = tk.WORD)\nscrol.grid(row = 3, columnspan = 4)\n\nmy_label2 = ttk.Label(mighty, text = \"Choose a number\")\nmy_label2.grid(column = 0, row = 1, padx = 20, pady = 20, sticky = 'W', )\n\ncomboval = tk.StringVar()\ncombo = ttk.Combobox(mighty, textvariable = comboval, state = 'readonly')\ncombo['values'] = (1,2,3,4,5)\ncombo.grid(column = 1, row = 1, padx = 14, pady = 20, sticky = 'W')\ncombo.current(0)\n\n# adding in tab2\nabout = ttk.LabelFrame(tab2, text = \"About\")\nabout.grid(column = 0, row = 0, padx = 8, pady = 20)\nttk.Label(about, text = \"Hello it's me bikash das\").grid(padx = 20, pady = 10,column = 0, row = 0)\nwin.mainloop()\n","sub_path":"tkinter/ch-2/tabbedWidgets.py","file_name":"tabbedWidgets.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"279928542","text":"from django.conf.urls import include, patterns, url\n\nfrom rest_framework.routers import SimpleRouter\n\nfrom mkt.fireplace.views import (AppViewSet, CollectionViewSet,\n ConsumerInfoView, SearchView)\n\n\napps = SimpleRouter()\napps.register(r'app', AppViewSet, base_name='fireplace-app')\n\n\ncollections = SimpleRouter()\ncollections.register(r'collection', CollectionViewSet,\n base_name='fireplace-collection')\n\n\nurlpatterns = patterns('',\n url(r'^fireplace/', include(apps.urls)),\n url(r'^fireplace/', include(collections.urls)),\n url(r'^fireplace/consumer-info/',\n ConsumerInfoView.as_view(),\n name='fireplace-consumer-info'),\n # /featured/ is not used by fireplace anymore, but still used by yogafire,\n # so we have to keep it, it's just an alias to the regular search instead\n # of including extra data about collections.\n url(r'^fireplace/search/featured/',\n SearchView.as_view(),\n name='fireplace-featured-search-api'),\n url(r'^fireplace/search/',\n SearchView.as_view(),\n name='fireplace-search-api'),\n)\n","sub_path":"mkt/fireplace/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"466250237","text":"import os\nimport unittest\nfrom contextlib import closing\n\nimport numpy as np\nimport pandas as pd\n\nfrom ds_discovery.transition.cleaners import ColumnCleaners as Cleaner\n\n\nclass TestCleaner(unittest.TestCase):\n \"\"\"Test: the cleaner \"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_runs(self):\n \"\"\"Basic smoke test\"\"\"\n Cleaner()\n\n def test_get_cols(self):\n cleaner = Cleaner()\n\n self.assertTrue(set(cleaner.filter_headers(self.df, dtype=['object'])).intersection(self.object_col))\n self.assertTrue(set(cleaner.filter_headers(self.df, dtype=['float'])).intersection(self.float_col))\n self.assertTrue(set(cleaner.filter_headers(self.df, dtype=['float', 'object'])).intersection(self.df.columns))\n\n self.assertTrue(set(cleaner.filter_headers(self.df, dtype=['object'], exclude=False)).intersection(self.object_col))\n self.assertTrue(set(cleaner.filter_headers(self.df, dtype=['object'], exclude=True)).difference(self.object_col))\n\n test_col = ['Dummy_Policy_Number', 'ACTIVITY_ID', 'PRODUCT_CATEGORY', 'SYSTEM_NAME']\n test_rev = 
self.df.columns.difference(test_col)\n\n self.assertTrue(set(cleaner.filter_headers(self.df, headers=test_col, drop=False)).intersection(test_col))\n self.assertTrue(set(cleaner.filter_headers(self.df, headers=test_col, drop=True)).intersection(test_rev))\n\n self.assertTrue(set(cleaner.filter_headers(self.df, headers=test_col, dtype=['object'], exclude=False)).\n intersection(['PRODUCT_CATEGORY', 'SYSTEM_NAME']))\n self.assertTrue(set(cleaner.filter_headers(self.df, dtype=['object'], exclude=True)).\n difference(['Dummy_Policy_Number', 'ACTIVITY_ID']))\n\n def test_excel_to_date(self):\n cfg = Config()\n cleaner = Cleaner()\n cleaner.excel_to_date_type(self.df, drop=cfg.get('data.activity.excel_to_date.drop'),\n headers=cfg.get('data.activity.excel_to_date.columns'))\n self.assertEqual(len(self.df.select_dtypes(include=np.datetime64).columns),\n len(cleaner.filter_headers(self.df, drop=cfg.get('data.activity.excel_to_date.drop'),\n headers=cfg.get('data.activity.excel_to_date.columns'))))\n\n def test_to_date(self):\n cfg = Config()\n cleaner = Cleaner()\n cleaner.to_date_type(self.df, drop=cfg.get('data.activity.to_date.drop'),\n headers=cfg.get('data.activity.to_date.columns'))\n\n self.assertEqual(len(self.df.select_dtypes(include=np.datetime64).columns),\n len(cleaner.filter_headers(self.df, drop=cfg.get('data.activity.to_date.drop'),\n headers=cfg.get('data.activity.to_date.columns'))))\n\n def test_clean_date(self):\n \"\"\"clean the data\"\"\"\n cfg = Config()\n cleaner = Cleaner()\n\n test_df = cleaner.clean_data(self.df, cfg, 'activity')\n\n self.assertTrue(self.df is test_df)\n\n self.assertEqual(len(test_df.select_dtypes(include='int64').columns), 9)\n self.assertEqual(len(test_df.select_dtypes(include='category').columns), 7)\n self.assertEqual(len(test_df.select_dtypes(include='float64').columns), 1)\n self.assertEqual(len(test_df.select_dtypes(include='object').columns), 1)\n\n def test_clean_steps(self):\n cfg = Config()\n\n # Data cleaners\n cleaner = Cleaner()\n cleaner.remove_columns(self.df, drop=cfg.get('data.activity.remove.drop'),\n headers=cfg.get('data.activity.remove.columns'))\n cleaner.to_int_type(self.df, drop=cfg.get('data.activity.to_int.drop'),\n headers=cfg.get('data.activity.to_int.columns'))\n cleaner.to_category_type(self.df, drop=cfg.get('data.activity.to_category.drop'),\n headers=cfg.get('data.activity.to_category.columns'))\n cleaner.to_date_type(self.df, drop=cfg.get('data.activity.to_date.drop'),\n headers=cfg.get('data.activity.to_date.columns'))\n cleaner.excel_to_date_type(self.df, drop=cfg.get('data.activity.excel_to_date.drop'),\n headers=cfg.get('data.activity.excel_to_date.columns'))\n\n self.assertEqual(len(self.df.select_dtypes(include='int64').columns), 9)\n self.assertEqual(len(self.df.select_dtypes(include='category').columns), 7)\n self.assertEqual(len(self.df.select_dtypes(include='float64').columns), 1)\n self.assertEqual(len(self.df.select_dtypes(include='object').columns), 1)\n\n def test_build_section(self):\n cleaner = Cleaner()\n\n bool_control = {'to_bool': {'headers': ['Head1', 'Head2']}}\n str_control = {'to_str': {'headers': ['Head3', 'Head4']}}\n map_control = {'to_map': {'headers': ['Head5', 'Head6'], 'fillna': '-1', 'bool_map': {'1': True, '2': False}}}\n\n self.assertEqual(bool_control, cleaner._build_section('to_bool',headers=['Head1', 'Head2']))\n self.assertEqual(str_control, cleaner._build_section('to_str',headers=['Head3', 'Head4']))\n self.assertEqual(map_control, cleaner._build_section('to_map',headers=['Head5', 
'Head6'],\n fillna='-1', bool_map={'1':True, '2':False}))\n\n def test_controls(self):\n config_file = 'example.csv'\n with closing(open(config_file, 'wt')) as f:\n f.write(self._control_data())\n\n\n os.remove(config_file)\n\n def test_auto(self):\n cleaner = Cleaner()\n data = {}\n df = self.control_frame()\n ctrl_drop = df.drop(columns=[1,2], axis=1)\n cleaner.auto_remove_columns(df)\n self.assertEqual(len(ctrl_drop.columns), len(cleaner.auto_remove_columns(df).columns))\n\n @staticmethod\n def _control_data():\n return '\\n'.join([\"active,age,agent_id,aum,contact,date,datetime,dof,gender,msg,pid,policy,policy_id,postcode,\"\n \"status,type,value\",\n \"Y,52.0,IU-P67590019,650630.36,Questionnaire,2009-01-11,2011-09-28 14:10:50,4,U,ZYINZL,4967516090,\"\n \"Income Drawdown,4867590.0,PN38 3FF,,personal,0.31\",\n \"N,51.0,NW-P22346221,1741499.64,Survey,2004-05-21,1994-08-28 14:10:50,2,M,GWYE LGP XPIR QZHPP JK WW,\"\n \"5462716394,Individual Pension,2727415.0,MK45 5QQ,,personal,0.29\",\n \"N,65.0,ME-P72418321,1316101.13,Visit,2004-10-24,1986-12-04 14:10:50,1,M,YV JWVN GGEBWNSL UNEKNDOBA\"\n \"GV,9514013160,Income Drawdown,4755322.0,IH2 8ZX,,personal,0.31\",\n \"N,44.0,CG-P73745990,724629.12,Internet,2012-03-19,1998-12-23 14:10:50,7,U,JYPRER EDEOV AN YBO PGFPG\"\n \"NHHOABC,9200858769,Mortgage Protection,8267876.0,KR45 7YZ,,personal,0.13\",\n \"Y,54.0,QK-P23777456,442313.86,Questionnaire,2013-01-27,1987-02-16 14:10:50,7,F,ADG SYLV ETND,\"\n \"9172467760,Mortgage Protection,4529466.0,TB54 3XT,,personal,0.35\",\n \"Y,51.0,CH-P38559541,3948234.42,MyPortal,2005-02-05,2018-02-19 14:10:50,-7,F,QKUC FVEB,7519267620,\"\n \"Freestanding AVC,9309998.0,VA45 9RW,,,0.48\"])\n\n @static\n\n @staticmethod\n def control_frame():\n data = {'A' : [1, 10, np.nan, np.nan, 'Yes', 'Fred'],\n 'B' : [2, 11, np.nan, np.nan, 'No', 'Jim'],\n 'C' : [3, '', np.nan, 'common', np.nan, 'Bob'],\n 'D' : [4, '', np.nan, 'common', np.nan, 'John'],\n 'E' : [5, 14, np.nan, np.nan, 'Yes', 'Sarah'],\n 'F' : [6, 15, np.nan, 'common', np.nan, 'Jane'],\n 'G' : [7, 16, np.nan, 'common', np.nan, 'Ian'],\n 'H' : [8, '', np.nan, np.nan, 'No', 'Kelly'],\n 'I' : [9, 17, np.nan, 'common', np.nan, 'Richard'],\n 'J' : [0, 19, np.nan, 'common', np.nan, 'Colin'],\n }\n df = pd.DataFrame(data).transpose()\n df.columns = ['count', 'nulls', 'common', 'qa', 'name']\n return df\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/discovery/cleaners_test_bak.py","file_name":"cleaners_test_bak.py","file_ext":"py","file_size_in_byte":8195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"242026197","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAria's storage.structures module\nPath: aria.storage.structures\n\nmodels module holds aria's models.\n\nclasses:\n * Field - represents a single field.\n * IterField - represents an iterable field.\n * PointerField - represents a single pointer field.\n * IterPointerField - represents an iterable pointers field.\n * Model - abstract model implementation.\n\"\"\"\n\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.ext import associationproxy\nfrom sqlalchemy import (\n Column,\n ForeignKey,\n Integer,\n Text\n)\n\n\nclass ModelMixin(object):\n\n @classmethod\n def id_column_name(cls):\n raise NotImplementedError\n\n @classmethod\n def name_column_name(cls):\n raise NotImplementedError\n\n @classmethod\n def _get_cls_by_tablename(cls, tablename):\n \"\"\"Return class reference mapped to table.\n\n :param tablename: String with name of table.\n :return: Class reference or None.\n \"\"\"\n if tablename in (cls.__name__, cls.__tablename__):\n return cls\n\n for table_cls in cls._decl_class_registry.values():\n if tablename in (getattr(table_cls, '__name__', None),\n getattr(table_cls, '__tablename__', None)):\n return table_cls\n\n @classmethod\n def foreign_key(cls, table, nullable=False):\n \"\"\"Return a ForeignKey object with the relevant\n\n :param table: Unique id column in the parent table\n :param nullable: Should the column be allowed to remain empty\n \"\"\"\n table_cls = cls._get_cls_by_tablename(table.__tablename__)\n foreign_key_str = '{tablename}.{unique_id}'.format(tablename=table_cls.__tablename__,\n unique_id=table_cls.id_column_name())\n column = Column(ForeignKey(foreign_key_str, ondelete='CASCADE'),\n nullable=nullable)\n column.__remote_table_name = table_cls.__name__\n return column\n\n @classmethod\n def one_to_many_relationship(cls,\n foreign_key_column,\n backreference=None,\n backref_kwargs=None,\n **kwargs):\n \"\"\"Return a one-to-many SQL relationship object\n Meant to be used from inside the *child* object\n\n :param parent_class: Class of the parent table\n :param cls: Class of the child table\n :param foreign_key_column: The column of the foreign key (from the child table)\n :param backreference: The name to give to the reference to the child (on the parent table)\n \"\"\"\n backref_kwargs = backref_kwargs or {}\n parent_table = cls._get_cls_by_tablename(\n getattr(cls, foreign_key_column).__remote_table_name)\n primaryjoin_str = '{parent_class_name}.{parent_unique_id} == ' \\\n '{child_class.__name__}.{foreign_key_column}'\\\n .format(\n parent_class_name=parent_table.__name__,\n parent_unique_id=parent_table.id_column_name(),\n child_class=cls,\n foreign_key_column=foreign_key_column\n )\n return relationship(\n parent_table.__name__,\n primaryjoin=primaryjoin_str,\n foreign_keys=[getattr(cls, foreign_key_column)],\n # The following line make sure that when the *parent* is\n # deleted, all its connected children are deleted as well\n backref=backref(backreference or cls.__tablename__, cascade='all', **backref_kwargs),\n **kwargs\n )\n\n @classmethod\n def relationship_to_self(cls, local_column):\n\n remote_side_str = 
'{cls.__name__}.{remote_column}'.format(\n cls=cls,\n remote_column=cls.id_column_name()\n )\n primaryjoin_str = '{remote_side_str} == {cls.__name__}.{local_column}'.format(\n remote_side_str=remote_side_str,\n cls=cls,\n local_column=local_column)\n return relationship(cls.__name__,\n primaryjoin=primaryjoin_str,\n remote_side=remote_side_str,\n post_update=True)\n\n def to_dict(self, fields=None, suppress_error=False):\n \"\"\"Return a dict representation of the model\n\n :param suppress_error: If set to True, sets `None` to attributes that\n it's unable to retrieve (e.g., if a relationship wasn't established\n yet, and so it's impossible to access a property through it)\n \"\"\"\n res = dict()\n fields = fields or self.fields()\n for field in fields:\n try:\n field_value = getattr(self, field)\n except AttributeError:\n if suppress_error:\n field_value = None\n else:\n raise\n if isinstance(field_value, list):\n field_value = list(field_value)\n elif isinstance(field_value, dict):\n field_value = dict(field_value)\n elif isinstance(field_value, ModelMixin):\n field_value = field_value.to_dict()\n res[field] = field_value\n\n return res\n\n @classmethod\n def _association_proxies(cls):\n for col, value in vars(cls).items():\n if isinstance(value, associationproxy.AssociationProxy):\n yield col\n\n @classmethod\n def fields(cls):\n \"\"\"Return the list of field names for this table\n\n Mostly for backwards compatibility in the code (that uses `fields`)\n \"\"\"\n fields = set(cls._association_proxies())\n fields.update(cls.__table__.columns.keys())\n return fields - set(getattr(cls, '_private_fields', []))\n\n def __repr__(self):\n return '<{__class__.__name__} id=`{id}`>'.format(\n __class__=self.__class__,\n id=getattr(self, self.name_column_name()))\n\n\nclass ModelIDMixin(object):\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(Text, nullable=True, index=True)\n\n @classmethod\n def id_column_name(cls):\n return 'id'\n\n @classmethod\n def name_column_name(cls):\n return 'name'\n","sub_path":"aria/storage/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":7059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"496955371","text":"from django.db import models\nfrom django.forms import ModelForm, Textarea\nfrom django.conf import settings\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db.models import Avg\nfrom django.utils.translation import ugettext_lazy as _\n\nclass Image(models.Model):\n\tdescription = models.CharField(max_length=250)\n\timgfile = models.ImageField(upload_to='images/%Y/%m/%d')\n\tuser = models.ForeignKey(\n\t\tsettings.AUTH_USER_MODEL,\n\t\ton_delete=models.CASCADE,\n\t)\n\t# Add created time automatically\n\tcreated_at = models.DateTimeField(auto_now_add=True) \n\t# Add aupdate time automatically\n\tupdated_at = models.DateTimeField(auto_now=True)\n\n\tdef __str__(self):\n\t\treturn self.description\n\n\t@classmethod\n\tdef get_top_images(cls, timestamp, limit):\n\t\t\"\"\"\n\t\tReturn limit number of images newer than timestamp\n\t\twith highest rate average \n\n\t\tArguments:\n\t\ttimestamp -- the timsatmp the images are created after\n\t\tlimit -- the number of images to return\n\t\t\"\"\"\n\t\treturn cls.objects.filter(created_at__gte=timestamp)\\\n\t\t\t.annotate(avg_rate=Avg('vote__rate'))\\\n\t\t\t.order_by('-avg_rate')[:limit]\n\n\t@property\n\tdef image_avg_rate(self):\n\t\t\"\"\"Return the average vote rating 
for the image\"\"\"\n\t\treturn self.vote_set.aggregate(Avg('rate'))['rate__avg']\n\n\nclass Comment(models.Model):\n\tcomment_text = models.TextField()\n\tuser = models.ForeignKey(\n\t\tsettings.AUTH_USER_MODEL,\n\t\ton_delete=models.CASCADE,\n\t)\n\timage = models.ForeignKey(\n\t\t'Image',\n\t\ton_delete=models.CASCADE,\n\t)\n\tcreated_at = models.DateTimeField(auto_now_add=True)\n\tupdated_at = models.DateTimeField(auto_now=True)\n\n\tdef __str__(self):\n\t\treturn self.comment_text\n\nclass Vote(models.Model):\n\trate = models.PositiveSmallIntegerField(\n\t\tvalidators=[\n\t\t\tMaxValueValidator(5),\n\t\t\tMinValueValidator(1)\n\t\t]\n\t)\n\tuser = models.ForeignKey(\n\t\tsettings.AUTH_USER_MODEL,\n\t\ton_delete=models.CASCADE,\n\t)\n\timage = models.ForeignKey(\n\t\t'Image',\n\t\ton_delete=models.CASCADE,\n\t)\n\n\tclass Meta:\n\t\tunique_together = ('user', 'image')\n\n\n\nclass ImageForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Image\n\t\tfields = ['imgfile', 'description']\n\t\tlabels = {\n\t\t\t'imgfile': _('Image File'),\n\t\t}\n\t\t# Show description as Textarea instead of an input\n\t\twidgets = {\n\t\t\t'description': Textarea(attrs={'cols': 40, 'rows': 2}),\n\t\t}\n\nclass CommentForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Comment\n\t\tfields = ['comment_text']\n\t\twidgets = {\n\t\t\t'comment_text': Textarea(attrs={'cols': 80, 'rows': 2}),\n\t\t}\n\t\t# Don't show label for comment_text\n\t\tlabels = {\n\t\t\t'comment_text': '',\n\t\t}\n\n","sub_path":"faker/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"101447197","text":"import boto3\nimport time\nfrom botocore.client import ClientError\nfrom datetime import datetime, timedelta, tzinfo\n\nec2 = boto3.client('ec2')\n\ninstances = [\"InstansIds\"]\nimage_prefix = \"backup_\"\n\n\ndef lambda_handler(event, context):\n for instance in instances:\n create_image(image_prefix + tag_name(instance), instance)\n\n\ndef create_image(prefix, instanceid):\n imagename = \"_\".join([prefix, datetime.now().strftime(\"%Y-%m-%d\")])\n\n try:\n # create image noreboot\n response = ec2.create_image(\n InstanceId=instanceid,\n Name=imagename,\n Description='created automatically by Lambda',\n NoReboot=True,\n )\n return\n except ClientError as e:\n print(str(e))\n\n\ndef tag_name(instance_id):\n tags = ec2.describe_instances(InstanceIds=[instance_id])\n for tag in tags['Reservations'][0]['Instances'][0]['Tags']:\n if tag['Key'] == 'Name':\n return tag['Value']\n else:\n return instance_id","sub_path":"lambda/ami-backup.py","file_name":"ami-backup.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"353885095","text":"from datetime import datetime, timedelta\nfrom lxml import html, etree\nimport requests\nfrom parsers.epg_parser import EPGParser\nfrom typing import List\n\nfrom egp.epgchannel import EPGChannel\nfrom egp.epgevent import EPGEvent\n\n\nclass HTMLParser(EPGParser):\n\n def open_file(self, path: str) -> 'file':\n re = requests.get(path)\n return re\n\n def parse_data(self, raw_data) -> List[EPGChannel]:\n data: List[EPGChannel] = list()\n tree = html.fromstring(raw_data)\n get_start_time = etree.XPath('.//span[@class=\"p-programms__item__time-value\"]/text()')\n get_channel_name = etree.XPath('.//div[@class = \"p-channels__item__info\"]/div/a/text()')\n get_events = etree.XPath('.//div[@class = 
\"p-programms__item__inner\"]')\n get_title = etree.XPath('.//span[@class=\"p-programms__item__name-link\"]/text()')\n all_channel = tree.xpath('//div[starts-with(@class, \"p-channels__item js-channel-item\")]')\n for channel in all_channel:\n channel_programme: List[EPGEvent] = list()\n channel_name = get_channel_name(channel)[0]\n events = get_events(channel)\n events_start_time = list()\n for event in events:\n start_time = get_start_time(event)[0].split(':')\n events_start_time.append(datetime(1, 1, 1, int(start_time[0]), int(start_time[1])))\n buf_time = events_start_time + [events_start_time[-1] + timedelta(hours=1, minutes=10)]\n events_end_time = [a + (b - a) for a, b in zip(buf_time, buf_time[1:])]\n for event, start_time, end_time in zip(events, events_start_time, events_end_time):\n event_desc = EPGEvent(start_time, end_time, get_title(event)[0])\n channel_programme.append(event_desc)\n data.append(EPGChannel(channel_name, channel_programme))\n return data\n\n def extract_data(self, file):\n return file.content\n","sub_path":"parsers/html_parser.py","file_name":"html_parser.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"315775835","text":"class Solution:\n def makesquare(self, matchsticks: List[int]) -> bool:\n total_sum = sum(matchsticks)\n if total_sum % 4 != 0:\n return False\n memo = {}\n possible_side = total_sum / 4\n def recurse(mask, sides_done):\n total = 0\n for i in range(len(matchsticks)):\n if not (mask & (1 << len(matchsticks) - 1 - i)):\n total += matchsticks[i]\n if total > 0 and total % possible_side == 0:\n sides_done += 1\n if sides_done == 3:\n return True\n if (mask, sides_done) in memo:\n return memo[(mask, sides_done)]\n ans = False\n rem = possible_side * (int(total / possible_side) + 1) - total\n for i in range(len(matchsticks)):\n if matchsticks[len(matchsticks) - 1 - i] <= rem and mask&(1 << i):\n if recurse(mask ^ (1 << i), sides_done):\n ans = True\n break\n memo[(mask, sides_done)] = ans\n return ans\n return recurse((1 << len(matchsticks)) - 1, 0)","sub_path":"Dynamic Programming/matchSticksToSquare.py","file_name":"matchSticksToSquare.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"579696978","text":"import cybw\nimport time\nfrom deep_sarsa.deep_sarsa_env import *\nimport copy\nimport numpy as np\nimport statistics\nimport pandas as pd\nfrom simple_agent.epsilon_decay import *\nimport datetime\nimport random\nfrom simple_agent.epsilon_decay import *\nfrom multi_agent.observation import *\nfrom multi_agent.state_extractor import *\n\nEPISODES = 1000\nclient = cybw.BWAPIClient\nBroodwar = cybw.Broodwar\n\n\ndef reconnect():\n while not client.connect():\n time.sleep(0.5)\n\n\nclass MultiAgentTrainerCNN:\n def __init__(self, socket, epsilon_decay: EpsilonDecay, very_fast=True, visualize=False, max_iterate=500,\n mode='train', file_or_folder_to_load=''\n , algorithm='DeepSarsa', map_name='', layers=[],\n actor_layers=[], critic_layers=[],\n export_per=-1, last_action_state_also_state=False, eligibility_trace=False):\n self.very_fast = very_fast\n self.socket = socket\n self.max_iterate = max_iterate\n self.visualize = visualize\n self.mode = mode\n assert mode in ['train', 'evaluate', 'evaluate_multiple_models']\n self.file_or_folder_to_load = file_or_folder_to_load\n if mode != 'train':\n assert file_or_folder_to_load != ''\n\n self.do_train = 
(mode == 'train')\n\n self.algorithm = algorithm\n assert algorithm in ['DeepSarsa', 'DQN', 'A2C', \"CNN\"]\n self.last_action_state_also_state = last_action_state_also_state\n\n self.state_size = X_SIZE * Y_SIZE * STATE_SIZE + NON_SPATIAL_SIZE\n\n self.action_size = 9\n\n self.map_name = map_name\n\n self.export_per = export_per\n self.eligibility_trace = eligibility_trace\n\n self.socket.sendMessage(tag=\"init_info\", msg={\n 'max_iterate': max_iterate,\n 'mode': mode,\n 'file_or_folder_to_load': file_or_folder_to_load,\n 'action_size': self.action_size,\n 'frame_size':(X_SIZE, Y_SIZE, STATE_SIZE),\n 'minimap_frame_size': (X_SIZE, Y_SIZE, MINIMAP_STATE_SIZE),\n 'state_size': self.state_size,\n 'map_name': map_name,\n 'algorithm': algorithm,\n 'layers': layers,\n 'actor_layers': actor_layers,\n 'critic_layers': critic_layers,\n 'export_per': export_per,\n 'eligibility_trace': eligibility_trace,\n 'non_spatial_state_size' : NON_SPATIAL_SIZE\n })\n\n tag, _ = self.socket.receiveMessage()\n assert tag == 'init finished'\n print(tag)\n\n self.total_iterate_count = -1\n self.total_iterate_counter = 0\n\n self.started_time = -1\n\n self.epsilon = 1\n self.epsilon_decay = epsilon_decay\n\n self.observation = Observation(include_self=False)\n\n def set_epsilon_decay(self, epsilon_decay: EpsilonDecay):\n self.epsilon_decay = epsilon_decay\n\n def update_epsilon(self, episode):\n self.epsilon = self.epsilon_decay.get_epsilon(episode)\n\n def get_action(self, state, do_train=True):\n if (np.random.random() <= self.epsilon):\n # return random.randint(0, 8)\n if (np.random.random() > 0.5):\n action = random.randint(0, 7)\n else:\n action = 8\n else:\n self.socket.sendMessage(tag=\"state\", msg=state)\n tag, action = self.socket.receiveMessage()\n assert tag == \"action\"\n action = action[0]\n return action\n\n def evaluate_multiple(self, test_file_path, test_zero=True, test_per=-1, test_iter=-1):\n result_file = open(\"../resultData/%s_test_result.txt\" % test_file_path, 'w')\n\n self.socket.sendMessage(tag=\"test_multiple_model_info\", msg={\n 'test_file_path': test_file_path,\n 'test_per': test_per\n })\n\n tag, msg = self.socket.receiveMessage()\n assert tag == \"file_number\"\n print(\"%d files to test\" % msg)\n self.total_iterate_count = test_iter * (msg + (1 if test_zero else 0))\n\n def eval_and_write_to_file(episode):\n result_info = self.evaluate(target_iterate=test_iter, close_socket=False, print_message=False)\n print(\"Win rate\", result_info[0])\n print(\"Left avg\", result_info[1])\n print(\"Score avg\", result_info[3])\n\n result_file.write(\"%d\\t\" % episode)\n for i in result_info:\n result_file.write('%.4f\\t' % i)\n result_file.write('\\n')\n result_file.flush()\n\n if test_zero:\n eval_and_write_to_file(0)\n\n while True:\n self.socket.sendMessage(tag=\"load_file\", msg=[11111])\n print(\"load file\")\n tag, msg = self.socket.receiveMessage()\n print(tag)\n if tag == \"load finished\":\n print(\"%d trained model loaded:\" % msg[0], msg[1])\n eval_and_write_to_file(msg[0])\n\n elif tag == \"no more file to load\":\n print(\"Test ended\")\n break\n\n self.print_expected_time(time.time())\n\n self.socket.close()\n\n def evaluate(self, target_iterate=-1, close_socket=True, print_message=True, step_frame=5, max_frame=10000):\n episode = 0\n winEpisode = 0\n timeOutEpisode = 0\n score_list = []\n left_unit_list = []\n step_list = []\n\n if target_iterate == -1:\n target_iterate = self.max_iterate\n\n while episode < target_iterate:\n while not Broodwar.isInGame():\n client.update()\n 
if not client.isConnected():\n print(\"Reconnecting...\")\n reconnect()\n\n if self.very_fast:\n Broodwar.setLocalSpeed(0)\n Broodwar.setGUI(False)\n\n # Broodwar.sendText(\"black sheep wall\")\n\n last_states = {}\n last_spatial_states = {}\n\n last_actions = {}\n\n last_action_target = {}\n\n step = 0\n is_first = True\n last_frame_count = -1\n\n if self.started_time == -1:\n self.started_time = time.time()\n\n while Broodwar.isInGame():\n\n events = Broodwar.getEvents()\n for e in events:\n eventtype = e.getType()\n if eventtype == cybw.EventType.MatchEnd:\n if e.isWinner():\n winEpisode += 1\n step_list.append(step)\n\n left_unit_list.append(len(Broodwar.self().getUnits()) - len(Broodwar.enemy().getUnits()))\n #score_list.append(get_score())\n Broodwar.restartGame()\n\n elif eventtype == cybw.EventType.MatchFrame:\n if last_frame_count >= 0 and Broodwar.getFrameCount() - last_frame_count < step_frame:\n continue\n\n if Broodwar.getFrameCount() > max_frame:\n # print(\"Time over\")\n timeOutEpisode += 1\n Broodwar.restartGame()\n # Broodwar.leaveGame()\n\n last_frame_count = Broodwar.getFrameCount()\n state_minimap = self.observation.get_minimap_state()\n for u in Broodwar.self().getUnits():\n if not u.exists():\n continue\n spatial_state = self.observation.get_local_state(u)\n non_spatial_state = self.observation.get_non_spatial_state(u)\n action = self.get_action((spatial_state, state_minimap, non_spatial_state), do_train=False)\n target = apply_action(u, action)\n last_actions[u.getID()] = action\n last_action_target[u.getID()] = target\n\n step += 1\n is_first = False\n\n if self.visualize:\n #self.observation.draw_box()\n self.observation.draw_minimap()\n draw_action(last_actions, last_action_target)\n\n client.update()\n\n episode += 1\n self.total_iterate_counter += 1\n self.socket.sendMessage(tag=\"episode finished\", msg=[episode])\n\n if print_message:\n # print(\"Left enemy : %d, Score: %d\" % (len(Broodwar.enemy().getUnits()), get_score()))\n print(\"Win / Total : %d / %d, win rate : %.4f\" % (winEpisode, episode, winEpisode / episode))\n if close_socket:\n self.socket.close()\n\n result_info = [0 for _ in range(6)]\n result_info[0] = winEpisode / episode\n result_info[1] = statistics.mean(left_unit_list)\n result_info[2] = statistics.stdev(left_unit_list)\n result_info[3] = statistics.mean(score_list)\n result_info[4] = statistics.stdev(score_list)\n result_info[5] = timeOutEpisode / episode\n result_info[6] = 0 if len(step_list) == 0 else statistics.mean(step_list)\n result_info[7] = 0 if len(step_list) == 0 else statistics.stdev(step_list)\n\n return result_info\n\n def print_expected_time(self, current_time):\n elapsed_time = current_time - self.started_time\n expected_left_time = elapsed_time * (self.total_iterate_count / self.total_iterate_counter) - elapsed_time\n print(\"Current iterate: %d/%d, Elapsed time: %s, Expected left time: %s\" %\n (self.total_iterate_counter, self.total_iterate_count,\n str(datetime.timedelta(seconds=int(elapsed_time))),\n str(datetime.timedelta(seconds=int(expected_left_time)))))\n\n def get_file_name(self, extend='txt'):\n now = datetime.datetime.now()\n nowDate = now.strftime('%Y_%m_%d_%H_%M')\n name = \"../resultData/test_result_%s_%s_%s.%s\" % (self.algorithm, self.map_name, nowDate, extend)\n return name\n\n def train(self, do_test_during_train=False, test_per=-1, test_iterate=-1, test_zero=True, step_frame=10,\n max_frame=10000):\n # env = DeepSARSAEnvironment()\n # agent = DeepSarsaAgent()\n if do_test_during_train:\n assert 
test_per != -1 and test_iterate != -1\n self.total_iterate_count = self.max_iterate + (\n (self.max_iterate // test_per) + (1 if test_zero else 0)) * test_iterate\n else:\n self.total_iterate_count = self.max_iterate\n\n episode = 0\n winEpisode = 0\n\n if do_test_during_train:\n f = open(self.get_file_name(), 'w')\n\n do_train = (self.mode == 'train')\n results = []\n self.started_time = time.time()\n\n def evaluate_and_write_to_file():\n result_info = self.evaluate(target_iterate=test_iterate, close_socket=False, print_message=False)\n print(\"Win rate\", result_info[0])\n print(\"Left avg\", result_info[1])\n print(\"Score avg\", result_info[3])\n episode_result_info = [episode, self.epsilon] + result_info\n results.append(episode_result_info)\n\n for ind, i in enumerate(episode_result_info):\n if ind == 0:\n f.write('%d\\t' % (i))\n else:\n f.write('%.4f\\t' % (i))\n f.write('\\n')\n f.flush()\n\n if do_test_during_train and test_zero:\n evaluate_and_write_to_file()\n\n while episode < self.max_iterate:\n while not Broodwar.isInGame():\n client.update()\n if not client.isConnected():\n print(\"Reconnecting...\")\n reconnect()\n\n if self.very_fast:\n Broodwar.setLocalSpeed(0)\n Broodwar.setGUI(False)\n\n # Broodwar.sendText(\"black sheep wall\")\n\n #last_minimap_state = None\n last_states = {}\n last_actions = {}\n #last_nn_states = {}\n\n last_action_target = {}\n last_cool_downs = {}\n last_hit_points = {}\n last_positions = {}\n last_destroyed_own_count = 0\n last_destroyed_enemy_count = 0\n\n step = 0\n is_first = True\n last_frame_count = -1\n\n self.epsilon = self.epsilon_decay.get_epsilon(episode)\n last_score = 0\n\n while Broodwar.isInGame():\n events = Broodwar.getEvents()\n for e in events:\n eventtype = e.getType()\n if eventtype == cybw.EventType.MatchEnd:\n if (do_train):\n if self.algorithm == \"A2C\":\n print(\"Episode %d ended in %d steps\" % (episode + 1, step))\n else:\n print(\n \"Episode %d ended in %d steps, epsilon : %.4f\" % (episode + 1, step, self.epsilon))\n cur_score = get_score()\n if cur_score == 0 and len(Broodwar.enemy().getUnits()) > 0:\n cur_score = last_score\n print(\"Left enemy : %d, Score: %d\" % (len(Broodwar.enemy().getUnits()), cur_score))\n else:\n print(\"Episode %d ended in %d steps\" % (episode + 1, step))\n\n if e.isWinner():\n winEpisode += 1\n\n Broodwar.restartGame()\n\n elif eventtype == cybw.EventType.MatchFrame:\n if last_frame_count >= 0 and Broodwar.getFrameCount() - last_frame_count < step_frame:\n continue\n\n last_score = get_score()\n\n if Broodwar.getFrameCount() > max_frame:\n print(\"Time over\")\n # Broodwar.leaveGame()\n Broodwar.restartGame()\n\n last_frame_count = Broodwar.getFrameCount()\n # print('frame: ', last_frame_count)\n # print('destroyed:', last_destroyed_enemy_count, last_destroyed_own_count)\n r_d = last_destroyed_own_count * -10 + last_destroyed_enemy_count * 10\n last_destroyed_own_count = 0\n last_destroyed_enemy_count = 0\n\n minimap_state = self.observation.get_minimap_state()\n\n for u in Broodwar.self().getUnits():\n if not u.exists():\n continue\n\n spatial_state = self.observation.get_local_state(u)\n non_spatial_state = self.observation.get_non_spatial_state(u)\n state = (spatial_state, minimap_state, non_spatial_state)\n action = self.get_action(state, do_train)\n target = apply_action(u, action)\n\n if not is_first:\n r_a = reward_attack(u, last_hit_points[u.getID()], last_cool_downs[u.getID()], last_actions[u.getID()])\n r_m = reward_last_action(u, last_actions[u.getID()], 
last_positions[u.getID()], last_cool_downs[u.getID()])\n\n # r_m = reward_move(u, last_states[u.getID()], last_actions[u.getID()],\n # last_positions[u.getID()])\n\n reward = r_a + r_m\n\n if do_train:\n last_state = last_states[u.getID()]\n last_action = last_actions[u.getID()]\n\n sarsa = [last_state, last_action, reward, state, action, u.getID(), 0]\n self.socket.sendMessage(tag=\"sarsa\", msg=sarsa)\n tag, _ = self.socket.receiveMessage()\n assert tag == 'train finished'\n # agent.train_model(last_state, last_action, reward, state, action)\n last_states[u.getID()] = state\n # if not u.getID() in last_states:\n # last_states[u.getID()] = spatial_state\n # else:\n # np.copyto(last_states[u.getID()], spatial_state)\n\n last_actions[u.getID()] = action\n last_action_target[u.getID()] = target\n last_cool_downs[u.getID()] = u.getGroundWeaponCooldown()\n last_hit_points[u.getID()] = u.getHitPoints() + u.getShields()\n last_positions[u.getID()] = u.getPosition()\n\n step += 1\n is_first = False\n\n elif eventtype == cybw.EventType.UnitDestroy:\n if do_train:\n u = e.getUnit()\n if u.getPlayer().getID() == Broodwar.self().getID():\n reward = -20\n last_state = last_states[u.getID()]\n last_action = last_actions[u.getID()]\n\n sarsa = [last_state, last_action, reward, last_state, last_action, u.getID(), 1]\n self.socket.sendMessage(tag=\"sarsa\", msg=sarsa)\n tag, _ = self.socket.receiveMessage()\n assert tag == 'train finished'\n\n last_destroyed_own_count += 1\n else:\n last_destroyed_enemy_count += 1\n\n if self.visualize:\n #self.observation.draw_box()\n self.observation.draw_minimap()\n draw_action(last_actions, last_action_target)\n\n client.update()\n\n episode += 1\n self.total_iterate_counter += 1\n\n if not do_train:\n print(\"Win / Total : %d / %d, win rate : %.4f\" % (winEpisode, episode, winEpisode / episode))\n\n # if do_train:\n # if self.epsilon_decrease == \"LINEAR\":\n # self.epsilon = self.epsilon0 * (self.max_iterate - episode) / self.max_iterate\n # elif self.epsilon_decrease == \"EXPONENTIAL\":\n # self.epsilon *= self.epsilon_decay_rate\n # elif self.epsilon_decrease == \"INVERSE_SQRT\":\n # self.epsilon = self.epsilon0 / math.sqrt(1 + episode)\n\n if self.export_per != -1 and episode % self.export_per == 0:\n self.socket.sendMessage(tag=\"export\", msg=[episode])\n tag, _ = self.socket.receiveMessage()\n assert tag == \"export finished\"\n\n self.socket.sendMessage(tag=\"episode finished\", msg=[episode])\n\n if do_train and do_test_during_train and (episode % test_per == 0):\n evaluate_and_write_to_file()\n\n self.print_expected_time(time.time())\n\n if do_test_during_train:\n df = pd.DataFrame(results)\n df.columns = ['episode', 'epsilon', 'winrate', 'left_unit_avg', 'left_unit_stdev', 'score_avg',\n 'score_stdev']\n df.to_csv(self.get_file_name(extend='csv'), index=False)\n\n if self.export_per == -1:\n self.socket.sendMessage(tag=\"export\", msg=[episode])\n tag, _ = self.socket.receiveMessage()\n assert tag == \"export finished\"\n\n self.socket.sendMessage(tag=\"end connection\", msg=[11111])\n self.socket.close()","sub_path":"multi_agent/multi_agent_trainer_map.py","file_name":"multi_agent_trainer_map.py","file_ext":"py","file_size_in_byte":20032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48641552","text":"#对于类的知识我还是不够了解,只知道一切皆对象,对象就是现实中存在的真是的东西。\n#定义一个猫类\nclass Tiger:\n \"\"\"\n 判断类的实例用到的类\n \"\"\"\n pass\n\n\nclass Cat(object):\n \"\"\"猫科动物类\"\"\"\n tag = \"我是家猫\"\n #希望年龄保密\n def 
__init__(self,name,age,sex=None):\n self.name = name\n self.__age = age #两个下划线开头的表示私有的变量\n self.sex = sex\n #改变猫龄\n def set_age(self,age):\n self.__age = age\n\n #显示猫的信息\n def show_info(self):\n rest = ('我叫:{0},今年{1}岁'.format(self.name,self.__age))\n print('我的性别:{0}'.format(self.sex))\n print(rest)\n return rest\n\n def eat(self):\n \"\"\"吃\"\"\"\n print(\"猫喜欢吃鱼\")\n\n def catch(self):\n \"\"\"猫抓老鼠\"\"\"\n print(\"猫抓老鼠\")\n\n def jiao(self):\n \"\"\"猫叫\"\"\"\n print(\"猫喵喵喵的叫\")\n\n\nif __name__ == '__main__':\n #实例,我家的猫叫小黑\n cat_black = Cat(\"小黑\",\"2\",\"公的\")\n cat_black.eat()\n cat_black.show_info()\n print('------------------')\n print(cat_black.name)\n # print(cat_black.age)\n # print(cat_black.__age)\n print('------------------')\n #更改猫的名称\n cat_black.name = \"嘿嘿\"\n cat_black.show_info()\n\n #实例化我家小白\n print('xxxxxxxxxxxxxxxxxxx')\n cat_white = Cat(\"小白\",3,\"母的\")\n cat_white.show_info()\n print(cat_white.tag)\n print(cat_black.tag)\n\n\n\n #类的实例判断\n print(isinstance(cat_black,Cat))\n print(isinstance(cat_white,Cat))\n print(isinstance(cat_black,Tiger))\n print(isinstance(cat_white,Tiger))\n","sub_path":"步骤三:初识面向对象/day21/test_class_cat.py","file_name":"test_class_cat.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"606816426","text":"#-*- coding: utf-8 -*\n\"\"\"\nAdvanced Computational Mechanics Report 2\nSimultaneous equations solving with Gauss Seidel method\n\"\"\"\nimport time\n\n# ガウス・ザイデル法のクラス定義\nclass Gauss_Seidel():\n # 初期値の定義 Ax=b => [ A b ]\n def __init__(self):\n self.a = [\n [10, -1, 0, 0, 17],\n [-1, 10, -2, 0, 34],\n [ 0, -2, 10, -1, -38],\n [ 0, 0, -1, 10, 23]\n ]\n self.n = len(self.a)\n\n # ガウス・ザイデル法による計算\n def execution(self):\n self.__display_equations()\n self.x = [0] * self.n\n e = 0.0\n for i in range(self.n):\n p = 0\n for j in range(i):\n p = p + self.a[i][j] * self.x[j]\n p = p - self.a[i][self.n]\n e = e + abs(p)\n n = 0\n while e > 10 ** -6:\n n += 1\n print(\"\\n{:2d}: err = {:10f}\".format(n, e))\n for i in range(self.n):\n self.x[i] = self.a[i][self.n]\n for j in range(i):\n self.x[i] = self.x[i] - self.a[i][j] * self.x[j]\n for j in range(i+1, self.n):\n self.x[i] = self.x[i] - self.a[i][j] * self.x[j]\n self.x[i] = self.x[i] / self.a[i][i]\n e = 0.0\n for i in range(self.n):\n p = 0\n for j in range(self.n):\n p = p + self.a[i][j] * self.x[j]\n p = p - self.a[i][self.n]\n e = e + abs(p)\n for i in range(self.n):\n print(\" x{:d} = {:10f}\".format((i+1), self.x[i]))\n self.__display_answers()\n\n # 連立方程式の表示\n def __display_equations(self):\n print(\"equations\")\n for i in range(self.n):\n for j in range(self.n):\n print(\"{:+3d}x{:d} \".format(self.a[i][j], j + 1), end=\"\")\n print(\"= {:+d}\".format(self.a[i][self.n]))\n\n # 計算結果の表示\n def __display_answers(self):\n print(\"\\nanswers\")\n for i in range(self.n):\n print(\"x{:d} = {:9f}\".format(i+1, self.x[i]))\n\nif __name__ == \"__main__\":\n # 計算時間の計測\n start = time.time()\n\n obj = Gauss_Seidel()\n obj.execution()\n\n # 計算時間の表示\n time = time.time() - start\n print(\"\\ncalculate time: {:f} s\".format(time))\n","sub_path":"Report 2/Gauss_Seidel.py","file_name":"Gauss_Seidel.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"345797505","text":"from flask import (\n\t\tBlueprint, redirect, render_template,\n\t\tResponse, request, url_for , session\n\t\t)\nfrom flask_login import login_user, 
login_required, logout_user, current_user\nfrom project import db\nfrom project.forms import PostForm, AddArtForm\nfrom project.models import Post, User\n\n\npost_bp = Blueprint('add_post', __name__)\n\n@post_bp.route('/create_post', methods= ['GET', 'POST'])\n@login_required\ndef add_post():\n\tform = PostForm(request.form)\n\tif request.method == 'POST':\n\n\t\ttitle = form.title.data\n\t\ttext = form.text.data\n\n\t\ttest_result = test_add_post(form)\n\n\t\tif test_result == \"success\":\n\t\t\tpost = Post(current_user.id, title, text)\n\t\t\tdb.session.add(post)\n\t\t\tdb.session.commit()\n\t\t\treturn redirect(url_for(\"stories\"))\n\t\telse:\n\t\t\tdata = {'err_msg':test_result}\n\t\t\treturn render_template('add_post.html',form = form, data = data)\n\n\telse:\n\t\treturn render_template('add_post.html', form=form)\n\ndef test_add_post(form):\n\ttitle = form.title.data\n\ttext = form.text.data\n\tif not title or title == \"\":\n\t\treturn \"post has to have a title\"\n\tif not text or text == \"\":\n\t\treturn \"post has to have text\"\n\treturn \"success\"\n\n@post_bp.route('/add_art/', methods= ['POST'])\n@login_required\ndef add_art(post_id):\n\tform = AddArtForm(request.form)\n\tif request.method == 'POST':\n\t\tif form.validate_on_submit():\n\t\t\tpost = Post.query.filter_by(id = post_id).first()\n\t\t\tart_url = form.art_url.data\n\t\t\tpost.ArtURL = art_url\n\t\t\tpost.ArtistNotes = form.artist_notes.data\n\t\t\tpost.ArtistID = current_user.id\n\t\t\tdb.session.commit()\n\t\t\treturn redirect(url_for('feed')) \n\t\telse:\n\t\t\treturn Response(\"
invalid form
\")\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"project/add_post.py","file_name":"add_post.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"416473137","text":"#!/usr/bin/env python3\nimport argparse\nimport datetime\nimport os\nimport re\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom uppercase_data import UppercaseData\n\n# Parse arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--alphabet_size\", default=500, type=int,\n help=\"If nonzero, limit alphabet to this many most frequent chars.\")\nparser.add_argument(\"--batch_size\", default=100, type=int, help=\"Batch size.\")\nparser.add_argument(\"--epochs\", default=5, type=int, help=\"Number of epochs.\")\nparser.add_argument(\"--hidden_layers\", default=\"500\", type=str, help=\"Hidden layer configuration.\")\nparser.add_argument(\"--threads\", default=4, type=int, help=\"Maximum number of threads to use.\")\nparser.add_argument(\"--window\", default=11, type=int, help=\"Window size to use.\")\nargs = parser.parse_args()\nargs.hidden_layers = [int(hidden_layer) for hidden_layer in args.hidden_layers.split(\",\") if hidden_layer]\n\n# Fix random seeds\nnp.random.seed(42)\ntf.random.set_seed(42)\ntf.config.threading.set_inter_op_parallelism_threads(args.threads)\ntf.config.threading.set_intra_op_parallelism_threads(args.threads)\n\n# Create logdir name\nargs.logdir = os.path.join(\"logs\", \"{}-{}-{}\".format(\n os.path.basename(__file__),\n datetime.datetime.now().strftime(\"%Y-%m-%d_%H%M%S\"),\n \",\".join((\"{}={}\".format(re.sub(\"(.)[^_]*_?\", r\"\\1\", key), value) for key, value in sorted(vars(args).items())))\n))\n\n# Load data\nuppercase_data = UppercaseData(args.window, args.alphabet_size)\n\n# TODO: Implement a suitable model, optionally including regularization, select\n# good hyperparameters and train the model.\n#\n# The inputs are _windows_ of fixed size (`args.window` characters on left,\n# the character in question, and `args.window` characters on right), where\n# each character is representedy by a `tf.int32` index. To suitably represent\n# the characters, you can:\n# - Convert the character indices into _one-hot encoding_. 
There is no\n# explicit Keras layer, so you can\n# - use a Lambda layer which can encompass any function:\n# Sequential([\n# tf.layers.InputLayer(input_shape=[2 * args.window + 1], dtype=tf.int32),\n# tf.layers.Lambda(lambda x: tf.one_hot(x, len(uppercase_data.train.alphabet))),\n# - or use Functional API and a code looking like\n# inputs = tf.keras.layers.Input(shape=[2 * args.window + 1], dtype=tf.int32)\n# encoded = tf.one_hot(inputs, len(uppercase_data.train.alphabet))\n# You can then flatten the one-hot encoded windows and follow with a dense layer.\n# - Alternatively, you can use `tf.keras.layers.Embedding`, which is an efficient\n# implementation of one-hot encoding followed by a Dense layer, and flatten afterwards.\n\n\nhyper = {\n \"dropout\": 0.2,\n \"ls\": 0.1,\n \"samples\": 1000000\n}\n\n# # Create the model\nmodel = tf.keras.Sequential()\nmodel.add(tf.keras.layers.InputLayer(input_shape=[2 * args.window + 1], dtype=tf.int32))\nmodel.add(tf.keras.layers.Lambda(lambda x: tf.one_hot(x, len(uppercase_data.train.alphabet))))\nmodel.add(tf.keras.layers.Flatten())\nmodel.add(tf.keras.layers.Dropout(hyper[\"dropout\"]))\nfor hidden_layer in args.hidden_layers:\n model.add(tf.keras.layers.Dense(hidden_layer, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dropout(hyper[\"dropout\"]))\nmodel.add(tf.keras.layers.Dense(2, activation=\"sigmoid\"))\n\nloss = None\nmetric = None\n\nif hyper[\"ls\"] > 0:\n loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=hyper[\"ls\"])\n metric = tf.keras.metrics.CategoricalAccuracy(name=\"accuracy\")\n train_labels = tf.keras.utils.to_categorical(uppercase_data.train.data[\"labels\"])\n dev_labels = tf.keras.utils.to_categorical(uppercase_data.dev.data[\"labels\"])\n test_labels = tf.keras.utils.to_categorical(uppercase_data.test.data[\"labels\"])\nelse:\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n metric = tf.keras.metrics.SparseCategoricalAccuracy(name=\"accuracy\")\n train_labels = uppercase_data.train.data[\"labels\"]\n dev_labels = uppercase_data.dev.data[\"labels\"]\n test_labels = uppercase_data.test.data[\"labels\"]\n\nmodel.compile(\n optimizer=tf.keras.optimizers.Adam(),\n loss=loss,\n metrics=[metric],\n)\n\ntb_callback = tf.keras.callbacks.TensorBoard(args.logdir, update_freq=1000, profile_batch=1)\ntb_callback.on_train_end = lambda *_: None\n\nmodel.fit(\n uppercase_data.train.data[\"windows\"][:hyper[\"samples\"]], train_labels[:hyper[\"samples\"]],\n batch_size=args.batch_size, epochs=args.epochs,\n validation_data=(uppercase_data.dev.data[\"windows\"], dev_labels),\n callbacks=[tb_callback],\n)\n#\ntest_logs = model.evaluate(uppercase_data.test.data[\"windows\"], test_labels, batch_size=args.batch_size)\ntb_callback.on_epoch_end(1,\n dict((\"val_test_\" + metric, value) for metric, value in zip(model.metrics_names, test_logs)))\n\naccuracy = test_logs[model.metrics_names.index(\"accuracy\")]\n\n# model.save('path_to_my_model.h5')\n\nres = model.predict(uppercase_data.test.data[\"windows\"])\nreference_file_content = None\nwith open(\"uppercase_data_test.txt\", \"r\", encoding=\"utf-8\") as reference_file:\n reference_file_content = reference_file.read()\n\ntable = dict(enumerate(uppercase_data.test.alphabet))\n\nout_content = []\nprint(len(res), len(uppercase_data.test.data[\"windows\"]))\nfor pred, window, i in zip(res, uppercase_data.test.data[\"windows\"], range(len(res))):\n number = window[args.window]\n letter = table.get(number, reference_file_content[i])\n if pred[1] > 
pred[0]:\n letter = letter.capitalize()\n out_content.append(letter)\n\nwith open(\"uppercase_test.txt\", \"w\", encoding=\"utf-8\") as out_file:\n out_file.write(''.join(out_content))\n","sub_path":"03/uppercase.py","file_name":"uppercase.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"46897398","text":"from django.test import TestCase\nfrom django.urls import reverse\n\nfrom .models import Post, User, Group\n\n\nclass PostsTest(TestCase):\n def get_urls(self, post):\n urls = [\n reverse('index'),\n reverse('profile', kwargs={'username': post.author.username}),\n reverse('post', kwargs={'username': post.author.username,\n 'post_id': post.id}),\n ]\n\n return urls\n\n def check_post_on_page(self, url, post):\n response = self.client.get(url)\n if 'paginator' in response.context:\n posts_list = response.context['paginator'].object_list\n self.assertEqual(len(posts_list), 1)\n self.assertEqual(posts_list[0], post)\n else:\n self.assertEqual(response.context['post'], post)\n\n def setUp(self):\n self.user = User.objects.create(username='testuser', password='12345')\n\n def test_profile_page_created(self):\n login = self.client.force_login(self.user)\n resp = self.client.get(reverse('profile', kwargs={'username': 'testuser'}))\n self.assertEqual(resp.status_code, 200)\n\n def test_authenticated_user_can_post(self):\n login = self.client.force_login(self.user)\n resp = self.client.post(reverse('new_post'), {'text': 'hello'})\n self.assertEqual(resp.status_code, 302)\n\n def test_unauthenticated_user_cant_post(self):\n resp = self.client.get(reverse('new_post'))\n self.assertRedirects(resp, '/auth/login/?next=/new/')\n\n def test_new_post_appears_on_pages(self):\n login = self.client.force_login(self.user)\n # resp = self.client.post(reverse('new_post'), data={'text': 'hello'})\n post = Post.objects.create(text='hello', author=self.user)\n urls = self.get_urls(post=post)\n for url in urls:\n self.check_post_on_page(url=url, post=post)\n\n def test_authenticated_user_can_edit_post(self):\n login = self.client.force_login(self.user)\n group = Group.objects.create(title='edit_group', slug='edit-group')\n post = Post.objects.create(text='hello', group=group, author=self.user)\n resp = self.client.post(reverse('post_edit', kwargs={\n 'username': post.author.username,\n 'post_id': post.id\n }), data={'text': 'edited text'}, follow=True)\n post = Post.objects.get(id=post.id)\n urls = self.get_urls(post=post)\n for url in urls:\n self.check_post_on_page(url, post)\n resp = self.client.get(reverse('group', kwargs={'slug': group.slug}))\n self.assertNotIn(post, resp.context['paginator'].object_list)","sub_path":"social/posts/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"396615104","text":"import torch\nfrom torch.optim import Adam\nfrom torch_geometric.data import DataLoader\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom dataset.primitive_shapes import PrimitiveShapes\nfrom loss_function import ChamferDistLoss\nfrom full_network.full_nework import FullNetwork\n\nFROM_EPOCH = 40\nNB_EPOCHS = 100\nBATCH_SIZE = 5\nRESULT_PATH = \"D:/Documenten/Results/\"\nNAME = \"FullNetwork/\"\nSTART_LR = 0.001\nLR_NB = 1\nNB_POINTS = 3600\nTRAIN_SIZE = 50\nVAL_SIZE = 5\n\nprint(\"STARTING TRAINTNG\")\nprint(\"DATASET PREP\")\nprint(\"train data\")\ntrain_dataset = 
PrimitiveShapes.generate_dataset(\n TRAIN_SIZE, NB_POINTS,\n shapes=[True, True, True, True, True], normals=False\n)\nprint(\"validation data\")\nval_dataset = PrimitiveShapes.generate_dataset(\n VAL_SIZE, NB_POINTS,\n shapes=[True, True, True, True, True], normals=False\n)\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE)\nval_loader = DataLoader(dataset=val_dataset, batch_size=BATCH_SIZE)\nprint(len(train_loader))\nprint(len(val_loader))\n\nloss_fn = ChamferDistLoss()\n\nfor lr in range(LR_NB):\n learning_rate = START_LR / max(1, (lr * 10))\n path = RESULT_PATH + NAME + \"LearningRate{}/\".format(round(learning_rate * 100000))\n print(learning_rate)\n print(round(learning_rate*100000))\n\n net = FullNetwork()\n train_losses = np.empty(0)\n val_losses = np.empty(0)\n if not FROM_EPOCH == 0:\n print('loaded net')\n net.load_state_dict(\n torch.load(path + \"model_epoch{}.pt\".format(FROM_EPOCH))\n )\n train_losses = np.load(path + \"trainloss_epoch{}.npy\".format(FROM_EPOCH))\n val_losses = np.load(path + \"valloss_epoch{}.npy\".format(FROM_EPOCH))\n optimizer = Adam(net.parameters(), lr=learning_rate, weight_decay=5e-4)\n\n net.train()\n for i in range(NB_EPOCHS + 1 - FROM_EPOCH):\n epoch = i + FROM_EPOCH\n print(epoch)\n temp_loss = []\n\n for batch in train_loader:\n optimizer.zero_grad()\n\n pos = batch.pos\n batch_inds = batch.batch\n\n points_out, batch_out = net(pos, batch_inds)\n loss = loss_fn(\n pos,\n points_out,\n batch_in=batch_inds,\n batch_out=batch_out\n )\n loss.backward()\n optimizer.step()\n temp_loss.append(loss.item())\n\n train_loss = sum(temp_loss) / len(train_loader)\n print(train_loss)\n train_losses = np.append(train_losses, train_loss)\n\n net.eval()\n temp_loss = []\n for val_batch in val_loader:\n pos = val_batch.pos\n batch_inds = val_batch.batch\n\n points_out, batch_out = net(pos, batch_inds)\n loss = loss_fn(\n pos,\n points_out,\n batch_in=batch_inds,\n batch_out=batch_out\n )\n temp_loss.append(loss.item())\n\n val_loss = sum(temp_loss) / len(val_loader)\n print(val_loss)\n val_losses = np.append(val_losses, val_loss)\n\n if epoch % 5 == 0:\n np.save(path + \"trainloss_epoch{}.npy\".format(epoch), train_losses)\n np.save(path + \"valloss_epoch{}.npy\".format(epoch), val_losses)\n torch.save(\n net.state_dict(),\n path + \"model_epoch{}.pt\".format(epoch)\n )\n plt.clf()\n x = range(len(train_losses))\n plt.plot(x, train_losses, x, val_losses)\n plt.legend(['train loss', 'validation loss'])\n plt.title('Simple Relative Layer train loss')\n plt.yscale('log')\n plt.savefig(\n path + \"loss_epoch{}.png\".format(epoch)\n )\n\n","sub_path":"full_network/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"45840091","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nimport sys, os\nfrom PyQt4 import QtGui, QtCore\nfrom subprocess import call, Popen\n\nconfigurationFile = os.getenv(\"HOME\") + '/.kadu-profiles'\n\nclass KaduProfiles(QtGui.QWidget):\n \n def __init__(self):\n super(KaduProfiles, self).__init__()\n self.profilesList = self.parseFile()\n self.initUI()\n\n def parseFile(self):\n profilesList = list()\n \n if os.path.exists(configurationFile):\n f = open(configurationFile, 'r')\n else:\n f = open(configurationFile, 'w+')\n\n for line in f:\n line = line.strip('\\r\\n')\n splited = line.split(':')\n profilesList.append(splited)\n \n \n f.close()\n return profilesList\n\n def initUI(self):\n 
self.setWindowTitle('Kadu - select profile')\n\n vbox = QtGui.QVBoxLayout()\n vbox.addStretch(1)\n self.setList()\n vbox.addWidget(self.qProfilesList)\n vbox.addLayout(self.setProfilesButtons())\n vbox.addLayout(self.setMainButtons())\n self.setLayout(vbox)\n\n self.center()\n\n self.show()\n\n def center(self):\n self.resize(500,250)\n qr = self.frameGeometry()\n cp = QtGui.QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n \n def setProfilesButtons(self): \n hbox = QtGui.QHBoxLayout()\n hbox.addStretch(1)\n\n buttonRemove = QtGui.QPushButton('Remove profile', self)\n buttonRemove.setToolTip(\"Remove selected profile\")\n buttonRemove.clicked.connect(self.removeProfile)\n hbox.addWidget(buttonRemove)\n \n buttonEdit = QtGui.QPushButton('Edit profile', self)\n buttonEdit.setToolTip(\"Edit selected profile\")\n buttonEdit.clicked.connect(self.editProfile)\n hbox.addWidget(buttonEdit)\n \n buttonAdd = QtGui.QPushButton('Add profile', self)\n buttonAdd.setToolTip(\"Add new profile\")\n buttonAdd.clicked.connect(self.addNewProfile)\n hbox.addWidget(buttonAdd)\n\n return hbox\n\n\n def setList(self):\n self.qProfilesList = QtGui.QListWidget()\n \n for elem in self.profilesList:\n item = QtGui.QListWidgetItem(elem[0], self.qProfilesList)\n item.setToolTip(elem[1])\n self.qProfilesList.addItem(item)\n\n def setMainButtons(self):\n hbox = QtGui.QHBoxLayout()\n hbox.addStretch(1)\n\n buttonRun = QtGui.QPushButton('Run kadu', self)\n buttonExit = QtGui.QPushButton('Exit' , self)\n\n buttonExit.setToolTip('Exit')\n buttonExit.resize(buttonExit.sizeHint())\n buttonExit.clicked.connect(QtCore.QCoreApplication.instance().quit)\n\n buttonRun.setToolTip('Run kadu with selected profile')\n buttonRun.resize(buttonRun.sizeHint())\n buttonRun.clicked.connect(self.runKadu)\n \n hbox.addWidget(buttonExit)\n hbox.addWidget(buttonRun)\n \n return hbox\n \n def removeProfile(self):\n selectedProfile = self.qProfilesList.currentRow()\n \n if selectedProfile > -1:\n reply = QtGui.QMessageBox.question(self, 'Are you sure?', \"Are you sure, that you want to remove selected profile (directory with configuration stays)?\", QtGui.QMessageBox.Yes |QtGui.QMessageBox.No, QtGui.QMessageBox.No)\n \n if reply == QtGui.QMessageBox.Yes:\n self.qProfilesList.takeItem(self.qProfilesList.currentRow())\n del self.profilesList[selectedProfile]\n self.printToFile()\n \n def createDialog(self, title=\"Create new profile\"):\n dialog = QtGui.QDialog(self)\n dialog.setWindowTitle(title)\n dialog.resize(400,50)\n \n vbox = QtGui.QVBoxLayout()\n \n hbox1 = QtGui.QHBoxLayout()\n hbox2 = QtGui.QHBoxLayout()\n hbox3 = QtGui.QHBoxLayout()\n hbox3.addStretch(1)\n\n labelName = QtGui.QLabel(\"Name: \", dialog)\n inputName = QtGui.QLineEdit(dialog)\n hbox1.addWidget(labelName)\n hbox1.addWidget(inputName)\n \n labelDir = QtGui.QLabel(\"Directory: \", dialog)\n inputDir = QtGui.QLineEdit()\n\n def chooseFile():\n fileDialog = QtGui.QFileDialog(dialog)\n fileDialog.show()\n \n def fSelected(f):\n inputDir.setText(f)\n\n fileDialog.fileSelected.connect(fSelected)\n\n chooseButton = QtGui.QPushButton(\"Choose\")\n chooseButton.clicked.connect(chooseFile)\n\n hbox2.addWidget(labelDir)\n hbox2.addWidget(inputDir)\n hbox2.addWidget(chooseButton)\n\n okButton = QtGui.QPushButton(\"OK\", dialog)\n okButton.setToolTip(\"Save\")\n \n cancelButton = QtGui.QPushButton(\"Cancel\", dialog)\n cancelButton.setToolTip(\"Cancel\")\n\n def closeDialog():\n dialog.close()\n \n cancelButton.clicked.connect(closeDialog)\n 
hbox3.addWidget(cancelButton)\n hbox3.addWidget(okButton)\n\n \n vbox.addLayout(hbox1)\n vbox.addLayout(hbox2)\n vbox.addLayout(hbox3)\n dialog.setLayout(vbox)\n dialog.show()\n\n return (dialog, okButton, inputName, inputDir)\n \n def addNewProfile(self):\n _tuple = self.createDialog()\n dialog = _tuple[0]\n button = _tuple[1]\n inputName = _tuple[2]\n inputDir = _tuple[3]\n\n def add():\n name = inputName.text()\n directory = inputDir.text()\n \n if self.validate(name, directory):\n self.profilesList.append((name, directory))\n item = QtGui.QListWidgetItem(name, self.qProfilesList)\n item.setToolTip(directory)\n self.qProfilesList.addItem(item)\n self.printToFile()\n dialog.close()\n else:\n msgBox = QtGui.QMessageBox()\n msgBox.setText(\"Wrong profile name or directory\")\n msgBox.exec_()\n\n button.clicked.connect(add)\n\n def validate(self, name, directory):\n if str(name).strip(' ') == '' or str(directory).strip(' ') == '':\n return False\n for tmp in self.profilesList:\n if tmp[0] == name or tmp[1] == directory: \n return False\n\n return True\n\n def editProfile(self):\n selectedProfile = self.qProfilesList.currentRow()\n if selectedProfile > -1:\n _tuple = self.createDialog(\"Edit profile\")\n dialog = _tuple[0]\n button = _tuple[1]\n inputName = _tuple[2]\n inputDir = _tuple[3]\n\n inputName.setText(self.profilesList[selectedProfile][0])\n inputDir.setText(self.profilesList[selectedProfile][1])\n\n def saveEdit():\n if self.validateForEdit(selectedProfile, inputName.text(), inputDir.text()):\n self.profilesList[selectedProfile] = (inputName.text(), inputDir.text())\n item = self.qProfilesList.item(selectedProfile)\n item.setText(inputName.text())\n item.setToolTip(inputDir.text())\n self.printToFile()\n dialog.close()\n else: \n msgBox = QtGui.QMessageBox()\n msgBox.setText(\"Wrong profile name or directory\")\n msgBox.exec_()\n\n button.clicked.connect(saveEdit)\n\n def validateForEdit(self, selectedProfile, name, directory):\n if str(name).strip(' ') == '' or str(directory).strip(' ') == '': return False\n \n i = 0\n for profile in self.profilesList:\n if i != selectedProfile and (profile[0] == name or profile[1] == directory):\n return False\n i += 1\n\n return True\n \n def printToFile(self):\n s = ''\n for profile in self.profilesList:\n s += profile[0] + ':' + profile[1] + '\\n'\n\n f = open(configurationFile, 'w+')\n f.write(s)\n f.close()\n\n\n def runKadu(self):\n selectedProfile = self.qProfilesList.currentRow()\n tmp = self.profilesList [selectedProfile]\n directory = tmp[1]\n directory = str(directory).strip(' ')\n \n Popen([\"kadu\", '--config-dir', directory])\n sys.exit()\n\n \n\ndef main():\n app = QtGui.QApplication(sys.argv)\n\n ex = KaduProfiles()\n sys.exit(app.exec_())\n\n\nif __name__=='__main__':\n main()\n","sub_path":"kadu-profiles.py","file_name":"kadu-profiles.py","file_ext":"py","file_size_in_byte":8562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"569475244","text":"reta1 = float(input('Informe o comprimento da reta 1 '))\nreta2 = float(input('Informe o comprimento da reta 2 '))\nreta3 = float(input('Informe o comprimenro da reta 3 '))\nif ((reta1 < (reta2 + reta3)) and (reta2 < (reta1 + reta3)) and (reta3 < (reta1 + reta2))):\n # MY WAY....\n # if ((reta1 != reta2) and (reta1 != reta3) and (reta2 != reta3)):\n # print('Essas retas formam um triângulo ESCALENO!')\n # elif ((reta1 == reta2) and (reta1 == reta3) and (reta2 == reta3)):\n # print('Essas retas forma um triângulo EQUILÁTERO!')\n # 
else:\n # print('Essas retas forma um triângulo ISÓCELES!')\n\n # TEACHERS WAY (BETTER) Python accepts this.\n if ((reta1 != reta2 != reta3 != reta1)):\n print('Essas retas formam um triângulo ESCALENO!')\n elif ((reta1 == reta2 == reta3)):\n print('Essas retas forma um triângulo EQUILÁTERO!')\n else:\n print('Essas retas forma um triângulo ISÓCELES!')\nelse:\n print('Os segmentos de reta não podem forma um triângulo.')\n","sub_path":"Exercises/World02/Aula12-Elif_NestedIf/ex042.py","file_name":"ex042.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"478808276","text":"#duas listas, uma com 5 nomes de carros e a outra com 5 cores, \n# só o que eu quero que vocês façam é associar cada carro a \n# uma cor, tipo o carro 2 é da cor 2, assim:\n#O carro1 é azul\n#O carro2 é vermelho\n\ncores = [\"Azul\", \"Vermelho\", \"Amarelo\", \"Verde\", \"Preto\"]\ncarros = [\"Celta\", \"Viper\", \"Lamborgini\", \"Ferrari\", \"Chevron\"]\nlista = zip(carros, cores)\n\nfor mostrarcarros, mostrarcores in lista: \n print(mostrarcarros, mostrarcores)\n","sub_path":"Aula 8/ExercicioAula8.py","file_name":"ExercicioAula8.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"217822722","text":"#!/usr/bin/env python\n# Developer virtualenv setup for Certbot client\nimport os\nimport sys\n\nimport _venv_common\n\ndef create_venv(venv_path):\n \"\"\"Create a Python 2 virtual environment at venv_path.\n\n :param str venv_path: path where the venv should be created\n\n \"\"\"\n python2 = _venv_common.find_python_executable(2)\n command = [sys.executable, '-m', 'virtualenv', '--python', python2, venv_path]\n\n environ = os.environ.copy()\n environ['VIRTUALENV_NO_DOWNLOAD'] = '1'\n _venv_common.subprocess_with_print(command, environ)\n\n\ndef main(pip_args=None):\n if os.name == 'nt':\n raise ValueError('Certbot for Windows is not supported on Python 2.x.')\n\n venv_path = _venv_common.prepare_venv_path('venv')\n create_venv(venv_path)\n _venv_common.install_packages(venv_path, pip_args)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"tools/venv.py","file_name":"venv.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"424012339","text":"import mutagen\nfrom mutagen.mp3 import MP3\nfrom mutagen.id3 import TOPE, TSO2, TOA, TIT2, TSOT, ID3, APIC, error\nimport requests\nimport re\nimport urllib\nimport os\nfrom shutil import copyfile\nimport time\n\nart_location = 'temp.jpg'\nsongs_location = 'songs/'\n\nshould_raise = False\nshould_log = False\ndef log(message):\n if should_log:\n print(time.strftime('%a, %d %b %Y %H:%M:%S', time.gmtime()) + ': ' + message)\n\ndef full_song_name(song):\n return song['artist'] + ' - ' + song['title']\n\ndef get_filename_for_song(song):\n return songs_location + full_song_name(song) + '.mp3'\n\ndef cache_art(song):\n try:\n urllib.urlretrieve(song['soundcloud_art_url'], art_location)\n except:\n copyfile('default.jpg', art_location)\n \ndef edit_tags(song, filename):\n from mutagen.easyid3 import EasyID3\n try:\n audio = ID3(filename)\n audio.delete()\n meta = EasyID3(filename)\n except mutagen.id3.ID3NoHeaderError:\n from mutagen.mp4 import MP4\n from mutagen.mp3 import EasyMP3\n try:\n audio = MP4(filename)\n audio.delete()\n mutagen.mp4.delete(filename)\n audio.save()\n meta 
=EasyMP3(filename)\n except:\n meta = mutagen.File(filename, easy=True)\n \n meta.add_tags()\n meta['title'] = song['title']\n meta['artist'] = song['artist']\n meta.save(filename)\n\n audio = MP3(filename, ID3=ID3)\n\n try:\n audio.add_tags()\n except error:\n pass\n\n audio.tags.add(\n APIC(\n encoding=3,\n mime='image/jpg',\n type=3,\n desc=u'Cover',\n data=open(art_location).read()\n )\n )\n\n audio.tags.add(TOPE(3, song['artist']))\n audio.tags.add(TSO2(3, song['artist']))\n audio.tags.add(TOA(3, song['artist']))\n\n audio.tags.add(TIT2(3, song['title']))\n audio.tags.add(TSOT(3, song['title']))\n\n audio.save(v2_version=3)\n\n log('Finished formatting ' + song['artist'] + ' - ' + song['title'])","sub_path":"src/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"93704748","text":"from gevent import monkey; monkey.patch_all()\n\nimport os\nfrom six.moves.urllib.parse import quote\n\nfrom bottle import Bottle, request, redirect, debug, abort\n\nfrom rewriterapp import RewriterApp\n\n\n# ============================================================================\nclass WebRecApp(RewriterApp):\n DEF_REC_NAME = 'my_recording'\n\n PATHS = {'live': '{replay_host}/live/resource/postreq?url={url}&closest={closest}',\n 'record': '{record_host}/record/live/resource/postreq?url={url}&closest={closest}¶m.recorder.user={user}¶m.recorder.coll={coll}¶m.recorder.rec={rec}',\n 'replay': '{replay_host}/replay/resource/postreq?url={url}&closest={closest}¶m.replay.user={user}¶m.replay.coll={coll}¶m.replay.rec={rec}',\n 'replay_coll': '{replay_host}/replay-coll/resource/postreq?url={url}&closest={closest}¶m.user={user}¶m.coll={coll}'\n }\n\n def __init__(self):\n super(WebRecApp, self).__init__(True)\n\n self.app = Bottle()\n debug(True)\n\n self.record_host = os.environ.get('RECORD_HOST', 'http://localhost:8010')\n self.replay_host = os.environ.get('REPLAY_HOST', 'http://localhost:8080')\n\n self.init_routes()\n\n def init_routes(self):\n @self.app.route(['/record/', '/anonymous/record/'])\n def redir_anon_rec(wb_url):\n new_url = '/anonymous/{rec}/record/{url}'.format(rec=self.DEF_REC_NAME,\n url=wb_url)\n return redirect(new_url)\n\n @self.app.route(['/replay/'])\n def redir_anon_replay(wb_url):\n new_url = '/anonymous/{url}'.format(url=wb_url)\n return redirect(new_url)\n\n # LIVE DEBUG\n @self.app.route('/live/')\n def live(wb_url):\n request.path_shift(1)\n\n return self.render_anon_content(wb_url, rec='', type='live')\n\n # ANON ROUTES\n @self.app.route('/anonymous//record/')\n def anon_record(rec_name, wb_url):\n request.path_shift(3)\n\n return self.render_anon_content(wb_url, rec=rec_name, type='record')\n\n @self.app.route('/anonymous/')\n def anon_replay_coll(wb_url):\n request.path_shift(1)\n\n return self.render_anon_content(wb_url, rec='*', type='replay')\n\n @self.app.route('/anonymous//')\n def anon_replay(rec_name, wb_url):\n request.path_shift(2)\n\n return self.render_anon_content(wb_url, rec=rec_name, type='replay')\n\n #@self.app.route('/static/')\n #def serve_static(filename):\n # return static_file(filename, root='/path/to/static/files')\n\n @self.app.error(404)\n def not_found(error):\n if isinstance(error.exception, dict):\n msg = 'The url {url} was not found in the archive'\n msg = msg.format(url=error.exception['url'])\n else:\n msg = 'Url Not Found'\n\n return msg\n\n\n def render_anon_content(self, wb_url, rec, type):\n user = 'anon'\n coll = 
'anonymous'\n return self.render_content(wb_url, user=user,\n coll=coll,\n rec=rec,\n type=type)\n\n def get_upstream_url(self, url, closest, kwargs):\n type = kwargs['type']\n upstream_url = self.PATHS[type].format(url=quote(url),\n closest=closest,\n record_host=self.record_host,\n replay_host=self.replay_host,\n **kwargs)\n\n return upstream_url\n\n def _add_custom_params(self, cdx, kwargs):\n type = kwargs['type']\n if type in ('live', 'record'):\n cdx['is_live'] = 'true'\n\n\n# ============================================================================\napplication = WebRecApp().app\n\n\n","sub_path":"app/webrecapp.py","file_name":"webrecapp.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"508831794","text":"import logging\nimport dramatiq\nfrom datetime import datetime\nfrom dramatiq.brokers.redis import RedisBroker\n\nfrom app.settings import REDIS, DELAY\nfrom common.net import get_page\nfrom mongo import sync_db, get_sync_db\nfrom mongo.models import Result, Counter\n\n\ndramatiq.set_broker(RedisBroker(host=REDIS['HOST'], port=REDIS['PORT']))\n\nsync_db.connect_to_database()\ndatabase = get_sync_db()\n\nlogger = logging.getLogger()\n\n\n@dramatiq.actor(actor_name='count_ads', max_retries=3)\ndef count_ads(counter: dict):\n counter_still_exist = database.has_counter(counter['id'])\n if counter_still_exist:\n count, top_ads = get_page(counter['phrase'], counter['region_id'])\n result_id: str = database.add_result(\n Result(\n counter_id=str(counter['id']),\n count=count,\n top_ads=top_ads,\n timestamp=int(datetime.now().timestamp())\n )\n )\n logger.info(f'counter_id: {str(counter[\"id\"])}, result_id: {result_id}, count: {count}')\n count_ads.send_with_options(args=(counter,), delay=DELAY)\n","sub_path":"src/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"542048269","text":"from flask import Flask, render_template\nimport psutil\nimport json\nimport time\nfrom flask_socketio import SocketIO, emit, send\n\napp = Flask(__name__)\nsocket = SocketIO(app)\nconnected=True\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@socket.on('connect')\ndef connection_handler():\n print('Connected')\n\n@socket.on('disconnect')\ndef disconnect_handler():\n print('Disconnected')\n connected=False\n\n@socket.on('handshake')\ndef handshake_handler(handshake):\n while(connected):\n socket.emit('performance_stats', \n {'cpu_usage' :psutil.cpu_percent(), \n 'memory': psutil.virtual_memory()})\n socket.sleep(1)\n\nif __name__ == '__main__':\n socket.run(app, debug=True)\n","sub_path":"resource_monitor.py","file_name":"resource_monitor.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"22359075","text":"import time\nfrom bs4 import BeautifulSoup\nfrom lxml import etree\nfrom Tools.tool_requests.getter import GETTER\nfrom Tools.DataBase.db import ConnMysql\nfrom Tools.BloomFilterOnRedis import BloomFilter\n\n\nclass MiMi(object):\n\n def __init__(self):\n \"\"\"秘密代理的IP抓取\"\"\"\n self.getter = GETTER(rtimes=10)\n self.cm = ConnMysql()\n self.bf = BloomFilter(key='allip')\n self.url = [\n \"http://www.mimiip.com/gngao/{}\", # 高匿代理IP\n \"http://www.mimiip.com/gnpu/{}\", # 普匿代理IP\n \"http://www.mimiip.com/gntou/{}\", # 透明代理IP\n \"http://www.mimiip.com/hw/{}\" # 
foreign proxy IPs\n        ]\n\n    def parser(self, page_lx):\n        page = 1\n        while True:\n            try:\n                html = self.getter.rget_data(page_lx.format(page))\n            except Exception as e:\n                print(\"Error occurred: {}\".format(e))\n                continue\n            time.sleep(2)  # sleep two seconds so the crawler does not get banned\n            next_page = etree.HTML(html).xpath('//div[@class=\"pagination\"]//*[text()=\"下一页 ›\"]/@href')\n            soup = BeautifulSoup(html, 'lxml')\n            proxies_list = soup.find('table', 'list').find_all('tr')\n            sql_list = list()\n            for proxy in proxies_list:\n                temp = proxy.find_all('td')\n                if temp:\n                    # extract the IP\n                    ip = temp[0].get_text()\n                    # extract the port\n                    port = temp[1].get_text()\n                    # extract the proxy type\n                    lx = temp[4].get_text().lower()\n                    # skip IPs we have already stored (Bloom filter check)\n                    if not self.bf.isContains(ip):\n                        sql_list.append(\"\"\"INSERT INTO allip (`ip`, `port`, `type`) VALUES ('{}', '{}', '{}')\"\"\".format(ip, port, lx))\n                        self.bf.insert(ip)\n                    else:\n                        pass\n            for sql in sql_list:  # write all rows to the database in one pass\n                self.cm.exe(sql)\n            if next_page:\n                page += 1\n            else:\n                break\n\n    def run(self):\n        for page_lx in self.url:\n            time.sleep(2)\n            self.parser(page_lx)\n\n\nif __name__ == '__main__':\n    mm = MiMi()\n    mm.run()\n\n# mimiip.com scraper; rows are stored as-is with no encryption, and scraping too heavily will get you banned.\n","sub_path":"ProxyHome/myProxies/spiders/mimiip.py","file_name":"mimiip.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"173702676","text":"# Converts tracts.csv into a format more readable by R\n\nimport csv\nimport cpi\n\ndef inflate(num):\n    return cpi.inflate(num, 2000, to=2016)\n\ncodes = []\n\n# Get list of valid metropolitan areas\nwith open('../data/msa-2003.csv', mode='r', encoding = \"ISO-8859-1\") as csv_file:\n    csv_reader = csv.DictReader(csv_file)\n\n    for row in csv_reader:\n        # if row[\"Metropolitan/Micropolitan Statistical Area\"] == \"Metropolitan Statistical Area\" and row[\"Central/Outlying County\"] == \"Central\":\n        if int(row[\"Status\"]) == 1:\n            # codes.append(row[\"FIPS State Code\"].zfill(2) + row[\"FIPS County Code\"].zfill(3))\n            codes.append(row[\"FIPS\"].zfill(5))\n\ndef validCode(geoid):\n    return True\n    # for code in codes:\n    #     if geoid.startswith(code): return True\n    #\n    # return False\n\nvalid = 0\nnot_valid = 0\n\nwith open('../data/tracts.csv', mode='r') as csv_file:\n    csv_reader = csv.DictReader(csv_file)\n\n    lines = []\n    GEOID = 1001020100\n    initial = []\n\n    for row in csv_reader:\n        if int(row[\"year\"]) == 2000:\n            initial = row\n        elif int(row[\"year\"]) == 2016:\n            has_empty = False\n            for key in row:\n                if row[key] == \"\": row[key] = 0\n                if initial[key] == \"\": initial[key] = 0\n\n            if validCode(row[\"GEOID\"].zfill(11)): valid += 1\n            else: not_valid += 1\n\n            inflated_income = inflate(float(initial[\"median-household-income\"]))\n            inflated_property_value = inflate(float(initial[\"median-property-value\"]))\n            inflated_rent = inflate(float(initial[\"median-gross-rent\"]))\n\n            # if not has_empty and (inflated_income != 0 and inflated_property_value != 0 and inflated_rent != 0) and validCode(row[\"GEOID\"].zfill(11)): lines.append({\n            if not False and (inflated_income != 0 and inflated_property_value != 0 and inflated_rent != 0) and validCode(row[\"GEOID\"].zfill(11)): lines.append({\n                \"GEOID\": row[\"GEOID\"],\n                \"parent-location\": row[\"parent-location\"],\n                \"name\": row[\"name\"],\n                \"population\": float(row[\"population\"]) - float(initial[\"population\"]),\n                \"poverty-rate\": float(row[\"poverty-rate\"]) - float(initial[\"poverty-rate\"]),\n                \"renter-occupied-households\": float(row[\"renter-occupied-households\"]) - float(initial[\"renter-occupied-households\"]),\n                
\"pct-renter-occupied\": float(row[\"pct-renter-occupied\"]) - float(initial[\"pct-renter-occupied\"]),\n \"median-gross-rent\": 100 * (float(row[\"median-gross-rent\"]) - inflated_rent) / inflated_rent,\n \"median-household-income\": 100 * (float(row[\"median-household-income\"]) - inflated_income) / inflated_income,\n \"median-property-value\": 100 * (float(row[\"median-property-value\"]) - inflated_property_value) / inflated_property_value,\n \"rent-burden\": float(row[\"rent-burden\"]) - float(initial[\"rent-burden\"]),\n \"pct-white\": float(row[\"pct-white\"]) - float(initial[\"pct-white\"]),\n \"pct-af-am\": float(row[\"pct-af-am\"]) - float(initial[\"pct-af-am\"]),\n \"pct-hispanic\": float(row[\"pct-hispanic\"]) - float(initial[\"pct-hispanic\"]),\n \"pct-am-ind\": float(row[\"pct-am-ind\"]) - float(initial[\"pct-am-ind\"]),\n \"pct-asian\": float(row[\"pct-asian\"]) - float(initial[\"pct-asian\"]),\n \"pct-nh-pi\": float(row[\"pct-nh-pi\"]) - float(initial[\"pct-nh-pi\"]),\n \"pct-multiple\": float(row[\"pct-multiple\"]) - float(initial[\"pct-multiple\"]),\n \"pct-other\": float(row[\"pct-other\"]) - float(initial[\"pct-other\"]),\n \"eviction-filings\": float(row[\"eviction-filings\"]) - float(initial[\"eviction-filings\"]),\n \"evictions\": float(row[\"evictions\"]) - float(initial[\"evictions\"]),\n \"eviction-rate\": float(row[\"eviction-rate\"]) - float(initial[\"eviction-rate\"]),\n \"evictions-2016\": float(row[\"eviction-rate\"]),\n \"evictions-2000\": float(initial[\"eviction-rate\"]),\n \"eviction-filing-rate\": float(row[\"eviction-filing-rate\"]) - float(initial[\"eviction-filing-rate\"])\n })\n\n with open('../data/tracts-parsed.csv', mode='w') as csv_file:\n fieldnames = [\"GEOID\", \"name\", \"parent-location\", \"population\", \"poverty-rate\", \"renter-occupied-households\", \"pct-renter-occupied\", \"median-gross-rent\", \"median-household-income\", \"median-property-value\", \"rent-burden\", \"pct-white\", \"pct-af-am\", \"pct-hispanic\", \"pct-am-ind\", \"pct-asian\", \"pct-nh-pi\", \"pct-multiple\", \"pct-other\", \"eviction-filings\", \"evictions\", \"eviction-rate\", \"evictions-2016\", \"evictions-2000\", \"eviction-filing-rate\"]\n\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n\n writer.writeheader()\n for line in lines:\n writer.writerow(line)\n\nprint(valid)\nprint(not_valid)\n","sub_path":"parser/tracts-parser.py","file_name":"tracts-parser.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"494962261","text":"import os\nimport sys\nimport time\nfrom datetime import datetime\nfrom shutil import copyfile\nimport importlib.util\nfrom os.path import join\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import trange\nimport PIL.Image as pimg\n\nimport helper\nimport eval_helper\nfrom datasets.voc2012.dataset import Dataset\n\nnp.set_printoptions(linewidth=250)\n\nnum_classes = 21\nimg_name = '2011_001620'\nimg_dir = '/home/kivan/datasets/VOC2012/JPEGImages/'\nlabel_dir = '/home/kivan/datasets/voc2012_aug/data/'\n#split = 'val'\n\n#DATA_DIR = '/home/kivan/datasets/VOC2012/test_data'\n\ntf.app.flags.DEFINE_string('model_dir',\n '/home/kivan/datasets/results/voc2012/iou77_24_5_22-39-18', '')\nFLAGS = tf.app.flags.FLAGS\n\n\nhelper.import_module('config', os.path.join(FLAGS.model_dir, 'config.py'))\n\n\ndef forward_pass(model, save_dir):\n #file_path = join(DATA_DIR, 'ImageSets', 'Segmentation', 'test.txt')\n #fp = open(file_path)\n 
#file_list = [line.strip() for line in fp]\n\n save_dir_rgb = join(save_dir, 'rgb')\n tf.gfile.MakeDirs(save_dir_rgb)\n save_dir_pred = join(save_dir, 'pred')\n tf.gfile.MakeDirs(save_dir_pred)\n #sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement))\n config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)\n #config.gpu_options.per_process_gpu_memory_fraction = 0.5 # don't hog all vRAM\n #config.operation_timeout_in_ms = 5000 # terminate on long hangs\n #config.operation_timeout_in_ms = 15000 # terminate on long hangs\n sess = tf.Session(config=config)\n # Get images and labels.\n #run_ops = model.inference()\n\n batch_shape = (1, None, None, 3)\n image_tf = tf.placeholder(tf.float32, shape=batch_shape)\n labels_tf = tf.placeholder(tf.int32, shape=(1, None, None, 1))\n logits, aux_logits, loss = model.inference(image_tf, labels_tf, constant_shape=False,\n is_training=False)\n #is_training=True)\n img_grads = tf.gradients(loss, image_tf)\n\n #sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n latest = os.path.join(FLAGS.model_dir, 'model.ckpt')\n restorer = tf.train.Saver(tf.global_variables())\n restorer.restore(sess, latest)\n\n img_path = join(img_dir, img_name + '.jpg')\n label_path = join(label_dir, img_name + '.png')\n image = np.array(pimg.open(img_path)).astype(np.float32)\n image = image[np.newaxis,...]\n labels = np.array(pimg.open(label_path)).astype(np.int8)\n labels[labels==-1] = num_classes\n #labels[labels==-1] = 20\n labels = labels[np.newaxis,...,np.newaxis]\n\n while True:\n loss_val, logits_val, img_grads_val = sess.run([loss, logits, img_grads],\n feed_dict={image_tf:image, labels_tf:labels})\n print('loss = ', loss_val)\n img_grads_val = img_grads_val[0]\n print('grad norm = ', np.linalg.norm(img_grads_val))\n pred_labels = logits_val[0].argmax(2).astype(np.int32)\n save_path = os.path.join(save_dir_pred, img_name + '.png')\n eval_helper.draw_output(pred_labels, Dataset.class_info, save_path)\n\n labels_2d = labels[0,:,:,0]\n pred_labels[pred_labels != labels_2d] = -1\n #pred_labels[labels == num_classes] = -1\n num_correct = (pred_labels >= 0).sum()\n num_labels = np.sum(labels_2d != num_classes)\n image += 1e4 * img_grads_val\n #image += np.sign(img_grads_val)\n save_img = np.minimum(255, np.round(image[0]))\n save_img = np.maximum(0, save_img)\n save_img = save_img.astype(np.uint8)\n pil_img = pimg.fromarray(save_img)\n pil_img.save(join(save_dir_rgb, img_name + '.png'))\n # pred_img = pimg.fromarray(pred_labels)\n # pred_img.save(join(save_dir_submit, file_list[i] + '.png'))\n\n print('pixel accuracy = ', num_correct / num_labels * 100)\n print('press key...')\n input()\n\n # #pred_labels = logits_val[0].argmax(2).astype(np.int32)\n # pred_labels = logits_val[0].argmax(2).astype(np.uint8)\n # save_path = os.path.join(save_dir_rgb, file_list[i] + '.png')\n # eval_helper.draw_output(pred_labels, Dataset.class_info, save_path)\n # pred_img = pimg.fromarray(pred_labels)\n # pred_img.save(join(save_dir_submit, file_list[i] + '.png'))\n\n ##gt_labels = gt_labels.astype(np.int32, copy=False)\n #cylib.collect_confusion_matrix(net_labels.reshape(-1), gt_labels.reshape(-1), conf_mat)\n #gt_labels = gt_labels.reshape(net_labels.shape)\n #pred_labels = np.copy(net_labels)\n #net_labels[net_labels == gt_labels] = -1\n #net_labels[gt_labels == -1] = -1\n #num_mistakes = (net_labels >= 0).sum()\n #img_prefix = '%07d_'%num_mistakes + img_prefix\n\n #error_save_path = os.path.join(save_dir, 
str(loss_val) + img_prefix + '_errors.png')\n #filename = img_prefix + '_' + str(loss_val) + '_error.png'\n #error_save_path = os.path.join(save_dir, filename)\n #eval_helper.draw_output(net_labels, CityscapesDataset.CLASS_INFO, error_save_path)\n #print(q_size)\n #print(conf_mat)\n #img_names = [[x,y] for (y,x) in sorted(zip(loss_vals, img_names))]\n #sorted_data = [x for x in sorted(zip(loss_vals, img_names), reverse=True)]\n #print(img_names)\n #for i, elem in enumerate(sorted_data):\n # print('Xent loss = ', elem[0])\n # ski.io.imshow(os.path.join(save_dir, elem[1] + '_errors.png'))\n # ski.io.show()\n\n #print('')\n #pixel_acc, iou_acc, recall, precision, _ = eval_helper.compute_errors(\n # conf_mat, 'Validation', CityscapesDataset.CLASS_INFO, verbose=True)\n sess.close()\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n model = helper.import_module('model', os.path.join(FLAGS.model_dir, 'model.py'))\n\n if not tf.gfile.Exists(FLAGS.model_dir):\n raise ValueError('Net dir not found: ' + FLAGS.model_dir)\n save_dir = os.path.join(FLAGS.model_dir, 'evaluation', 'adversarial')\n tf.gfile.MakeDirs(save_dir)\n\n forward_pass(model, save_dir)\n\n\nif __name__ == '__main__':\n tf.app.run()\n\n","sub_path":"OLD/tools/adversarial_examples.py","file_name":"adversarial_examples.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"555014933","text":"import scrapy\nfrom SmzdmPhone.items import SmzdmphoneItem\nfrom SmzdmPhone.items import SmzdmphoneCommentsItem\nfrom snownlp import SnowNLP\nfrom scrapy.utils.project import get_project_settings\n\n\nclass SmzdmSpider(scrapy.Spider):\n name = 'smzdm'\n allowed_domains = ['smzdm.com']\n start_urls = [\n 'https://www.smzdm.com/fenlei/zhinengshouji/h5c4s0f0t0p1/#feed-main/']\n\n def parse(self, response):\n phones = response.xpath(\n '//*[@id=\"feed-main-list\"]/li') # [position()<11]/div/div[2]\n\n settings = get_project_settings()\n smzdm_comment_url = settings.get('SMZDM_COMMENT_URL')\n\n for phone in phones[:10]:\n items = SmzdmphoneItem()\n id_str = phone.xpath('./@articleid').extract()[0]\n # phone_id = '0'\n phone_id = id_str.split('_')[1]\n items['sid'] = phone_id\n items['title'] = phone.xpath(\n './div/div[2]/h5/a/text()')[0].extract()\n items['zhi'] = phone.xpath(\n './div/div[2]/div[@class=\"z-feed-foot\"]/div[1]/span/a[1]/span[1]/span/text()')[0].extract()\n items['buzhi'] = phone.xpath(\n './div/div[2]/div[@class=\"z-feed-foot\"]/div[1]/span/a[2]/span[1]/span/text()')[0].extract()\n items['star'] = phone.xpath(\n './div/div[2]/div[@class=\"z-feed-foot\"]/div[1]/a[1]/span/text()')[0].extract()\n items['comments'] = phone.xpath(\n './div/div[2]/div[@class=\"z-feed-foot\"]/div[1]/a[2]/span/text()')[0].extract()\n yield items\n\n comment_url = smzdm_comment_url + phone_id + '/'\n\n yield scrapy.Request(comment_url, callback=self.comments_parse, meta={'ID': phone_id})\n\n def comments_parse(self, response):\n ID = response.meta['ID']\n comment_list = response.xpath(\n '//div[@id=\"commentTabBlockNew\"]//div[@class=\"comment_conBox\"]')\n for comment in comment_list:\n items = SmzdmphoneCommentsItem()\n items['content'] = comment.xpath(\n './div[@class=\"comment_conWrap\"]/div[1]/p/span/text()')[0].extract()\n items['mark'] = SnowNLP(items['content']).sentiments\n items['sid'] = ID\n items['cid'] = comment.xpath(\n './div[@class=\"comment_conWrap\"]/div[1]/input/@comment-id')[0].extract()\n\n public_date = comment.xpath(\n 
'./div[1]/div[1]/meta/@content')[0].extract()\n items['public_date'] = int(public_date.replace('-', ''))\n # items['public_date'] = '2020-01-12'\n yield items\n next_links = response.xpath(\n '//*[@id=\"commentTabBlockNew\"]//li[@class=\"pagedown\"]/a/@href').extract()\n if next_links and len(next_links) > 0:\n next_link = next_links[0]\n yield scrapy.Request(next_link, callback=self.comments_parse, meta={'ID': ID})\n","sub_path":"public_sentiment/SmzdmPhone/SmzdmPhone/spiders/smzdm.py","file_name":"smzdm.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"433705259","text":"# This is a guess the number game.\r\nimport random\r\n\r\nguessTaken = 0\r\n\r\nprint(\"Hello! What is your name?\")\r\nmyName = input()\r\n\r\nnumber = random.randint(1, 20)\r\nprint(\"Well, {0}, I am thinking of a number between 1 and 20.\".format(myName))\r\n\r\nwhile guessTaken < 6:\r\n print(\"Take a guess.\")\r\n guess = int(input())\r\n guessTaken += 1\r\n\r\n if guess < number:\r\n print(\"Your guess is too low.\")\r\n elif guess > number:\r\n print(\"Your guess is too high.\")\r\n else:\r\n break\r\nif guess == number:\r\n print(\"Good job, {0}! You guessed my number in {1} guesses!\".format(myName, guessTaken))\r\nelse:\r\n print(\"Nope. The number I was thinking of was {}\".format(number))","sub_path":"guessnumber.py","file_name":"guessnumber.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"271427838","text":"from flask import request, redirect, abort, Response\nimport urllib.parse\n\n\nclass RMG_Devices_DeviceKey_Media():\n endpoints = [\"/rmg/devices//media/\"]\n endpoint_name = \"rmg_devices_devicekey_media\"\n endpoint_methods = [\"GET\"]\n\n def __init__(self, fhdhr):\n self.fhdhr = fhdhr\n\n def __call__(self, devicekey, channel, *args):\n return self.get(devicekey, channel, *args)\n\n def get(self, devicekey, channel, *args):\n\n param = request.args.get('method', default=None, type=str)\n self.fhdhr.logger.debug(\"param:%s\" % param)\n\n if not devicekey.startswith(self.fhdhr.config.dict[\"main\"][\"uuid\"]):\n response = Response(\"Not Found\", status=404)\n response.headers[\"X-fHDHR-Error\"] = \"801 - Unknown devicekey\"\n self.fhdhr.logger.error(response.headers[\"X-fHDHR-Error\"])\n abort(response)\n origin = devicekey.split(self.fhdhr.config.dict[\"main\"][\"uuid\"])[-1]\n\n redirect_url = \"/api/tuners?method=stream\"\n\n if str(channel).startswith('id://'):\n channel = str(channel).replace('id://', '')\n elif channel.startswith(\"triplet://\"):\n channel_tuple = channel.replace('triplet://', '').split(\":\")\n self.fhdhr.logger.error(\"Not Implemented %s\" % \":\".join(channel_tuple))\n abort(501, \"Not Implemented %s\" % \":\".join(channel_tuple))\n\n redirect_url += \"&channel=%s\" % (channel)\n redirect_url += \"&origin=%s\" % (origin)\n redirect_url += \"&stream_method=%s\" % self.fhdhr.origins.origins_dict[origin].stream_method\n\n redirect_url += \"&accessed=%s\" % urllib.parse.quote(request.url)\n\n return redirect(redirect_url)\n","sub_path":"plugins/fHDHR_plugin_interface_rmg/web/devices_devicekey_media.py","file_name":"devices_devicekey_media.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"87370479","text":"# coding=utf-8\nimport commands\n\nfrom settings import lrun_uid, lrun_gid\nfrom 
judge_exceptions import CompileError, JudgeClientError\nfrom utils import parse_lrun_output\nfrom logger import logger\n\n\ndef compile_(language_item, src_path, exe_path):\n    compile_command = language_item[\"compile_command\"].format(src_path=src_path, exe_path=exe_path)\n\n    # run the compiler inside lrun to guard against compiler hangs, malicious includes and the like\n    execute_command = \"lrun\" + \\\n                      \" --max-real-time 5\" + \\\n                      \" --uid \" + str(lrun_uid) + \\\n                      \" --gid \" + str(lrun_gid) + \\\n                      \" \" + \\\n                      compile_command + \\\n                      \" 3>&2\"\n    status, output = commands.getstatusoutput(execute_command)\n\n    output_start = output.rfind(\"MEMORY\")\n\n    if output_start == -1:\n        logger.error(\"Compiler error\")\n        logger.error(output)\n        raise JudgeClientError(\"Error running compiler in lrun\")\n\n    # compilation failed if the exit status is non-zero, or if the string \"error\" appears in stderr before lrun's own output;\n    # we match on \"error\" because linking may emit deprecation warnings for some functions,\n    # and the -w flag does not silence link-time warnings\n    if status or \"error\" in output[0:output_start]:\n        raise CompileError(output[0:output_start])\n\n    parse_result = parse_lrun_output(output[output_start:])\n\n    if parse_result[\"exit_code\"] or parse_result[\"term_sig\"] or parse_result[\"siginaled\"] or parse_result[\"exceed\"]:\n        logger.error(\"Compiler error\")\n        logger.error(output)\n        raise CompileError(\"Compile error\")\n    return exe_path\n","sub_path":"judge/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"446770791","text":"import dlib\nimport cv2\n\n#add face detector and object tracker\ndetector = dlib.get_frontal_face_detector()\ntracker = dlib.correlation_tracker()\ndetection = dlib.full_object_detection()\n\n#add gui\nwin = dlib.image_window()\n\n#add camera capture\ncap = cv2.VideoCapture(1)\n\n#declare bounding box for face\nface = dlib.rectangle()\n\ndef findFaces(image):\n\t#update the module-level face/detection instead of shadowing them with locals\n\tglobal face, detection\n\tdets = detector(image, 1)\n\tprint(\"Number of faces detected: {}\".format(len(dets)))\n\tfor k, d in enumerate(dets):\n\t\twin.clear_overlay()\n\t\twin.add_overlay(d)\n\t\tprint(\"Detection: Left: {} Top: {} Right: {} Bottom: {}\".format(\n            d.left(), d.top(), d.right(), d.bottom()))\n\t\tface = dlib.rectangle(d.left(), d.top(), d.right(), d.bottom())\n\t\tdetection = d\n\t\t\nif cap.isOpened():\n\trval, frameRaw = cap.read()\n\tframe = cv2.cvtColor(frameRaw, cv2.COLOR_BGR2RGB)\n\twin.set_image(frame)\n\tfindFaces(frame)\n\t#win.clear_overlay()\n\t#face is set globally by findFaces(); a detector hit is a plain rectangle, so it has no .rect attribute to read\n\t#win.add_overlay(face)\n\tprint(face.height())\n\tdlib.hit_enter_to_continue()\n","sub_path":"cvStuff/faceTracker.py","file_name":"faceTracker.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"553671757","text":"# import custom JS animator\nfrom mlrefined_libraries.JSAnimation_slider_only import IPython_display_slider_only\n\n# import standard plotting and animation\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# import autograd functionality\nfrom autograd import grad as compute_grad   # The only autograd function you may ever need\nimport autograd.numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n# import autograd functionality\nfrom autograd import grad as compute_grad   # The only autograd function you may ever need\nimport autograd.numpy as np\nimport math\n\nclass visualizer:\n    '''\n    This file illustrates the convex sum of two functions in 3d. 
Both functions are defined by the user.\n ''' \n\n # animate the method\n def draw_it(self,**args):\n # user input functions to add\n self.g1 = args['g1'] # input function\n self.g2 = args['g2'] # input function\n num_slides = 100\n if 'num_slides' in args:\n num_slides = args['num_slides']\n \n # turn axis on or off\n set_axis = 'on'\n if 'set_axis' in args:\n set_axis = args['set_axis']\n \n # set viewing angle on plot\n view = [20,50]\n if 'view' in args:\n view = args['view']\n \n # initialize figure\n fig = plt.figure(figsize = (15,5))\n artist = fig\n ax1 = fig.add_subplot(131,projection='3d')\n ax2 = fig.add_subplot(132,projection='3d')\n ax3 = fig.add_subplot(133,projection='3d')\n \n # generate input range for functions\n r = np.linspace(-3,3,200)\n w1_vals,w2_vals = np.meshgrid(r,r)\n w1_vals.shape = (len(r)**2,1)\n w2_vals.shape = (len(r)**2,1)\n g1_vals = self.g1([w1_vals,w2_vals])\n g2_vals = self.g2([w1_vals,w2_vals])\n \n # vals for cost surface\n w1_vals.shape = (len(r),len(r))\n w2_vals.shape = (len(r),len(r))\n g1_vals.shape = (len(r),len(r))\n g2_vals.shape = (len(r),len(r))\n\n # decide on number of slides\n alpha_vals = np.linspace(1,0,num_slides)\n\n # animation sub-function\n def animate(t):\n # clear panels for next slide\n ax1.cla()\n ax2.cla()\n ax3.cla()\n \n # plot function 1\n ax1.plot_surface(w1_vals,w2_vals,g1_vals,alpha = 0.1,color = 'k',rstride=10, cstride=10,linewidth=2,edgecolor = 'k') \n ax1.set_title(\"$g_1$\",fontsize = 15)\n ax1.view_init(view[0],view[1])\n ax1.axis(set_axis)\n\n # plot function 2\n ax2.plot_surface(w1_vals,w2_vals,g2_vals,alpha = 0.1,color = 'k',rstride=10, cstride=10,linewidth=2,edgecolor = 'k') \n ax2.set_title(\"$g_2$\",fontsize = 15)\n ax2.view_init(view[0],view[1])\n ax2.axis(set_axis)\n \n # plot combination of both\n alpha = alpha_vals[t]\n g_combo = alpha*g1_vals + (1 - alpha)*g2_vals\n ax3.plot_surface(w1_vals,w2_vals,g_combo,alpha = 0.1,color = 'k',rstride=10, cstride=10,linewidth=2,edgecolor = 'k') \n ax3.set_title('$\\\\alpha\\,g_1 + (1 - \\\\alpha)\\,g_2$',fontsize = 15)\n ax3.view_init(view[0],view[1])\n ax3.axis(set_axis)\n \n return artist,\n\n anim = animation.FuncAnimation(fig, animate ,frames=num_slides, interval=num_slides, blit=True)\n\n return(anim)","sub_path":"mlrefined_libraries/basics_library/convex_function_addition_3d.py","file_name":"convex_function_addition_3d.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"353765013","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport phonenumber_field.modelfields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Ibo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('ibo_number', models.CharField(max_length=120, verbose_name=b'IBO Number')),\n ('email', models.EmailField(max_length=254, verbose_name=b'Email')),\n ('phone', phonenumber_field.modelfields.PhoneNumberField(max_length=128, verbose_name=b'Phone')),\n ],\n options={\n 'ordering': ['ibo_number'],\n 'verbose_name': 'ibo',\n 'verbose_name_plural': 'ibos',\n },\n ),\n ]\n","sub_path":"Validate_Users/apps/users/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
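For reference, the core computation in the convex_function_addition_3d.py record above is just the pointwise blend alpha*g1_vals + (1 - alpha)*g2_vals evaluated over a meshgrid. A minimal sketch of that computation without the plotting machinery (the two lambdas here are hypothetical stand-ins for the user-supplied g1/g2):

    import numpy as np

    g1 = lambda w: w[0]**2 + w[1]**2             # hypothetical convex input
    g2 = lambda w: np.abs(w[0]) + np.abs(w[1])   # hypothetical convex input

    r = np.linspace(-3, 3, 200)
    w1, w2 = np.meshgrid(r, r)
    for alpha in np.linspace(1, 0, 5):
        # same blend each animation slide plots: a convex combination, so for
        # convex g1 and g2 every intermediate surface is itself convex
        g_combo = alpha * g1([w1, w2]) + (1 - alpha) * g2([w1, w2])
        print(alpha, g_combo.min())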
+{"seq_id":"574664229","text":"# -*- coding: utf-8 -*-\n\"\"\"Implementation of insertion sort.\"\"\"\n\nimport random\nimport itertools\n\n\ndef insertion_sort(x):\n \"\"\"Sort a list using insertion sort.\n\n Arguments:\n x (list) -- the list to sort\n\n Return:\n x (list) -- the list sorted\n \"\"\"\n\n for i in range(1, len(x)):\n j = i\n while j > 0 and x[j] < x[j-1]:\n x[j], x[j-1] = x[j-1], x[j]\n j -= 1\n\n return x\n\n\ndef main():\n \"\"\"The main function. Used to test the other functions.\"\"\"\n\n # Create an unsorted list.\n random.seed()\n x = []\n for _ in itertools.repeat(None, 20):\n x.append(int(100 * random.random()))\n\n print(\"\\nUnsorted list\")\n print(x)\n\n print(\"\\nInsertion sort:\")\n print(insertion_sort(x))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"algorithms/sorting/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"388126758","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 10 09:46:06 2019\n\n@author: user\n\"\"\"\nimport numpy as np\nimport polecad as pc\n\n\n#direction wind is blowing\nnorthwind=(3*np.pi)/2\nnorthwestwind=(7*np.pi)/4\nwestwind=0\nsouthwestwind=(1*np.pi)/4\nsouthwind=np.pi/2\nsoutheastwind=(7*np.pi)/4\neastwind=np.pi\nnortheastwind=(5*np.pi)/4\n\nprojectname='Test2'\nprojectdir='./'+projectname+'/'\n\ndesignTension=0.5\nstrengthFactor=0.85\nwindLoad=9\nwindOverload=1.75\ntensionOverload=1.3\niceThickness=0.0\nwindAngle=(3*np.pi)/2\ndeflectionFactor=1.2\n\npcc=pc.poleCalculationClass()\npcc.poleLaunch(projectname,projectdir,designTension,strengthFactor,windLoad,windOverload,tensionOverload,iceThickness,windAngle,deflectionFactor)","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"400433073","text":"from selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.common.keys import Keys\nimport requests,json\nfirefox_options = Options()\nfirefox_options.add_argument(\"--headless\")\n\nheaders = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",'content-type':'application/json'}\ndriver = webdriver.Firefox(firefox_options=firefox_options)\ndriver.get('https://www.baidu.com')\nelem = driver.find_element_by_name(\"wd\") # 找到输入框的元素\nelem.clear() # 清空输入框里的内容\nelem.send_keys(u\"天气深圳\") # 在输入框中输入'Kali Linux'\nelem.send_keys(Keys.RETURN) # 在输入框中输入回车键\ndriver.implicitly_wait(10) # 隐式等待\ntqtoday = driver.find_element_by_css_selector('.op_weather4_twoicon_today')\ntqelemtitle = driver.find_element_by_css_selector('.c-gap-bottom-small a').text\ntime = tqtoday.find_element_by_css_selector('.op_weather4_twoicon_date').text\nwd = tqtoday.find_element_by_css_selector('.op_weather4_twoicon_temp').text\nweath = tqtoday.find_element_by_css_selector('.op_weather4_twoicon_weath').text\nwind = tqtoday.find_element_by_css_selector('.op_weather4_twoicon_wind').text\n\n\ndata = { \"msgtype\": \"text\",\n \"text\": {\n \"content\": tqelemtitle+'\\n'+time+'\\n'+wd+'\\n'+weath+'\\n'+wind\n },\n }\nurl = 'https://oapi.dingtalk.com/robot/send?access_token=684d740d4e1cf126eb24632a8c9d46591f517a4cdf1ea4176a5a4c01ce4f705e'\nres = 
requests.post(url,data=json.dumps(data),headers=headers)\n\n","sub_path":"pengfuweb/tq2.py","file_name":"tq2.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"454484665","text":"from howtrader.app.cta_strategy import (\n    CtaTemplate,\n    StopOrder\n)\n\nfrom howtrader.trader.object import TickData, BarData, TradeData, OrderData\nfrom howtrader.app.cta_strategy.engine import CtaEngine\nfrom howtrader.trader.object import Status, Direction, ContractData, AccountData\nfrom howtrader.trader.utility import ArrayManager, BarGenerator\nfrom typing import Optional\nfrom decimal import Decimal\n\n\nclass MartingleFutureStrategyV2(CtaTemplate):\n    \"\"\"\n    1. Martingale strategy.\n    Binance referral link: https://www.binancezh.pro/cn/futures/ref/51bitquant\n    Binance futures referral code: 51bitquant\n    \"\"\"\n\n    \"\"\"\n    1. Entry: open a position when price draws down a set percentage (4%) from the recent high.\n    2. Take profit at 2%.\n    3. Add-on: after entry, if price falls more than 5% below the entry and then bounces 1% off the low, add to the position; take profit 2% above the average price.\n    \"\"\"\n    author = \"51bitquant\"\n\n    # core strategy parameters.\n    donchian_window = 2880  # two days\n    open_pos_when_drawdown_pct = 0.04  # open a position when price draws down this fraction from the high.\n\n    dump_down_pct = 0.04  #\n    bounce_back_pct = 0.01  #\n\n    exit_profit_pct = 0.02  # take-profit percentage for exits, 2%\n    initial_trading_value = 1000  # value of the first entry, 1000 USDT.\n    trading_value_multiplier = 1.3  # multiplier applied to each add-on order.\n    max_increase_pos_times = 7  # maximum number of add-on orders\n    trading_fee = 0.00075\n\n    # state variables\n    avg_price = 0.0  # average price of the current position.\n    last_entry_price = 0.0  # price of the most recent entry.\n    current_pos = 0.0  # current position size.\n    current_increase_pos_times = 0  # number of add-on orders so far.\n\n    upband = 0.0\n    downband = 0.0\n    entry_lowest = 0.0  # lowest price seen since entry.\n\n    # running total profit.\n    total_profit = 0\n\n    parameters = [\"donchian_window\", \"open_pos_when_drawdown_pct\", \"dump_down_pct\", \"bounce_back_pct\",\n                  \"exit_profit_pct\", \"initial_trading_value\",\n                  \"trading_value_multiplier\", \"max_increase_pos_times\", \"trading_fee\"]\n\n    variables = [\"avg_price\", \"last_entry_price\", \"current_pos\", \"current_increase_pos_times\",\n                 \"upband\", \"downband\", \"entry_lowest\", \"total_profit\"]\n\n    def __init__(self, cta_engine: CtaEngine, strategy_name, vt_symbol, setting):\n        \"\"\"\"\"\"\n        super().__init__(cta_engine, strategy_name, vt_symbol, setting)\n\n        # Optional[X] is shorthand for Union[X, None]\n        self.last_filled_order: Optional[OrderData] = None\n        self.tick: Optional[TickData] = None\n        self.contract: Optional[ContractData] = None\n        self.account: Optional[AccountData] = None\n        self.bg = BarGenerator(self.on_bar)  # generate 1min bar.\n        self.am = ArrayManager(3000)  # default is 100, we need 3000\n\n        # self.cta_engine.event_engine.register(EVENT_ACCOUNT + 'BINANCE.COIN_NAME', self.process_acccount_event)\n        # self.cta_engine.event_engine.register(EVENT_ACCOUNT + \"BINANCE.USDT\", self.process_account_event)\n\n        self.buy_orders = []  # ids of open buy orders.\n        self.sell_orders = []  # ids of open sell orders.\n        self.min_notional = 11  # minimum notional value per order.\n\n    def on_init(self):\n        \"\"\"\n        Callback when strategy is inited.\n        \"\"\"\n        self.write_log(\"Strategy initializing\")\n        self.load_bar(3)  # load three days of bars.\n\n    def on_start(self):\n        \"\"\"\n        Callback when strategy is started.\n        \"\"\"\n        self.write_log(\"Strategy started\")\n\n    def on_stop(self):\n        \"\"\"\n        Callback when strategy is stopped.\n        \"\"\"\n        self.write_log(\"Strategy stopped\")\n\n    # def process_account_event(self, event: Event):\n    #     self.account: AccountData = event.data\n    #     if self.account:\n    #         print(\n    #             f\"self.account: available{self.account.available}, balance:{self.account.balance}, frozen: {self.account.frozen}\")\n\n    def on_tick(self, tick: TickData):\n        \"\"\"\n        Callback of new tick data update.\n        \"\"\"\n        if tick.bid_price_1 > 0 and tick.ask_price_1 > 0:\n            self.tick 
= tick\n            self.bg.update_tick(tick)\n\n    def on_bar(self, bar: BarData):\n        \"\"\"\n        Callback of new bar data update.\n        \"\"\"\n        am = self.am\n        am.update_bar(bar)\n        if not am.inited:\n            return\n\n        current_close = am.close_array[-1]\n        current_low = am.low_array[-1]\n\n        self.upband, self.downband = am.donchian(self.donchian_window, array=False)  # latest Donchian channel bands.\n\n        dump_pct = self.upband / current_low - 1\n\n        if self.entry_lowest > 0:\n            self.entry_lowest = min(self.entry_lowest, bar.low_price)\n\n        # entry: price has pulled back a set fraction from the channel high.\n        if self.current_pos * current_close < self.min_notional:\n            # every order must be at least 10 USDT; 11 USDT is used for simplicity.\n            if dump_pct >= self.open_pos_when_drawdown_pct and len(self.buy_orders) == 0:\n                # no position here.\n                # reset the per-cycle state.\n                self.cancel_all()\n                self.current_increase_pos_times = 0\n                self.avg_price = 0\n                self.entry_lowest = 0\n\n                price = current_close\n                vol = self.initial_trading_value / price\n                orderids = self.buy(Decimal(price), Decimal(vol))\n                self.buy_orders.extend(orderids)  # remember the ids of the orders just placed.\n        else:\n            if len(self.sell_orders) <= 0 < self.avg_price:\n                # close the position once in profit,\n                # cancelling any remaining buy orders first.\n\n                profit_percent = bar.close_price / self.avg_price - 1\n                if profit_percent >= self.exit_profit_pct:\n                    self.cancel_all()\n                    orderids = self.short(Decimal(bar.close_price), Decimal(abs(self.current_pos)))\n                    self.sell_orders.extend(orderids)\n\n            if self.entry_lowest > 0 >= len(self.buy_orders):\n                # add-on conditions: 1) there is a position worth more than 11 USDT, 2) the number of add-ons is below the maximum, 3) price has dropped a set percentage below the last entry and bounced back off the low.\n\n                dump_down_pct = self.last_entry_price / self.entry_lowest - 1\n                bounce_back_pct = bar.close_price / self.entry_lowest - 1\n\n                if self.current_increase_pos_times <= self.max_increase_pos_times and dump_down_pct >= self.dump_down_pct and bounce_back_pct >= self.bounce_back_pct:\n                    # ** is exponentiation.\n                    self.cancel_all()  # cancel remaining sell orders.\n                    increase_pos_value = self.initial_trading_value * self.trading_value_multiplier ** self.current_increase_pos_times\n                    # if self.account and self.account.available >= increase_pos_value:\n                    price = bar.close_price\n                    vol = increase_pos_value / price\n                    orderids = self.buy(Decimal(price), Decimal(vol))\n                    self.buy_orders.extend(orderids)\n\n        self.put_event()\n\n    def on_order(self, order: OrderData):\n        \"\"\"\n        Callback of new order data update.\n        \"\"\"\n        if order.status == Status.ALLTRADED:\n            if order.direction == Direction.LONG:\n                # a buy order filled completely.\n\n                self.current_increase_pos_times += 1\n                self.last_entry_price = float(order.price)  # record the price of the latest fill.\n                self.entry_lowest = float(order.price)\n\n        if not order.is_active():\n            if order.vt_orderid in self.sell_orders:\n                self.sell_orders.remove(order.vt_orderid)\n\n            elif order.vt_orderid in self.buy_orders:\n                self.buy_orders.remove(order.vt_orderid)\n\n        self.put_event()  # refresh the UI.\n\n    def on_trade(self, trade: TradeData):\n        \"\"\"\n        Callback of new trade data update.\n        \"\"\"\n        if trade.direction == Direction.LONG:\n            total = self.avg_price * self.current_pos + float(trade.price) * float(trade.volume)\n            self.current_pos += float(trade.volume)\n            self.avg_price = total / self.current_pos\n        elif trade.direction == Direction.SHORT:\n            self.current_pos -= float(trade.volume)\n\n            # update the running total profit, net of round-trip fees.\n            self.total_profit += (float(trade.price) - self.avg_price) * float(trade.volume) - float(trade.volume) * float(trade.price) * 2 * self.trading_fee\n\n        self.put_event()\n\n    def on_stop_order(self, stop_order: StopOrder):\n        \"\"\"\n        Callback of stop order update.\n        \"\"\"\n        
pass\n","sub_path":"howtrader/app/cta_strategy/strategies/martingle_future_strategyV2.py","file_name":"martingle_future_strategyV2.py","file_ext":"py","file_size_in_byte":8491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"236826109","text":"#TCP server example\nimport socket\nimport sys\nfrom _thread import *\n\nHOST = '127.0.0.1'\nPORT = 5004\n\nprint('Hulti server booting up')\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nprint(\"Socket created\")\n\ntry:\n\tserver.bind((HOST, PORT))\n\tprint('Socket bind ok ')\nexcept socket.error as msg:\n\tprint('Bind failed. Error code: ' + str(msg[0]) + ' Message: ' + msg[1])\n\tsys.exit()\n\nserver.listen(5)\nprint('Server listening on: ' + str(HOST) + ':' + str(PORT))\n\n#function that checks the message and allocates it to the \n#appropriate 'tower'\ndef checkMessage(message):\n\tif not message: \n\t\tprint('message is empty...')\n\tif 'T0' in message: \n\t\tprint('this is a message to tower 0')\n\tif 'T1' in message: \n\t\tprint('this is a message to tower 1')\n\tif 'T2' in message: \n\t\tprint('this is a message to tower 2')\n\tif 'T3' in message: \n\t\tprint('this is a message to tower 3')\n\t\t\n#function for handling connections. Used to create threads\ndef clientthread(conn):\n\tconn.send('Welcome to the void'.encode())\n\twhile True:\n\t\t#receive data from the client\n\t\t#this is what you need to parse serially\n\n\t\tdata = conn.recv(1024)\n\t\tcheckMessage(data.decode())\n\t\treply = 'Server Received: ' + str(data)\n\t\tprint('Received: ' + data.decode())\n\t\tif not data:\n\t\t\tbreak\n\t\tconn.sendall(reply.encode())\n\t#loop broken\n\tconn.close()\n\nwhile 1: \n\t#wait to accept a connection - blocking call\n\tconn, addr = server.accept()\n\tprint('Connected to client at: ' +addr[0] + ':' + str(addr[1]))\n\n\t#start new thread. 
Takes 1st argument as function name to be run\n\tstart_new_thread(clientthread, (conn,))\n\nserver.close()","sub_path":"scripts/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"28589746","text":"import exceptions\nimport pycfitsio as fits\nimport os\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom utils import *\n\nREVCOUNTER_LABEL = {10:'REVCOUNTER_15GHZ', 15:'REVCOUNTER_10GHZ'}\n\nWRAPPING_FIELDS_PI = ['HYBRIDHEADINGANGLE', 'HYBRIDYAWANGLE']\nROTATION = 80/2e-9\nWRAPPING_FIELDS_360 = ['HEADING']\n\nDEVICES = ['ANALOGCHANNELS', 'MAGNETOMETER', 'ASHTECH', 'GYRO_HID']\n\ndef find_clock_offsets_from_gpstime(cc, gpstime):\n \"\"\"Computes computerClock offsets after restarts using gpstime\n\n all negative jumps in computerClock mean a restart.\n if the restart is between sample j and j+1, we compute\n the needed offset as:\n cc[j] - cc[j+1] + (gpstime[j+1] - gpstime[j])/2e-9\n where:\n cc[j+1] - cc[j] is the current jump in computerClock\n (gpstime[j+1] - gpstime[j]) /2e-9 is the correct jump computed from gpstime so we actually remove the current jump and add back\n the correct jump converted from gpstime\n \n Parameters\n ----------\n cc : ndarray\n computerClock array\n gpstime : ndarray\n gpstime array\n\n Returns\n -------\n offsets : ndarray\n offsets to apply to computerclock to compensate for restarts\n \"\"\"\n\n print('Find computerClock offsets')\n offsets_indices, = np.where(np.diff(cc)<0)\n print('Found clock jumps at indices %s, at relative position %s' % (str(offsets_indices), str(offsets_indices.astype(np.double)/len(cc)) ))\n offsets = [] \n for j in offsets_indices:\n offsets.append(cc[j] + (gpstime[j+1] - gpstime[j])/2e-9 - cc[j+1])\n cc = apply_cc_offsets(cc, offsets)\n return offsets\n\ndef apply_cc_offsets(cc, offsets):\n \"\"\"Blindly apply offsets computed from gpstime to cc\n\n offsets are blindly summed to the negative jumps\n in the cc array.\n\n Parameters\n ----------\n cc : ndarray\n computerClock to be corrected for restarts\n offsets : ndarray\n precomputed offsets\n\n Returns\n -------\n cc : ndarray\n corrected computerClock\n \"\"\"\n jumps, = np.where(np.diff(cc)<0)\n # apply offsets estimated with gpstime to computerclock of the revcounter\n if len(jumps) < len(offsets):\n print('Missing data in device')\n for index, offset in zip(jumps, offsets[:len(jumps)]):\n cc[index+1:] += offset\n return cc\n\ndef create_science_computerclock(gyro, revcounter, data_rev, offsets):\n \"\"\"Syncronizes science and gyro using the revcounter\n\n the revcounter has several gaps, so we first interpolate it uniformly\n at 140 Hz and then we interpolate the uniform revcounter computerclock\n to the science sampling rate using the revcounters\n\n Parameters\n ----------\n gyro : OrderedDict\n gyro data\n revcounter : OrderedDict\n revcounter data\n data_rev : ndarray\n revcounter of the scientific channel\n offsets : ndarray\n cc offsets from gpstime\n\n Returns\n -------\n sci_cc : ndarray\n syncronized scientific computerclock\n \"\"\"\n # servo revcounter is cleaned up from restarts and jumps\n servo_range = remove_reset(revcounter['VALUE'], offsetsci=data_rev[0])\n\n # apply offsets to revcounter cc\n revcounter['COMPUTERCLOCK'] = apply_cc_offsets(revcounter['COMPUTERCLOCK'], offsets)\n\n # oversample revcounter cc and value to 140 Hz in order to interpolate over gaps\n uniform_rev_cc = 
np.arange(revcounter['COMPUTERCLOCK'][servo_range][0], revcounter['COMPUTERCLOCK'][servo_range][-1], (1/1000.)/2.e-9, dtype=np.double)\n uniform_rev = np.interp( uniform_rev_cc, revcounter['COMPUTERCLOCK'][servo_range].astype(np.double), revcounter['VALUE'][servo_range].astype(np.double))\n\n flag = np.ceil(np.interp(uniform_rev_cc, revcounter['COMPUTERCLOCK'][1:], np.diff(revcounter['COMPUTERCLOCK'])>ROTATION))\n\n\n # create science data computer clock\n sci_cc = np.around(np.interp(data_rev.astype(np.double), uniform_rev.astype(np.double), uniform_rev_cc.astype(np.double)).astype(np.long))\n\n norevcountflag = np.ceil(np.interp(sci_cc, uniform_rev_cc, flag)).astype(np.uint8)\n\n return sci_cc, norevcountflag\n\ndef create_ut_from_gpstime(gpstime):\n # Fix gpstime to create the UT column\n fixed = np.mod((gpstime + 15.)/3600., 24.)\n # remove single sample jumps\n good = np.ones(len(fixed),dtype=np.bool)\n good[index_of_single_sample_jumps(fixed)] = False\n\n # get just the good samples\n gpstime_index = np.arange(len(gpstime))[good]\n\n # interpolate back to original length\n fixed = np.interp(np.arange(len(gpstime)), gpstime_index, fixed[good])\n\n # unwrap ut at 24 hours\n day_change_index, = np.where(np.diff(fixed)<-23)\n assert len(day_change_index) == 1\n fixed[day_change_index[0]+1:] += 24\n return fixed\n\ndef create_ut_from_cc(gyro):\n \"\"\"Create UT array from computerclock\n\n UT is defined as UT hour of the first day,\n e.g. 10 is 10am of the first day\n and monotonically increasing after 24,\n so 27.5 is 3:30am of the second day.\n \n Parameters\n ----------\n gyro : OrderedDict\n gyro data\n \n Returns\n -------\n utcc : ndarray\n computerclock of ut array\n ut : ndarray\n ut array\n \"\"\"\n # check that gyro computerclock is already fixed\n assert np.all(np.diff(gyro['COMPUTERCLOCK']) >= 0)\n ut = np.mod((gyro['GPSTIME'] + 15.)/3600., 24.)\n\n utcc = gyro['COMPUTERCLOCK']\n ut = ut[0] + (gyro['COMPUTERCLOCK'] - gyro['COMPUTERCLOCK'][0]) * 2e-9 / 3600.\n\n return utcc, ut\n\ndef create_utscience(sci_file, gyro, revcounter, offsets, utcc, ut, freq):\n \"\"\"Create file with science data with fixed CC and UT\n see create_utservo\n \"\"\"\n\n data = fits.read(sci_file)\n\n splitted_data = OrderedDict()\n splitted_data['TIME'] = OrderedDict()\n for ch_n in range(16):\n ch_name = 'CH%d_' % ch_n\n splitted_data[ch_name] = OrderedDict()\n for comp in 'TQU':\n splitted_data[ch_name][comp] = data[ch_name + comp]\n splitted_data['TIME']['COMPUTERCLOCK'], splitted_data['TIME']['NOREVCOUNTFLAG']= create_science_computerclock(gyro, revcounter, data['REV'], offsets)\n splitted_data['TIME']['UT'] = np.interp(splitted_data['TIME']['COMPUTERCLOCK'], utcc, ut)\n\n filename = '%s_%dGHz_data.fits' % (os.path.basename(sci_file).split('.')[0], freq)\n print('Writing ' + filename)\n fits.write(filename, splitted_data)\n\n return splitted_data\n\ndef create_utservo(servo_file, offsets, utcc, ut):\n \"\"\"Create file with servo data with fixed CC and UT\n\n Parameters\n ----------\n servo_file : str\n filename of servo data\n offsets : ndarray\n CC offsets computed from gpstime, see find_clock_offsets_from_gpstime\n utcc : ndarray\n CC array related to UT\n ut : ndarray\n UT array\n\n Returns\n -------\n writes utservo.fits file to disk\n \"\"\"\n print(\"Create UT timestamped servo data\")\n\n utservo = OrderedDict()\n for device in DEVICES:\n print('Processing ' + device)\n utservo[device] = fits.read(servo_file, device)\n utservo[device]['COMPUTERCLOCK'] = 
apply_cc_offsets(utservo[device]['COMPUTERCLOCK'], offsets)\n utservo[device]['UT'] = np.interp( utservo[device]['COMPUTERCLOCK'], utcc, ut)\n\n filename = 'utservo.fits'\n print('Writing ' + filename)\n fits.write(filename, utservo)\n return utservo\n\ndef create_sync_servo(servo_file, offsets, utcc, ut, sci_cc, freq):\n \"\"\"Create synchronized servo data\n\n Parameters\n ----------\n servo_file : srt\n filename of servo data\n offsets : ndarray\n CC offsets computed from gpstime, see find_clock_offsets_from_gpstime\n utcc : ndarray\n CC array related to UT\n ut : ndarray\n UT array\n sci_cc : ndarray\n CC array of scientific data\n freq : int\n frequency\n \"\"\"\n\n print(\"Create synchronized servo data to %dGHz\" % freq)\n filename = '%s_%dGHz_servo.fits' % (os.path.basename(servo_file).split('.')[0], freq)\n print('Writing ' + filename)\n f = fits.create(filename)\n ext = OrderedDict()\n ext['COMPUTERCLOCK'] = sci_cc\n ext['UT'] = np.interp(sci_cc, utcc, ut)\n f.write_HDU('TIME', ext)\n for device in DEVICES:\n print('Processing ' + device)\n raw_data = fits.read(servo_file, device)\n ext = OrderedDict()\n cc = apply_cc_offsets(raw_data['COMPUTERCLOCK'], offsets)\n for colname, colarray in raw_data.iteritems():\n if colname != 'COMPUTERCLOCK':\n print('Column ' + colname)\n ext[colname] = np.interp(sci_cc, cc, colarray)\n f.write_HDU(device, ext)\n f.close()\n \ndef fix_gyro_cc(gyro, ut):\n return gyro['COMPUTERCLOCK'][0] + (ut-ut[0])*3600/2e-9\n \ndef process_level1(base_folder='/COFE', day='all', use_cc=True):\n \"\"\"Full processing to produce Level1 data\n \n Parameters\n ----------\n base_folder : str\n path to data\n day : str\n day to be processed\n freq : int\n frequency\n \"\"\"\n gyro = fits.read(os.path.join(base_folder, 'servo', '%s.fits' % day), 'GYRO_HID')\n offsets = find_clock_offsets_from_gpstime(gyro['COMPUTERCLOCK'], gyro['GPSTIME'])\n if use_cc:\n utcc, ut = create_ut_from_cc(gyro)\n else:\n ut = create_ut_from_gpstime(gyro['GPSTIME'])\n gyro['COMPUTERCLOCK'] = fix_gyro_cc(gyro, ut)\n utcc = gyro['COMPUTERCLOCK']\n servo_file = os.path.join(base_folder,'servo','%s.fits' % day)\n create_utservo(servo_file, offsets, utcc, ut)\n for freq in [10, 15]:\n revcounter = fits.read(os.path.join(base_folder, 'servo', '%s.fits' % day), REVCOUNTER_LABEL[freq])\n sci = create_utscience(os.path.join(base_folder,str(freq),'%s.fits'%day), gyro, revcounter, offsets, utcc, ut, freq)\n create_sync_servo(servo_file, offsets, utcc, ut, sci['TIME']['COMPUTERCLOCK'], freq)\n\nif __name__ == '__main__':\n process_level1()\n #base_folder='/COFE'; day='all'\n #gyro = fits.read(os.path.join(base_folder, 'servo', '%s.fits' % day), 'GYRO_HID')\n #offsets = find_clock_offsets_from_gpstime(gyro['COMPUTERCLOCK'], gyro['GPSTIME'])\n #use_cc = True\n\n #servo_file = os.path.join(base_folder,'servo','%s.fits' % day)\n #utservo = create_utservo(servo_file, offsets, utcc, ut)\n #freq = 10\n #revcounter = fits.read(os.path.join(base_folder, 'servo', '%s.fits' % day), REVCOUNTER_LABEL[freq])\n #sci = create_utscience(os.path.join(base_folder,str(freq),'%s.fits'%day), gyro, revcounter, offsets, utcc, ut, freq)\n ##create_sync_servo(servo_file, offsets, utcc, ut, sci_cc, freq)\n #ra=slice(866653+1500,869045-700+1)\n #figure()\n #plot(sci['TIME']['UT'][ra],sci['CH1_']['T'][ra])\n #from smooth import smooth\n #figure()\n #plot(np.interp( sci['TIME']['UT'][ra], utservo['GYRO_HID']['UT'], utservo['GYRO_HID']['HYBRIDHEADINGANGLE'] ), sci['CH1_']['T'][ra])\n #plot(smooth(np.interp( sci['TIME']['UT'][ra], 
utservo['GYRO_HID']['UT'], utservo['GYRO_HID']['HYBRIDHEADINGANGLE'] ),30), sci['CH1_']['T'][ra])\n","sub_path":"utils_zonca/sync/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":10965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"34896228","text":"import socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nsock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\nsock.connect(('127.0.0.1',8888))\nprint(sock.recv(1024).decode())\nwhile 1:\n    sock.send(input().encode())\n    data = sock.recv(1024).decode()\n    if(\"Game Lost\" in data or \"Game won!!\" in data):\n        print(data)\n        break\n    print(data)\n","sub_path":"project/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"380304926","text":"#######################################\n# LDIF Modifier v2.2\n# Date: 06.01.2018\n#\n# take in an LDIF file, parse & ID the target user attribute.\n# Modify based on config file passed to program in input file\n# Input:\n# ./Input/*.LDIF\n# ./Input/*.yml <-- not needed in this release\n# Output:\n# ./Output/*.LDIF\n#######################################\n\n#######################################\n### CodeBlock: Import Modules\n### & Define Functions\n#######################################\n### Import modules\n# import os, json\nimport os, re\n\n\n### Define functions\n# Function: Find String in child directory listings, non-recursive\ndef matchEP_fileName( curDirList, searchString):\n    output = [entry for entry in curDirList if searchString in entry]\n    return output\n\n#######################################\n### CodeBlock: Directory operations\n#######################################\ndirDict={} #Dictionary of directories and important folders\ncurDir = os.path.abspath(os.path.dirname(__file__)) ###EnvSpecific: Uncomment for Production\ninDir = curDir + \"/Input\"\noutDir = curDir + \"/Output\"\n#If output does not exist, create it\nif not os.path.exists(outDir):\n    os.makedirs(outDir)\n#Test if input folder is in the application directory\nif os.path.isdir(inDir) == False:\n    print (\"No Input Folder found\\nInput folder should be app folder ./Input/ \\nTerminating process\")\n    quit()\ndirDict[\"ldifFiles\"] = matchEP_fileName ( os.listdir(inDir), \"ldif\") # Find matches in input directory for config & ldif files\ndirDict[\"configFile\"] = matchEP_fileName ( os.listdir(inDir), \"config\")\n\n\n\n\n#######################################\n### CodeBlock: Read config file\n#######################################\n#Test if input has at least 1 ldif, and a config.json ###COMMENTED OUT BECAUSE THE CONFIG FILE IS NOT YET USED\n# if os.path.exists(inDir + \"/\" + dirDict[\"ldifFiles\"][0]) == False or os.path.exists(inDir + \"/\" + dirDict[\"configFile\"][0]) == False : #If not input, quit the program\n#     print (\"Input folder missing files\\nCheck for files:\\n\\t./Input/config.json\\n\\t./Input/.ldif\\nRefer to readme for more information\")\n#     quit()\n\n# with open(inDir + \"/config.json\") as json_data_file: #open, read and close the json\n#     jsonData = json.load(json_data_file)\n#     #Do something with the json in the future.... 
maybe....\n\n\n#######################################\n### CodeBlock: Read and Replace LDIF\n#######################################\nfor LDIF in dirDict[\"ldifFiles\"]:\n    with open(inDir + \"/\" + LDIF) as ldifFile:\n        newLDIF = [] #Create new LDIF per run of program\n        lines = ldifFile.readlines()\n        for lineInd in range(0, len(lines)): #For each index from range 0 to length of LDIF file, do the following\n            prevLine = lines[lineInd-1]\n            curLine = lines[lineInd]\n            attr = curLine.split(':')\n            usrAttr = \"\" #initialize the user attribute string as empty\n            #Catch: if the line has multiple : in the email field it should not be delimited after the first delimiter e.g. usrAtt: emailname:goeshere@target.com --> should only be split into 2 fields\n            for tempString in attr[1:]:\n                usrAttr = usrAttr + tempString\n            if prevLine.startswith('dn: '): #if on line for proper distinguished name of user\n                newLDIF.append(\"changetype: modify\\n\") #if first row after distinguished name, add new line for change Type\n                newLDIF.append(\"replace: \" + attr[0] + '\\n')\n                newLDIF.append(attr[0] + ':'+ usrAttr.replace('@target.com',\"@iamcorpqa.target.com\"))\n            elif (re.match(r'version*',curLine , re.IGNORECASE) is not None) or (curLine == '\\n'):\n                newLDIF.append('\\n')\n                continue\n            elif re.match(r'dn*',curLine , re.IGNORECASE) is not None:\n                newLDIF.append(curLine)\n                continue\n            else:\n                caseInsenRegEx = re.compile(re.escape('@target.com'),re.IGNORECASE)\n                newLDIF.append('-\\n')\n                newLDIF.append(\"replace: \" + attr[0] + '\\n')\n                newLDIF.append(caseInsenRegEx.sub('@iamcorpqa.target.com',curLine))\n    with open (outDir + \"/mod_\" + LDIF, 'w+') as newfile:\n        for line in newLDIF:\n            newfile.write(line)\n\n#######################################\n### CodeBlock: Quick and Dirty:\n### No config read, just replace all instances\n### of hard coded string into another string\n#######################################\n# ldifData = open(inDir + \"/\" + dirDict[\"ldifFiles\"][0]).read()\n# ldifData = ldifData.replace('@target.com',\"@iamcorpqa.target.com\")\n# newLDIFData = open(inDir + \"/\" + dirDict[\"ldifFiles\"][0], 'w')\n# newLDIFData.write(ldifData)\n# newLDIFData.close()\n","sub_path":"LDIF_Modifier/LDIFmodifier.py","file_name":"LDIFmodifier.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"254221800","text":"\nimport numpy as np\n\nradToDeg = 180./np.pi\n\ndef phaseTrigSinglet(R,M,q):\n\t\n\tkappa = -np.pi*M*q*R/2\n\tdelta = np.arctan(kappa)*radToDeg\n\treturn delta\n\t\ndef phaseLogSinglet(T,M,q):\n\t\n\tZ = -M*q*2j*T*np.pi/2+1\n\tdelta = -0.5*1j*np.log(Z)*radToDeg\n\treturn delta\n\ndef phaseTrigTriplet(R11, R12, R21, R22, M,q):\n\t# Blatt-Biedenharn (BB) convention\n\ttwoEpsilonJ_BB = np.arctan(2*R12/(R11-R22))\t# mixing parameter\n\tdelta_plus_BB = np.arctan(-(np.pi/4)*q*M*(R11 + R22 - (R11 - R22)/(np.cos(twoEpsilonJ_BB))))\n\tdelta_minus_BB = np.arctan(-(np.pi/4)*q*M*(R11 + R22 + (R11 - R22)/(np.cos(twoEpsilonJ_BB))))\n\t\n\tdelta_minus, delta_plus, epsilon = blattToStapp(delta_minus_BB, delta_plus_BB, twoEpsilonJ_BB)\n\t\n\treturn delta_minus, delta_plus, epsilon\n\t\ndef phaseLogTriplet(T11, T12, T21, T22, M,q):\n\tfac = 0.5*np.pi*M*q\n\t# Blatt-Biedenharn (BB) convention\n\ttwoEpsilonJ_BB = np.arctan(2*T12/(T11-T22))\t# mixing parameter\n\tdelta_plus_BB = -0.5*1j*np.log(1 - 1j*fac*(T11+T22) + 1j*fac*(2*T12)/np.sin(twoEpsilonJ_BB))\n\tdelta_minus_BB = -0.5*1j*np.log(1 - 1j*fac*(T11+T22) - 
1j*fac*(2*T12)/np.sin(twoEpsilonJ_BB))\n\t\n\tdelta_minus, delta_plus, epsilon = blattToStapp(delta_minus_BB, delta_plus_BB, twoEpsilonJ_BB)\n\t\n\treturn delta_minus, delta_plus, epsilon\n\ndef blattToStapp(delta_minus_BB, delta_plus_BB, twoEpsilonJ_BB):\n\t\n\t# Stapp convention (bar-phase shifts) in terms of Blatt-Biedenharn convention\n\ttwoEpsilonJ = np.arcsin(np.sin(twoEpsilonJ_BB)*np.sin(delta_minus_BB - delta_plus_BB))\t# mixing parameter\n\tdelta_minus\t= 0.5*(delta_plus_BB + delta_minus_BB + np.arcsin(np.tan(twoEpsilonJ)/np.tan(twoEpsilonJ_BB)))*radToDeg\n\tdelta_plus\t= 0.5*(delta_plus_BB + delta_minus_BB - np.arcsin(np.tan(twoEpsilonJ)/np.tan(twoEpsilonJ_BB)))*radToDeg\n\tepsilon\t\t= 0.5*twoEpsilonJ*radToDeg\n\t\n\treturn delta_minus, delta_plus, epsilon\n\ndef calcPhase(Tlist, qList, M, J, matType):\n\t\n\tmixes = []\n\t\n\tif J==0:\n\t\tdelta = [[],[]]\n\t\tfor i in xrange(len(Tlist)):\n\t\t\t\n\t\t\tT = Tlist[i]\n\t\t\tq = qList[i]\n\t\t\t\n\t\t\tif matType == \"T\":\n\t\t\t\tdelta_0 = phaseLogSinglet(T[0],M,q)\n\t\t\t\tdelta_1 = phaseLogSinglet(T[1],M,q)\n\t\t\telse:\n\t\t\t\tdelta_0 = phaseTrigSinglet(T[0],M,q)\n\t\t\t\tdelta_1 = phaseTrigSinglet(T[1],M,q)\n\t\t\t\n\t\t\tdelta[0].append( delta_0 )\n\t\t\tdelta[1].append( delta_1 )\n\telse:\n\t\tdelta = [[],[],[],[]]\n\t\tfor i in xrange(len(Tlist)):\n\t\t\t\n\t\t\tT = Tlist[i]\n\t\t\tq = qList[i]\n\t\t\t\n\t\t\tif matType == \"T\":\n\t\t\t\tdelta_0 = phaseLogSinglet(T[0],M,q)\n\t\t\t\tdelta_1 = phaseLogSinglet(T[1],M,q)\n\t\t\t\tdelta_minus, delta_plus, epsilon = phaseLogTriplet(T[2], T[3], T[4], T[5], M,q)\n\t\t\telse:\n\t\t\t\tdelta_0 = phaseTrigSinglet(T[0],M,q)\n\t\t\t\tdelta_1 = phaseTrigSinglet(T[1],M,q)\n\t\t\t\tdelta_minus, delta_plus, epsilon = phaseTrigTriplet(T[2], T[3], T[4], T[5], M,q)\n\t\t\t\n\t\t\tdelta[0].append( delta_0 )\n\t\t\tdelta[1].append( delta_1 )\n\t\t\tdelta[2].append( delta_minus )\n\t\t\tdelta[3].append( delta_plus )\n\t\t\tmixes.append( epsilon )\n\t\t\t\n\treturn delta, mixes\n","sub_path":"Python/Calculate_phases/Calculate_phase.py","file_name":"Calculate_phase.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"239091465","text":"import argparse\n\nimport numpy as np\nimport h5py\nfrom tqdm import tqdm\nfrom keras.models import load_model\n\n\ndef main(args):\n if args.trk_var:\n import imp\n trk_mod = imp.load_source(\"trk_mod\", args.trk_var)\n trk_var = trk_mod.invars\n else:\n from rnn_tauid.common.variables import track_vars as trk_var\n\n if args.cls_var:\n import imp\n cls_mod = imp.load_source(\"cls_mod\", args.variables)\n cls_var = cls_mod.invars\n else:\n from rnn_tauid.common.variables import cluster_vars as cls_var\n \n # Load jet variables\n if args.jet_var:\n import imp\n jet_var = imp.load_source(\"jet_var\", args.jet_var)\n invars_ffnn = jet_var.invars\n else:\n if \"1p\" in args.data.lower():\n from rnn_tauid.common.variables import id1p_vars as invars_ffnn\n elif \"3p\" in args.data.lower():\n from rnn_tauid.common.variables import id3p_vars as invars_ffnn\n else:\n print(\"Could not infer prongness from sample name.\")\n sys.exit()\n\n # Load preprocessing rules\n with h5py.File(args.preprocessing_track, \"r\") as f:\n pp_invars = np.char.decode(f[\"variables\"][...]).tolist()\n trk_offset = {v: f[v + \"/offset\"][...] for v in pp_invars}\n trk_scale = {v: f[v + \"/scale\"][...] 
for v in pp_invars}\n\n with h5py.File(args.preprocessing_cluster, \"r\") as f:\n pp_invars = np.char.decode(f[\"variables\"][...]).tolist()\n cls_offset = {v: f[v + \"/offset\"][...] for v in pp_invars}\n cls_scale = {v: f[v + \"/scale\"][...] for v in pp_invars}\n\n with h5py.File(args.preprocessing_jet, \"r\") as f:\n pp_invars = np.char.decode(f[\"variables\"][...]).tolist()\n jet_offset = {v: f[v + \"/offset\"][...] for v in pp_invars}\n jet_scale = {v: f[v + \"/scale\"][...] for v in pp_invars}\n\n\n # Load model\n model = load_model(args.model)\n num_trk = 10\n num_cls = 6\n\n # Load the data\n h5file = dict(driver=\"family\", memb_size=10*1024**3)\n with h5py.File(args.data, \"r\", **h5file) as data:\n length = len(data[\"TauJets/pt\"])\n n_vars_trk = len(trk_var)\n n_vars_cls = len(cls_var)\n n_vars_ffnn = len(invars_ffnn)\n\n chunksize = 500000\n chunks = [(i, min(length, i + chunksize))\n for i in range(0, length, chunksize)]\n\n x_trk = np.empty((chunksize, num_trk, n_vars_trk))\n x_cls = np.empty((chunksize, num_cls, n_vars_cls))\n x_jet = np.empty((chunksize, n_vars_ffnn))\n pred = np.empty(length, dtype=np.float32)\n\n for start, stop in tqdm(chunks):\n src_trk = np.s_[start:stop, :num_trk]\n src_cls = np.s_[start:stop, :num_cls]\n src_jet = np.s_[start:stop]\n lslice = stop - start\n\n for i, (varname, func, _) in enumerate(trk_var):\n dest = np.s_[:lslice, ..., i]\n if func:\n func(data, x_trk, source_sel=src_trk, dest_sel=dest)\n else:\n data[varname].read_direct(x_trk, source_sel=src_trk, dest_sel=dest)\n\n x_trk[dest] -= trk_offset[varname]\n x_trk[dest] /= trk_scale[varname]\n\n for i, (varname, func, _) in enumerate(cls_var):\n dest = np.s_[:lslice, ..., i]\n if func:\n func(data, x_cls, source_sel=src_cls, dest_sel=dest)\n else:\n data[varname].read_direct(x_cls, source_sel=src_cls, dest_sel=dest)\n\n x_cls[dest] -= cls_offset[varname]\n x_cls[dest] /= cls_scale[varname]\n\n for i, (varname, func, _) in enumerate(invars_ffnn):\n dest = np.s_[:lslice, ..., i]\n if func:\n func(data, x_jet, source_sel=src_jet, dest_sel=dest)\n else:\n data[varname].read_direct(x_jet, source_sel=src_jet, dest_sel=dest)\n\n x_jet[dest] -= jet_offset[varname]\n x_jet[dest] /= jet_scale[varname]\n\n # Replace nans\n x_trk[np.isnan(x_trk)] = 0\n x_cls[np.isnan(x_cls)] = 0\n x_jet[np.isnan(x_jet)] = 0\n\n # Predict\n pred[start:stop] = model.predict(\n [x_trk[:lslice], x_cls[:lslice], x_jet[:lslice]],\n batch_size=256).ravel()\n\n with h5py.File(args.outfile, \"w\") as outf:\n outf[\"score\"] = pred\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"preprocessing_track\")\n parser.add_argument(\"preprocessing_cluster\")\n parser.add_argument(\"preprocessing_jet\")\n parser.add_argument(\"model\")\n parser.add_argument(\"data\")\n parser.add_argument(\"--v-trk\", dest=\"trk_var\", default=None)\n parser.add_argument(\"--v-cls\", dest=\"cls_var\", default=None)\n parser.add_argument(\"--v-jet\", dest=\"jet_var\", default=None)\n parser.add_argument(\"-o\", dest=\"outfile\", default=\"pred.h5\")\n\n args = parser.parse_args()\n main(args)\n","sub_path":"scripts/id/deco_combined_tauid_trk_cls.py","file_name":"deco_combined_tauid_trk_cls.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"410765822","text":"# Verificador de Ip externo\n\nimport re\nimport json\nfrom urllib.request import urlopen\n\nurl = 'http://ipinfo.io/json'\n\nresposta = 
urlopen(url)\n\ndados = json.load(resposta)\n\nip = dados['ip']\norg = dados['org']\ncidade = dados['city']\npais = dados['country']\nregiao = dados['region']\n\nprint('Detalhes do IP externo\\n')\nprint('IP: {4}\\n Região: {1}\\n Pais: {2}\\n Cidade: {3}\\n Org.: {0}'.format(org, regiao, pais, cidade, ip))","sub_path":"verificadorIPexterno/ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"317480497","text":"s = \"I am an NLPer\"\n\ndef ngram(words, n):\n    output = []\n    for i in range(len(words) - (n - 1)):\n        output.append(words[i:i+n])\n    return output\n\nwords = s.split(\" \")\nchars = \"\".join(words)\nprint(ngram(words, 2))\nprint(ngram(chars, 2))\n","sub_path":"chapter.1/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"557508750","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nCreated on 2014/11/20\n@author: xiluoduyu@163.com\n'''\n\nimport os\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\n\ndef isint(strpm):\n    if '-' in strpm:\n        if not strpm.startswith('-'):return False\n        else:strpm = strpm[1:]\n    return strpm.isdigit()\n\ndef isfloat(strpm):\n    if '.' in strpm and strpm.count('.') == 1 and len(strpm) > 1:\n        if '-' in strpm:\n            if not strpm.startswith('-'):\n                return False\n            else:\n                strpm = strpm[1:]\n        t = strpm.split('.')\n        if (not t[0] or t[0].isdigit()) and (not t[1] or t[1].isdigit()):\n            return True\n\n    return False\n\ndef islist(strpm):\n    if strpm.startswith('[') and strpm.endswith(']'):\n        return True\n    else:\n        return False\n\ndef isboolean(strpm):\n    return strpm in ['True', 'False']\n\n\n\n#################################################################################\n\n\ndef readconf(configure_file):\n    tmpMap = {}\n\n    lines = open(configure_file, encoding='utf8').read().split('\\n')\n    for lne in lines:\n        if not lne or lne.startswith('#'):continue\n\n        if '=' in lne:\n            pos = lne.find('=')\n            if pos == 0 or pos == len(lne) - 1:\n                continue\n\n            key , value = lne[:pos].strip(), lne[pos + 1:].strip()\n            if not key or not value:continue\n\n            if ((value.startswith(\"'\") and value.endswith(\"'\")) or\n                (value.startswith('\"') and value.endswith('\"'))\n                ):\n                value = value[1:-1]\n\n            if isint(value): # int value\n                tmpMap[key] = int(value)\n            elif isfloat(value):\n                tmpMap[key] = float(value)\n            elif islist(value):\n                tmpMap[key] = eval(value)\n            elif isboolean(value):\n                tmpMap[key] = eval(value)\n            else:\n                tmpMap[key] = value\n\n    return tmpMap\n\n\n\ndef logger(file, name=None, fmt=None, level=logging.INFO):\n    dirname = os.path.dirname(file)\n    if not os.path.exists(dirname):\n        os.makedirs(dirname, exist_ok=True)\n\n    handler = RotatingFileHandler(file, encoding='utf8')\n    handler.setFormatter(logging.Formatter(fmt))\n\n    loggerObj = logging.getLogger(name)\n    loggerObj.addHandler(handler)\n\n    loggerObj.setLevel(level)\n\n    return loggerObj\n\n\n\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"205219320","text":"#Kyle Verdeyen\n#kverdey1@jhu.edu\n#Computer Vision EN.601.461\n#Assignment 1\n#Programming section 1, p2\n#p2.py: runs sequential labeling algorithm that segments binary image into connected regions\n#4-connected labeling algorithm\nimport cv2\nimport numpy as 
np\n\n#Modeled after StackOverflow disjoint set data structure\n#https://stackoverflow.com/questions/3067529/a-set-union-find-algorithm\n#represents equivalence table, but unions automatically when items added,\n#instead of having to track two different sets. \nclass EquivalenceTable(object): \n\n\tdef __init__(self):\n\t\t\tself.labels = {} \n\t\t\tself.objects = {}\n\n\tdef insert(self, x, y): #definition for adding new entry with union-find done automatically\n\t\tlabelsx = self.labels.get(x) #find value 1\n\t\tlabelsy = self.labels.get(y) #find value 2\n\n\t\tif labelsx is not None: #if label1 not empty (isn't in the set)\n\t\t\tif labelsy is not None: #if label2 not empty\n\t\t\t\tif labelsx == labelsy:#if equivalent\n\t\t\t\t\treturn #we don't need to do anything\n\n\t\t\t\tobjectsx = self.objects[labelsx] \n\t\t\t\tobjectsy = self.objects[labelsy]\n\n\t\t\t\tif len(objectsx) < len(objectsy): #set 1 is shorter than set 2\n\t\t\t\t \tx, labelsx, objectsx, y, labelsy, objectsy = y, labelsy, objectsy, x, labelsx, objectsx\n\t\t\t\t \t#don't want to do the temp value shuffle\n\t\t\t\t \t#x = y\n\t\t\t\t \t#labelsx = labelsy\n\t\t\t\t \t#objectsx = objectsy\n\t\t\t\t \t#y = x\n\t\t\t\t \t#labelsy = labelsx \n\t\t\t\t \t#objectsy = objectsx\n\t\t\t\t#multiline fails to label continuous objects with all same label\n\t\t\t\t#unions = objectsx | objectsy\n\t\t\t\t#objectsx = unions\n\t\t\t\tobjectsx |= objectsy #bitwise or assignment, only true if y is in x\n\t\t\t\tdel self.objects[labelsy] #delete the entry for 2\n\t\t\t\tfor z in objectsy: #union \n\t\t\t\t\tself.labels[z] = labelsx #every label position in ob2 gets label1 \n\n\t\t\telse: #label2 is empty\n\t\t\t\tself.objects[labelsx].add(y) #add 2 to objects\n\t\t\t\tself.labels[y] = labelsx #label position y gets label2\n\t\telse: # x is not found\n\t\t\tif labelsy is not None: #in y but not x \n\t\t\t\tself.objects[labelsy].add(x) #add to equivalence\n\t\t\t\tself.labels[x] = labelsy\n\t\t\telse: #it's a brand new object\n\t\t\t\tself.labels[x] = self.labels[y] = x #new label entry\n\t\t\t\tself.objects[x] = set([x, y]) # add objects to table\n\n\n#directions: left, up, upleft, e.g. 
we're first checking bottom right in check4\n#this could probably be split out into separate methods, implementation is pretty slow and has a deep cyclical loop\n#essentially brute force but whatever works\ndef p2(binary_in): #return labels_out\n\t#ret, labels_out = cv2.connectedComponents(binary_in)\n\tdimensions = np.shape(binary_in)\n\trows = dimensions[0]\n\tcolumns = dimensions[1]\n\tlabels_out = np.zeros((rows,columns))\n\t#labels_out = np.empty(dimensions, dtype = int)\n\tvalue = 1\n\tequivalence_table = EquivalenceTable() #see below\n\t#for location, value in ndenumerate(self.binary_in): can't use this since we need directionality\n\tfor x in range(rows):\n\t\tfor y in range(columns):\n\t\t\tif binary_in[x][y] > 0: #input value nonzero\n\t\t\t\tif x == 0: #if we're on the top row\n\t\t\t\t\tif y > 0: #not on the leftmost column\n\t\t\t\t\t\tleft = labels_out[x][y-1] #check left\n\t\t\t\t\t\tif left > 0: #nonzero left\n\t\t\t\t\t\t\tlabels_out[x][y] = left #assign label\n\t\t\t\t\t\t\t#value += 1 #increment label\n\t\t\t\t\t\telse: #zero left\n\t\t\t\t\t\t\tlabels_out[x][y] = value\n\t\t\t\t\t\t\tvalue += 1\n\t\t\t\t\telse: #on the leftmost column\n\t\t\t\t\t\tlabels_out[x][y] = value\n\t\t\t\t\t\tvalue += 1\n\t\t\t\telif y == 0: #not on the top row, check if left column\n\t\t\t\t\tif x > 0: #not top row\n\t\t\t\t\t\tup = labels_out[x-1][y]\n\t\t\t\t\t\tif up > 0: #up cell is not null\n\t\t\t\t\t\t\tlabels_out[x][y] = up #gets same label as up\n\t\t\t\t\t\telse: #up cell has no value [unlabeled]\n\t\t\t\t\t\t\tlabels_out[x][y] = value #mark working cell\n\t\t\t\t\t\t\tvalue += 1 #and increment\n\t\t\t\t\telse: #on top row\n\t\t\t\t\t\tlabels_out[x][y] = value\n\t\t\t\t\t\tvalue += 1\n\t\t\t\telse: #not on the top row, not on the left column\n\t\t\t\t\tup = labels_out[x-1][y] #get neighboring pixels since we can't go out of bounds\n\t\t\t\t\tupleft = labels_out[x-1][y-1]\n\t\t\t\t\tleft = labels_out[x][y-1]\n\t\t\t\t\t#start check, nothing around us?\n\t\t\t\t\tif up == 0 and upleft == 0 and left == 0:\n\t\t\t\t\t\tlabels_out[x][y] = value\n\t\t\t\t\t\tvalue += 1\n\t\t\t\t\t#ok, there's stuff around. check if we're bordering both\n\t\t\t\t\telif left > 0 and up > 0 and upleft == 0:\n\t\t\t\t\t\tlabels_out[x][y] = up\n\t\t\t\t\t\tif left != up: #check if we need to add to equiv table\n\t\t\t\t\t\t\tequivalence_table.insert(left, up)\n\t\t\t\t\t\t#don't care otherwise\n\t\t\t\t\t#neighboring pixels not both occupied. check cell up\n\t\t\t\t\telif up > 0 and left == 0 and upleft == 0:\n\t\t\t\t\t\tlabels_out[x][y] = up\n\t\t\t\t\t\t#neightboring cells unoccupied so don't need to increment labels or add to table\n\t\t\t\t\t#not bordering, not top. check cell left\n\t\t\t\t\telif left > 0 and up == 0 and upleft == 0:\n\t\t\t\t\t\tlabels_out[x][y] = left\n\t\t\t\t\t#left and right empty, check upper left\n\t\t\t\t\telif upleft > 0:\n\t\t\t\t\t\tlabels_out[x][y] = upleft\n\t\t\t\t\t#no close cells occupied\n\t\t\t\t\telse:\n\t\t\t\t\t\tlabels_out[x][y] = value\n\t\t\t\t\t\tvalue += 1\n\t\t\telse: #we're looking at a zero value\n\t\t\t\tlabels_out[x][y] = 0 #don't care, nothing important\n\n\t#done with primary first pass, 2nd pass to resolve equivalence\n\tfor x in range(rows):\n\t\tfor y in range(columns):\n\t\t\tif labels_out[x][y] > 0: #analyze pixels. 
only care if nonzero, ignore otherwise\n\t\t\t\tcount = 0 #start \n\t\t\t\tfor z in equivalence_table.objects: #load line\n\t\t\t\t\tif labels_out[x][y] in equivalence_table.objects[z]: #if pixel under scrutinization is in this line of the equivalence table\n\t\t\t\t\t\t#generates rudimentary grayscale value based on label\n\t\t\t\t\t\tlabels_out[x][y] = int(len(equivalence_table.objects)*(count+1))\n\t\t\t\t\t\tbreak #loopback\n\t\t\t\t\tcount += 1 #increment\n\n\treturn labels_out\n","sub_path":"HW1/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"317360741","text":"import tkinter\nimport tkinter.messagebox\nfrom load_save import load_info\nfrom functools import partial\n\nFILENAME = 'ClubInfo.dat'\n\nclub_dic = load_info(FILENAME)\n\n\nclass Choose_clubGUI:\n\n def __init__(self):\n club_dic = load_info(FILENAME)\n\n self.main_window = tkinter.Tk(className=\"社團資訊系統\")\n self.main_window.geometry('+800+300')\n\n self.top_frame = tkinter.Frame(self.main_window)\n self.mid_frame = tkinter.Frame(self.main_window)\n self.bottom_frame = tkinter.Frame(self.main_window)\n\n self.prompt_label = tkinter.Label(self.top_frame, text='選擇一個社團:')\n\n self.prompt_label.grid(padx=5, pady=5)\n\n for key in club_dic:\n self.key = tkinter.Button(self.mid_frame, text=club_dic[key][0],\n command=partial(self.m, key))\n\n self.key.grid(padx=2, pady=2)\n\n self.quit_button = tkinter.Button(self.bottom_frame, text='結束',\n command=self.main_window.destroy)\n\n self.quit_button.grid(padx=5, pady=5)\n\n self.top_frame.grid(padx=5, pady=5)\n self.mid_frame.grid(padx=5, pady=5)\n self.bottom_frame.grid(padx=5, pady=5)\n\n tkinter.mainloop()\n\n def m(self, key):\n FILENAME = 'ClubInfo.dat'\n\n club_dic = load_info(FILENAME)\n\n print(club_dic[key][0])\n tkinter.messagebox.showinfo('社團資訊',\n '社團名稱:' + club_dic[key][0] + '\\n' + \\\n '社團人數:' + club_dic[key][2] + '\\n' + \\\n '社團性質:' + club_dic[key][1] + '\\n' + \\\n '社課時間:' + club_dic[key][3] + '\\n' + \\\n '社辦:' + club_dic[key][4] + '\\n' + \\\n '介紹:' + club_dic[key][5])\n","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"653464958","text":"\"\"\"\nDjango settings for ask_straw project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '1&+)8(kc#v+r-852m6sadgtfhbm@^oub+tbk$kt#+2*3k6+l37'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'registration', # the 'django-registration-redux' package\n 'ask',\n)\n\nMIDDLEWARE_CLASSES = (\n 
'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'ask_straw.urls'\n\nWSGI_APPLICATION = 'ask_straw.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nif 'STRAW_RDS' in os.environ:\n    DATABASES = {\n        'default': {\n            'ENGINE': 'django.db.backends.mysql',\n            'NAME': 'ask_straw_db',\n            'USER': 'django',\n            'PASSWORD': 'django',\n            'HOST': 'straw-db-instance.cfxeyadobpvg.ap-northeast-1.rds.amazonaws.com',\n            'PORT': '3306',\n        }\n    }\nelse:\n    DATABASES = {\n        'default': {\n            'ENGINE': 'django.db.backends.mysql',\n            'NAME': 'ask_straw_db',\n            'USER': 'django',\n            'PASSWORD': 'django',\n            'HOST': '127.0.0.1',\n            # 'HOST': 'straw-db-instance.cfxeyadobpvg.ap-northeast-1.rds.amazonaws.com',\n            'PORT': '3306',\n        }\n    }\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static_root')\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n    os.path.join(BASE_DIR, 'static'),\n)\n\nTEMPLATE_DIRS = (\n    os.path.join(BASE_DIR, 'templates'),\n)\n\n\n# Media files (user-uploaded files)\n\nif 'STRAW_RDS' in os.environ:\n    MEDIA_ROOT = '/home/media_root' # os.path.join(BASE_DIR, 'media_root')\nelse:\n    MEDIA_ROOT = 'E:/media_root'\n\n\n# Authentication, login, registration\n\nLOGIN_URL = '/accounts/login/'\n\n# django-registration-redux\nACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.\nREGISTRATION_AUTO_LOGIN = True # If True, the user will be automatically logged in.\nREGISTRATION_OPEN = True # If True, users can register\n","sub_path":"ask_straw/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"104720759","text":"#coding=utf-8\n\nimport random\n\n\ndef opt(msize,ins_list,pages):\n\t\n\tpage_list=[]\n\tfor i in ins_list:\n\t\tpage_list.append(i//10)\n\tmirrors=[]\n\t\n\tfault=0\n\tfor i in range(msize):\n\t\t\n\t\tmirrors.append(ins_list[i]//10)\n\t\t\n\t\n\tprint(mirrors)\n\tfor pos, i in enumerate(ins_list[msize:]):\n\t\tprint(i)\n\t\tif i//10 not in mirrors:\n\t\t\tfault+=1\n\t\t\tprint(\"page fault\")\n\t\t\t# for each resident page, find the index of its next future use;\n\t\t\t# the victim is the page whose next use is farthest away (or never)\n\t\t\tfuture = page_list[msize+pos+1:]\n\t\t\tdistances = []\n\t\t\tfor j in range(msize):\n\t\t\t\tif mirrors[j] in future:\n\t\t\t\t\tdistances.append(future.index(mirrors[j]))\n\t\t\t\telse:\n\t\t\t\t\tdistances.append(1000000000)\n\t\t\tmirrors[distances.index(max(distances))] = i//10\n\treturn fault\n\n\ndef produce_addstream():\n\tins_list=[]\n\ttime=0\n\twhile time<319:\n\t\tstart=random.randint(0,319)\n\t\tins_list.append(start+1)\n\t\tfore=random.randint(0,start+1)\n\t\tins_list.append(fore)\n\t\tins_list.append(fore+1)\n\t\tback=random.randint(fore+2,319)\n\t\tins_list.append(back)\n\t\ttime+=4\n\t\n\t#print(ins_list)\n\t#print(\"***********************\")\t\n\treturn ins_list\n\n \n\nif 
__name__==\"__main__\":\n\t#note()\n\tori_list=[]\n\tfor i in range(320):\n\t\tori_list.append(i)\n\tpages=[ori_list[i:i+10] for i in range(0,len(ori_list),10)]\n\tins_list=produce_addstream()\n\tprint(ins_list)\n\tprint(pages)\n\t\n\topt=opt(4,ins_list,pages)\n\t\t\n\t\n\n","sub_path":"OS-learning/ex4_page_replace/opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"561144662","text":"import numpy as np\nfrom Bio import SeqIO\nfrom itertools import product\n\nGAP_PEN = -2\n\ndef oap(s1: str, s2:str) -> tuple:\n    m = len(s1)\n    n = len(s2)\n    tab = np.zeros(shape=(m+1, n+1), dtype=int)\n    pntrs = np.zeros(shape=(m+1, n+1), dtype=int)\n    # fill tables\n    for i in range(m+1):\n        tab[i, 0] = 0\n        pntrs[i, 0] = 2\n    for j in range(n+1):\n        tab[0, j] = j * GAP_PEN\n        pntrs[0, j] = 1\n    \n    for i, j in product(range(1, m+1), range(1, n+1)):\n        scores = [\n            tab[i-1, j-1] + (1 if s1[i-1] == s2[j-1] else GAP_PEN),\n            tab[i, j-1] + GAP_PEN,\n            tab[i-1, j] + GAP_PEN\n        ]\n        tab[i, j] = max(scores)\n        pntrs[i, j] = scores.index(tab[i, j])\n\n    # backtrack\n    last_col = tab[m, :]\n    j = np.where(last_col == last_col.max())[0][-1]\n    i = m\n    s1_al, s2_al = \"\", \"\"\n    max_score = tab[i, j]\n    while (i > 0 ) and (j > 0):\n        if (pntrs[i, j] == 0):\n            s1_al += s1[i - 1]\n            s2_al += s2[j - 1]\n            i, j = i-1, j-1\n        elif (pntrs[i, j] == 1):\n            s2_al += s2[j - 1]\n            s1_al += \"-\"\n            j-= 1\n        else: # pntrs[i, j] == 2\n            s1_al += s1[i - 1]\n            s2_al += \"-\"\n            i -= 1\n\n    return max_score, s1_al[::-1], s2_al[::-1]\n\ndef main():\n    seq1, seq2 = (item.seq for item in SeqIO.parse(\"rosalind_oap.txt\", \"fasta\"))\n    \n    with open(\"out.txt\", \"w\") as o:\n        print(*oap(seq1, seq2), sep='\\n', file=o) \n\nif __name__ == \"__main__\":\n    main()","sub_path":"Bioinformatics Stronghold/77_oap.py","file_name":"77_oap.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"361057299","text":"\"\"\"\nChapter 02 - Problem 02 - return kth to last - CTCI 6th Edition\nProblem Statement:\nImplement an algorithm to find K'th to last element of a singly linked list.\nNote : Size of linked list is not known\n\n\"\"\"\ndef return_kth_to_last(head, k):\n    if k <= 0:\n        return None\n    slow = head\n    fast = head\n    for i in range(k):\n        if fast is None:\n            return None\n        fast = fast.next_node\n    while fast is not None:\n        slow = slow.next_node\n        fast = fast.next_node\n    return slow\n","sub_path":"python_solutions/chapter_02_linked_lists/problem_02_02_return_kth_to_last.py","file_name":"problem_02_02_return_kth_to_last.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"222277990","text":"from lxml import html\nimport requests\nimport json\nimport sys\nfrom collections import OrderedDict\nimport pandas as pd\npd.core.common.is_list_like = pd.api.types.is_list_like\nfrom time import sleep\nimport pandas_datareader as web\nimport datetime\nimport csv\nimport os\nimport matplotlib.pyplot as plt\nfrom stockstats import StockDataFrame\nimport numpy as np\nimport random\nimport talib\nfrom sklearn.preprocessing import StandardScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\n\nrandom.seed(42)\ntickers_array = []\n\ndef HistoricData(ticker):\n    print(ticker)\n    end = datetime.datetime.today()\n    start = 
datetime.date(end.year - 5, 1, 1)\n df = web.DataReader(ticker, 'yahoo', start, end)\n stockstats_DF = StockDataFrame.retype(df)\n stockstats_DF['RSI'] = stockstats_DF['rsi_14']\n stockstats_DF.get('macd')\n stockstats_DF.get('volume_delta')\n stockstats_DF.get('boll')\n stockstats_DF['H-L'] = stockstats_DF['high'] - stockstats_DF['low']\n stockstats_DF['O-C'] = stockstats_DF['close'] - stockstats_DF['open']\n stockstats_DF['3day MA'] = stockstats_DF['close'].shift(1).rolling(window=3).mean()\n stockstats_DF['10day MA'] = stockstats_DF['close'].shift(1).rolling(window=10).mean()\n stockstats_DF['30day MA'] = stockstats_DF['close'].shift(1).rolling(window=30).mean()\n stockstats_DF['Std_dev'] = stockstats_DF['close'].rolling(5).std()\n stockstats_DF['Williams %R'] = talib.WILLR(stockstats_DF['high'].values, stockstats_DF['low'].values, stockstats_DF['close'].values, 7)\n stockstats_DF['Price_Rise'] = np.where(stockstats_DF['close'].shift(-1) > stockstats_DF['close'], 1, 0)\n stockstats_DF = stockstats_DF.dropna()\n\n stockstats_DF.drop(\"rsi_14\", axis=1, inplace=True)\n stockstats_DF.drop( \"rs_14\", axis=1, inplace=True)\n stockstats_DF.drop( \"close_-1_d\", axis=1, inplace=True)\n stockstats_DF.drop(\"close_-1_s\", axis=1, inplace=True)\n stockstats_DF.drop(\"close_12_ema\", axis=1, inplace=True)\n stockstats_DF.drop(\"close_26_ema\", axis=1, inplace=True)\n stockstats_DF.drop(\"close_20_mstd\", axis=1, inplace=True)\n stockstats_DF.drop(\"close_20_sma\", axis=1, inplace=True)\n\n #NNetwork(stockstats_DF)\n print(stockstats_DF)\n stockstats_DF.to_csv(r'C:\\Users\\Isaac\\Desktop\\DataProjects\\HistoricalPrices\\%s.csv' % ticker)\n\ndef getTickers():\n global tickers_array\n tickers_array = []\n i=0\n with open(r\"C:\\Users\\Isaac\\Desktop\\DataProjects\\tickers.txt\", newline='') as line:\n line_reader = csv.reader(line, delimiter='\\t')\n next(line_reader)\n for line in line_reader:\n tickers_array.append(line)\n\n for i in range(0, len(tickers_array)):\n print(tickers_array[i])\n\n#Parse Function from scrapehero.com\n#https://www.scrapehero.com/scrape-yahoo-finance-stock-market-data/\ndef parse(ticker):\n url = \"http://au.finance.yahoo.com/quote/%s?p=%s\" % (ticker, ticker)\n response = requests.get(url, verify=False)\n print(\"Parsing %s\" % (url))\n sleep(4)\n parser = html.fromstring(response.text)\n summary_table = parser.xpath('//div[contains(@data-test,\"summary-table\")]//tr')\n summary_data = OrderedDict()\n other_details_json_link = \"https://query2.finance.yahoo.com/v10/finance/quoteSummary/{0}?formatted=true&lang=en-US®ion=US&modules=summaryProfile%2CfinancialData%2CrecommendationTrend%2CupgradeDowngradeHistory%2Cearnings%2CdefaultKeyStatistics%2CcalendarEvents&corsDomain=finance.yahoo.com\".format(\n ticker)\n summary_json_response = requests.get(other_details_json_link)\n try:\n json_loaded_summary = json.loads(summary_json_response.text)\n y_Target_Est = json_loaded_summary[\"quoteSummary\"][\"result\"][0][\"financialData\"][\"targetMeanPrice\"]['raw']\n earnings_list = json_loaded_summary[\"quoteSummary\"][\"result\"][0][\"calendarEvents\"]['earnings']\n eps = json_loaded_summary[\"quoteSummary\"][\"result\"][0][\"defaultKeyStatistics\"][\"trailingEps\"]['raw']\n datelist = []\n for i in earnings_list['earningsDate']:\n datelist.append(i['fmt'])\n earnings_date = ' to '.join(datelist)\n for table_data in summary_table:\n raw_table_key = table_data.xpath('.//td[contains(@class,\"C(black)\")]//text()')\n raw_table_value = 
table_data.xpath('.//td[contains(@class,\"Ta(end)\")]//text()')\n table_key = ''.join(raw_table_key).strip()\n table_value = ''.join(raw_table_value).strip()\n summary_data.update({table_key: table_value})\n summary_data.update(\n {'1y Target Est': y_Target_Est, 'EPS (TTM)': eps, 'Earnings Date': earnings_date, 'ticker': ticker,\n 'url': url})\n return summary_data\n except:\n print(\"Failed to parse json response\")\n return {\"error\": \"Failed to parse json response\"}\n\ndef sorter():\n data = pd.read_csv(r\"C:\\Users\\Isaac\\Desktop\\DataProjects\\tickers.csv\")\n Consumer_Discretionary_data = data[data.Type == \"Consumer Discretionary\"]\n Consumer_Staples_data = data[data.Type == \"Consumer Staples\"]\n Energy_data = data[data.Type == \"Energy\"]\n Financials_data = data[data.Type == \"Financials\"]\n Health_Care_data = data[data.Type == \"Health Care\"]\n Industrials_data = data[data.Type == \"Industrials\"]\n Information_Technology_data = data[data.Type == \"Information Technology\"]\n Materials_data = data[data.Type == \"Materials\"]\n Real_Estate_data = data[data.Type == \"Real Estate\"]\n Telecommunication_Services_data = data[data.Type == \"Telecommunication Services\"]\n Utilities_data = data[data.Type == \"Utilities\"]\n\n Consumer_Discretionary_PData = Consumer_Discretionary_data[Consumer_Discretionary_data.columns[0]]\n Consumer_Staples_PData = Consumer_Staples_data[Consumer_Staples_data.columns[0]]\n Energy_PData = Energy_data[Energy_data.columns[0]]\n Financials_PData = Financials_data[Financials_data.columns[0]]\n Health_Care_PData = Health_Care_data[Health_Care_data.columns[0]]\n Industrials_PData = Industrials_data[Industrials_data.columns[0]]\n Information_Technology_PData = Information_Technology_data[Information_Technology_data.columns[0]]\n Materials_PData = Materials_data[Materials_data.columns[0]]\n Real_Estate_PData = Real_Estate_data[Real_Estate_data.columns[0]]\n Telecommunication_Services_PData = Telecommunication_Services_data[Telecommunication_Services_data.columns[0]]\n Utilities_PData = Utilities_data[Utilities_data.columns[0]]\n\n\n Plotter(Consumer_Discretionary_PData, \"Consumer Discretionary\")\n Plotter(Consumer_Staples_PData, \"Consumer Staples\")\n Plotter(Energy_PData, \"Energy\")\n Plotter(Financials_PData, \"Financials\")\n Plotter(Health_Care_PData, \"Health Care\")\n Plotter(Industrials_PData, \"Industrials\")\n Plotter(Information_Technology_PData, \"Information Technology\")\n Plotter(Materials_PData, \"Materials\")\n Plotter(Real_Estate_PData, \"Real Estate\")\n Plotter(Telecommunication_Services_PData, \"Telecommunication Services\")\n Plotter(Utilities_PData, \"Utilities\")\n\ndef Plotter(Sector_Data, Sector_Name):\n PATH = r\"C:/Users/Isaac/Desktop/DataProjects/HistoricalPrices/\"\n fileNames = os.listdir(PATH)\n fileNames = [file for file in fileNames if '.csv' in file]\n print(\"\\n\")\n for line in Sector_Data:\n df = pd.read_csv(PATH + line + \".csv\", index_col=6)\n df['Date'] = pd.to_datetime(df['Date'])\n init = df['open'].iloc[0]\n df['open'] = df['open']/init\n plt.plot(df['Date'], df['open'], label = line)\n plt.rc('legend', fontsize=7)\n plt.legend()\n plt.title(Sector_Name + 'stocks ASX500')\n plt.xlabel('Date')\n plt.ylabel('Open Price per day')\n plt.show()\n\nif __name__ == \"__main__\":\n getTickers()\n #Scrape Historic and summary data\n print(\"Do you wish to Scrape new summary and Historical Data (y/n)?\")\n yes = {'yes', 'y', 'ye', ''}\n no = {'no', 'n'}\n choice = input().lower()\n if choice in yes:\n 
print(\"skipped updating data\")\n elif choice in no:\n for i in range(0, len(tickers_array)):\n ticker = tickers_array[i][0]\n print(\"Fetching data for %s\" % (ticker))\n HistoricData(ticker)\n scraped_data = parse(ticker)\n print(\"Writing data to output file\")\n with open(r\"JsonOutput\\%s-summary.json\" % (ticker), 'w') as fp:\n json.dump(scraped_data, fp, indent=4)\n fp.close()\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no'\")\n sorter()\n\n\n","sub_path":"Scraper1.py","file_name":"Scraper1.py","file_ext":"py","file_size_in_byte":8536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"246072694","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\n\ndata = pd.read_csv('data/Combined_News_DJIA.csv')\n# 通过过去的股票预测现在的股票\ntrain = data[data['Date'] < '2015-01-01']\ntest = data[data['Date'] > '2014-12-31']\n\n# 数据预处理\nexample = train.iloc[3,10]\nexample2 = example.lower()\nexample3 = CountVectorizer().build_tokenizer()(example2) # 分词\n\n# 词频\nprint(pd.DataFrame([[x,example3.count(x)] for x in set(example3)], columns = ['Word', 'Count']))\n\n# 构建词库\ntrainheadlines = []\nfor row in range(0,len(train.index)):\n trainheadlines.append(' '.join(str(x) for x in train.iloc[row,2:27]))\nbasicvectorizer = CountVectorizer()\nbasictrain = basicvectorizer.fit_transform(trainheadlines)\n# basictrain.shape (1611, 31675) 有1611条数据,有31675个不同的单词\n\ntestheadlines = []\nfor row in range(0,len(test.index)):\n testheadlines.append(' '.join(str(x) for x in test.iloc[row,2:27]))\nbasictest = basicvectorizer.transform(testheadlines)\n\n# 训练逻辑回归模型\nbasicmodel = LogisticRegression()\nbasicmodel = basicmodel.fit(basictrain, train[\"Label\"])\npredictions = basicmodel.predict(basictest)\n# 效果不好0.42\nprint(pd.crosstab(test[\"Label\"], predictions, rownames=[\"Actual\"], colnames=[\"Predicted\"]))\n\n# 查看单词的权重\nbasicwords = basicvectorizer.get_feature_names()\nbasiccoeffs = basicmodel.coef_.tolist()[0]\ncoeffdf = pd.DataFrame({'Word' : basicwords,\n 'Coefficient' : basiccoeffs})\ncoeffdf = coeffdf.sort_values(['Coefficient', 'Word'], ascending=[0, 1])\nprint(coeffdf.head(10))\nprint(coeffdf.tail(10))\n\n# 两两组合的词频\nadvancedvectorizer = CountVectorizer(ngram_range=(2,2))\nadvancedtrain = advancedvectorizer.fit_transform(trainheadlines)\n# advancedtrain.shape (1611, 366721)\n\ntestheadlines = []\nfor row in range(0,len(test.index)):\n testheadlines.append(' '.join(str(x) for x in test.iloc[row,2:27]))\nadvancedtest = advancedvectorizer.transform(testheadlines)\n\nadvancedmodel = LogisticRegression()\nadvancedmodel = advancedmodel.fit(advancedtrain, train[\"Label\"])\nadvpredictions = advancedmodel.predict(advancedtest)\nprint(pd.crosstab(test[\"Label\"], advpredictions, rownames=[\"Actual\"], colnames=[\"Predicted\"]))\n# 0.57精度\n\nadvwords = advancedvectorizer.get_feature_names()\nadvcoeffs = advancedmodel.coef_.tolist()[0]\nadvcoeffdf = pd.DataFrame({'Words' : advwords,\n 'Coefficient' : advcoeffs})\nadvcoeffdf = advcoeffdf.sort_values(['Coefficient', 'Words'], ascending=[0, 1])\nadvcoeffdf.head(10)\nadvcoeffdf.tail(10)\n","sub_path":"9.Stock-forecast-based-on-NPL/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"549923572","text":"from flask import Flask\nfrom flask import render_template\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = 
Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///death.db'\ndb = SQLAlchemy(app)\n\nclass Death(db.Model):\n    __tablename__ = 'death'\n    __table_args__ = {\n        'autoload': True,\n        'autoload_with': db.engine\n    }\n    index = db.Column(db.Integer, primary_key=True)\n    \n\n@app.route(\"/\")\ndef hello():\n    return render_template(\"list.html\")\n\n\n@app.route(\"/death/\")\ndef note():\n    death = Death.query.all()\n    return render_template(\"list.html\", death=death)\n\n\n@app.route(\"/death/<int:index>/\")\ndef show(index):\n    death = Death.query.filter_by(index=index).first()\n    return render_template(\"show.html\", death=death)\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"19918582","text":"import os\n\nfrom teras import logging as Log\nfrom teras.base.event import Callback\nfrom teras.training.event import TrainEvent\nfrom teras.utils.progressbar import ProgressBar\nimport teras.utils\n\n\nclass ProgressCallback(Callback):\n\n    def __init__(self, name=\"progress_callback\", **kwargs):\n        super(ProgressCallback, self).__init__(name, **kwargs)\n        self._pbar = ProgressBar()\n        self.implement(TrainEvent.EPOCH_TRAIN_BEGIN, self.init_progressbar)\n        self.implement(TrainEvent.BATCH_END, self.update_progressbar)\n        self.implement(TrainEvent.EPOCH_TRAIN_END, self.finish_progressbar)\n\n    def init_progressbar(self, data):\n        self._pbar.start(data['size'])\n        self._count = 0\n\n    def update_progressbar(self, data):\n        self._count += data['batch_size']\n        self._pbar.update(self._count)\n\n    def finish_progressbar(self, data):\n        self._pbar.finish()\n\n\n_reporters = []\n\n\ndef report(values):\n    if _reporters:\n        _reporters[-1].report(values)\n\n\nclass Reporter(Callback):\n\n    def __init__(self, name=\"reporter\", **kwargs):\n        super(Reporter, self).__init__(name, **kwargs)\n        self._logs = {}\n        self._reported = 0\n        self._history = []\n\n    def __enter__(self):\n        _reporters.append(self)\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        _reporters.pop()\n\n    def report(self, values):\n        for name, value in values.items():\n            if \"accuracy\" in name:\n                accuracy = self._logs.get(name, 0.0)\n                if hasattr(value, \"__len__\") and len(value) == 2:\n                    if isinstance(accuracy, float):\n                        accuracy = [0, 0]\n                    accuracy[0] += value[0]\n                    accuracy[1] += value[1]\n                else:\n                    accuracy += float(value)\n                values[name] = accuracy\n        self._logs.update(values)\n        self._reported += 1\n\n    def get_summary(self):\n        summary = {}\n        for name, value in self._logs.items():\n            if \"accuracy\" in name:\n                if isinstance(value, list):\n                    correct, total = value[:2]\n                    if total == 0:\n                        import numpy\n                        accuracy = numpy.nan\n                    else:\n                        accuracy = correct / total\n                else:\n                    accuracy = value / self._reported\n                summary[name] = accuracy\n            else:\n                summary[name] = value\n        return summary\n\n    def get_history(self):\n        return self._history\n\n    def on_train_begin(self, data):\n        self._history = []\n\n    def on_epoch_train_begin(self, data):\n        self._logs = {}\n        self._reported = 0\n\n    on_epoch_validate_begin = on_epoch_train_begin\n\n    def on_epoch_train_end(self, data):\n        self.report({'loss': data['loss']})\n        summary = self.get_summary()\n        self._output_log(\"training\", summary, data)\n        self._history.append({'training': summary, 'validation': None})\n\n    def on_epoch_validate_end(self, data):\n        self.report({'loss': data['loss']})\n        summary = self.get_summary()\n        
self._output_log(\"validation\", summary, data)\n self._history[-1]['validation'] = summary\n\n def _output_log(self, label, summary, data):\n message = \"[{}] epoch {} - #samples: {}, loss: {:.8f}\".format(\n label, data['epoch'], data['size'], summary['loss'])\n if 'accuracy' in summary:\n message += \", accuracy: {:.8f}\".format(summary['accuracy'])\n v = self._logs.get('accuracy', None)\n if isinstance(v, list) and v[1] > 0:\n message += \" ({}/{})\".format(v[0], v[1])\n Log.i(message)\n message = []\n for name, value in summary.items():\n if name == 'loss' or name == 'accuracy':\n continue\n if isinstance(value, float):\n message.append(\"{}: {:.8f}\".format(name, value))\n else:\n message.append(\"{}: {}\".format(name, value))\n if 'accuracy' in name:\n v = self._logs.get(name, None)\n if isinstance(v, list) and v[1] > 0:\n message[-1] += \" ({}/{})\".format(v[0], v[1])\n if message:\n Log.i(\", \".join(message))\n\n\nclass Saver(Callback):\n\n def __init__(self, model, basename, directory='', context=None,\n interval=1, save_from=None, name=\"saver\", **kwargs):\n super(Saver, self).__init__(name, **kwargs)\n self._model = model\n self._basename = os.path.join(os.path.expanduser(directory), basename)\n self._context = context\n if not isinstance(interval, int):\n raise ValueError(\"interval must be specified as int value: \"\n \"actual('{}')\".format(type(interval).__name__))\n self._interval = interval\n self._save_from = save_from\n\n def on_train_begin(self, data):\n if self._context is not None:\n context_file = self._basename + '.context'\n Log.i(\"saving the context to {} ...\".format(context_file))\n with open(context_file, 'wb') as f:\n teras.utils.dump(self._context, f)\n\n def on_epoch_end(self, data):\n epoch = data['epoch']\n if self._save_from is not None and data['epoch'] < self._save_from:\n return\n if epoch % self._interval == 0:\n model_file = \"{}.{}.pkl\".format(self._basename, epoch)\n Log.i(\"saving the model to {} ...\".format(model_file))\n with open(model_file, 'wb') as f:\n teras.utils.dump(self._model, f)\n","sub_path":"teras/training/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":5691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"330186096","text":"import re\nimport numpy as np\nwith open(\"input.txt\", encoding='utf8') as f:\n grid = []\n arr= []\n n=0\n res = \"\"\n for line in f:\n \n if(\"[\" in line):\n grid.append([line[i:i+4].strip().replace(']','').replace('[','') for i in range(0,len(line),4)])\n elif n == 0 :\n arr = np.array(grid).T \n arr = np.flip(arr,axis=1).tolist()\n arr = [[i for i in item if i != ''] for item in arr]\n n+=1\n elif line.strip() ==\"\":\n continue\n else:\n a,quant,b,frm,c,to = [i for i in re.split(\" \", line.strip())]\n quant = int(quant)\n frm = int(frm)\n to = int(to)\n for i in range(0,quant): \n arr[to-1].append(arr[frm-1].pop())\n for i in arr:\n res= res + i[len(i)-1] \n\n print(res) \n\n\n","sub_path":"2022/day5/aoc51.py","file_name":"aoc51.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"471758794","text":"# -*- coding: utf-8 -*-\nimport json\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport hashlib\n\nfrom tornado.options import define, options\n\ndefine(\"port\", default=8090, help=\"run on the given port\", type=int)\nclass MainHandler(tornado.web.RequestHandler):\n def 
post(self):\n        data = json.loads(self.request.body)\n        appid = ''\n        appsecret = ''\n        timestamp = data['timestamp']\n        sign = data['sign']\n        thissign = hashlib.md5(appid+appsecret+str(timestamp)).hexdigest()\n        if thissign == sign:\n            self.write('success')\n            # handle the business logic\n            channel_type = data['channelType']\n            transaction_type = data['transactionType']\n            trade_success = data['tradeSuccess']\n            message_detail = data['messageDetail']\n        else:\n            self.write('any this except success')\n\ndef main():\n    tornado.options.parse_command_line()\n    application = tornado.web.Application([\n        (r\"/webhook/demo/\", MainHandler),\n    ])\n    http_server = tornado.httpserver.HTTPServer(application)\n    http_server.listen(options.port)\n    tornado.ioloop.IOLoop.instance().start()\nif __name__ == \"__main__\":\n    main()\n","sub_path":"demo/webhook.py","file_name":"webhook.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"303458403","text":"from __future__ import print_function\nimport os\nimport sys\nfrom variables import installdir, path, module, settings_path\nfrom classes import run\nimport subprocess\n\n\nclass UWSGI(object):\n    def __init__(self):\n        self.address = \"127.0.0.1\"\n        self.port = 3035\n        self.sample = os.path.join(installdir, \"config/xdev.uwsgi.sample\")\n        self.conf = conf = os.path.join(installdir, \"config/xdev.uwsgi\")\n        #self.bin = '/pyenvs/%s/bin/uwsgi' % module\n        self.bin = 'uwsgi'\n        self.logfile = os.path.join(path, 'install/config/uwsgi.log')\n\n        # Ensure pip pkg is installed\n        print(\"UWSGI... \", end=\"\")\n        if os.path.isfile(conf):\n            print(\"config exists, \", end=\"\")\n        else:\n            print(\"Creating uwsgi conf... \", end=\"\")\n            with open(conf, 'w') as f:\n                f.write(self.content())\n            print(\"OK\")\n\n        if self.running:\n            print(\"running.\")\n\n        #link(os.path.abspath(\"config/xdev.uwsgi\"), \"/etc/uwsgi/apps-enabled/xdev.ini\")\n        #run([\"service\", \"uwsgi\", \"restart\"])\n        # Start uwsgi from Virtual Env\n        # https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/uwsgi/\n        #return txt\n        self.run()\n        #self.stop()\n\n    def run(self):\n        # Will not run if socket is in use\n        if self.running:\n            print(\"UWSGI is already running\")\n            self.reload()\n        else:\n            run([self.bin, '--ini', self.conf], wait=False)\n\n    def install(self):\n        #pkg = Pkg(\"nginx\")\n        #pkg.install()\n        pass\n\n    def content(self):\n        \"\"\"Return config contents with replaced parameters\"\"\"\n        txt = \"\"\n        with open(self.sample, 'r') as f:\n            txt = f.read()\n        replace = [('djangoproject = /var/www/%n', 'djangoproject = '+path),\n                   ('virtualenv = /pyenvs/xdev/', 'virtualenv = /pyenvs/%s/' % module),\n                   ('#logto = uwsgi.log', 'logto = %s' % self.logfile)]\n        for item in replace:\n            txt = txt.replace(item[0], item[1])\n        return txt\n\n    @property\n    def running(self):\n        \"\"\"Return a list of uwsgi pid's\"\"\"\n        # Use \"ps\" command to find these processes\n        p1 = subprocess.Popen([\"ps\", \"ax\"], stdout=subprocess.PIPE)\n        p2 = subprocess.Popen([\"grep\", self.bin],\n                              stdin=p1.stdout, stdout=subprocess.PIPE)\n        p3 = subprocess.Popen([\"grep\", \"-v\", \"grep\"], stdin=p2.stdout, stdout=subprocess.PIPE)\n        b = p3.communicate()[0]\n        s = b.decode(encoding='UTF-8').strip()\n        try:\n            return [line.split()[0] for line in s.split('\\n')]\n        except:\n            return []\n\n    def reload(self):\n        print(\"Reloading UWSGI...\", end=\"\")\n        run(['touch', os.path.join(settings_path, 'reload')], okmsg=\"OK\")\n\n    def stop(self):\n        pids = self.running\n        if pids:\n            #run([uwsgi, '--ini', conf], wait=False)\n            # 
kill -INT `cat /tmp/project-master.pid`\n run([\"kill\", \"-INT\", pids[0]], out=True)\n","sub_path":"install/uwsgi.py","file_name":"uwsgi.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"262927263","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom .serializers import UserProfileSerializer, ProgrammingLanguageSerializer, QuestionSerializer\nfrom accounts.models import UserProfile, ProgrammingLanguage\nfrom questions.models import Question\nfrom django.conf import settings\nimport json\nfrom django.contrib.auth.models import User\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views.generic import View\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom django.conf import settings\nfrom django.urls import URLPattern, URLResolver\nfrom rest_framework.decorators import api_view\nfrom bs4 import BeautifulSoup\n\nclass UserProfileListView(APIView):\n\tdef get(self, request):\n\t\tqs = UserProfile.objects.all().order_by('-points')\n\t\tserializer = UserProfileSerializer(qs, many=True)\n\t\treturn Response(serializer.data)\n\nclass ProgrammingLanguageListView(APIView):\n\tdef get(self, request):\n\t\tqs = ProgrammingLanguage.objects.all()\n\t\tserializer = ProgrammingLanguageSerializer(qs, many=True)\n\t\treturn Response(serializer.data,headers={\"Accept\":\"application/json; indent=4\"})\n\nclass TrendingQuestionListView(APIView):\n\tdef get(self, request):\n\t\tqs = Question.objects.all().order_by('-likes')[:30]\n\t\tserializer = QuestionSerializer(qs, many=True)\n\t\tdata = serializer.data\n\t\tquestion_code = ''\n\t\tfor i in range(len(data)):\n\t\t\tprint(i)\n\t\t\tquestion_code = repr(BeautifulSoup(data[i]['question'], features='html.parser').text)\n\t\t\tdata[i]['question'] = question_code\n\t\treturn Response(data)\n\nclass TrendingDeveloperListView(APIView):\n\tdef get(self, request):\n\t\tqs = UserProfile.objects.all().order_by('-points').exclude(points=0)[:10]\n\t\tserializer = UserProfileSerializer(qs, many=True)\n\t\treturn Response(serializer.data)\n\ndef list_urls(lis, acc=None):\n\tif acc is None:\n\t\tacc = []\n\tif not lis:\n\t\treturn\n\tl = lis[0]\n\tif isinstance(l, URLPattern):\n\t\tyield acc + [str(l.pattern)]\n\telif isinstance(l, URLResolver):\n\t\tyield from list_urls(l.url_patterns, acc+[str(l.pattern)])\n\tyield from list_urls(lis[1:], acc)\n\nclass OverView(APIView):\n\tdef get(self, request):\n\t\turlconf = __import__(\"api.urls\",{},{},[''])\n\t\tdata = list_urls(urlconf.urlpatterns)\n\t\tdata = list(data)\n\t\tcontext = {}\n\t\tindex = 0\n\t\tfor i in data:\n\t\t\tcontext.update({str(index):i})\n\t\t\tindex += 1\n\t\treturn Response(data)\n\nclass QuestionDetailView(APIView):\n\tdef get(self, request, *args, **kwargs):\n\t\tq = Question.objects.filter(url=kwargs['url'])\n\t\tif q.exists():\n\t\t\tqw = Question.objects.get(url=kwargs['url'])\n\t\t\tserializer = QuestionSerializer(q, many=True)\n\t\t\tdata = serializer.data\n\t\t\tquestion_code = repr(BeautifulSoup(data[0]['question'], features='html.parser').text)\n\t\t\tdata[0]['question'] = question_code\n\t\t\treturn Response(data)\n\t\telse:\n\t\t\treturn Response({\"error\":\"Question not found\"})\n\n\nclass CanLogin(View):\n\t@method_decorator(csrf_exempt)\n\tdef dispatch(self, request, *args, **kwargs):\n\t return super(CanLogin, 
self).dispatch(request, *args, **kwargs)\n\n\tdef post(self, request):\n\t\tdata = {\"canLogin\": \"False\"}\n\t\tusername = request.POST.get('username') or None\n\t\tpassword = request.POST.get('password') or None\n\n\t\tu = User.objects.filter(username=username)\n\t\tif u.exists():\n\t\t\tu = u[0]\n\t\t\tif u.check_password(password):\n\t\t\t\tdata['canLogin'] = 'True'\n\t\treturn JsonResponse(data)\n\n\nclass GetUserProfile(View):\n\t@method_decorator(csrf_exempt)\n\tdef dispatch(self, request, *args, **kwargs):\n\t\treturn super(GetUserProfile, self).dispatch(request, *args, **kwargs)\n\n\tdef post(self, request):\n\t received_json_data = json.loads(request.body.decode('utf-8'))\n\t pk = received_json_data[\"pk\"]\n\t user_profile = UserProfile.objects.filter(id=pk)\n\t if user_profile.exists():\n\t user_profile = user_profile[0]\n\t data = UserProfileSerializer(user_profile, many=False)\n\t data = data.data\n\t else:\n\t data = {\"error\":\"User does not exist\"}\n\t return JsonResponse(data)\n\n\nclass GetLanguage(View):\n\t@method_decorator(csrf_exempt)\n\tdef dispatch(self, request, *args, **kwargs):\n\t\treturn super(GetLanguage, self).dispatch(request, *args, **kwargs)\n\n\tdef post(self, request):\n\t received_json_data = json.loads(request.body)\n\t pk = received_json_data[\"pk\"]\n\t language = ProgrammingLanguage.objects.filter(id=pk)\n\t if language.exists():\n\t language = language[0]\n\t data = ProgrammingLanguageSerializer(language, many=False)\n\t data = data.data\n\t else:\n\t data = {\"error\":\"User does not exist\"}\n\t return JsonResponse(data)\n\nclass RegisterUser(View):\n\t@method_decorator(csrf_exempt)\n\tdef dispatch(self, request, *args, **kwargs):\n\t\treturn super(RegisterUser, self).dispatch(request, *args, **kwargs)\n\n\tdef post(self, request):\n\t\tdata = json.loads(request.body) or None\n\t\tusername = data['username'] or None\n\t\tpassword = data['password'] or None\n\t\tdata = {}\n\t\tif username is not None and password is not None:\n\t\t\tfilter_user = User.objects.filter(username=username)\n\t\t\tif filter_user.exists():\n\t\t\t\tdata['status'] = \"Invalid\"\n\t\t\t\tdata['error'] = 'User Already Exists'\n\t\t\t\treturn JsonResponse(data)\n\t\t\tif len(password) >= 8:\n\t\t\t\tuser = User.objects.create_user(username=username, password=password)\n\t\t\t\tdata['status'] = 'Created'\n\t\t\t\tdata['error'] = 'None'\n\t\t\telse:\n\t\t\t\tdata['status'] = 'Invalid'\n\t\t\t\tdata['error'] = 'Invalid Credentials'\n\t\telse:\n\t\t\tdata['status'] = 'Invalid'\n\t\t\tdata['error'] = 'No Credentials'\n\t\treturn JsonResponse(data)\n\n\nclass GetQuestionsByUser(View):\n\t@method_decorator(csrf_exempt)\n\tdef dispatch(self, request, *args, **kwargs):\n\t return super().dispatch(request, *args, **kwargs)\n\n\tdef post(self, request):\n\t\tbody = request.body or None\n\t\tif body == None:\n\t\t\treturn JsonResponse({})\n\t\tr_body = json.loads(body) or None\n\t\tif r_body == None:\n\t\t\treturn JsonResponse({})\n\t\tusername = r_body['username'] or None\n\t\tif username != None:\n\t\t\tuser = User.objects.filter(username=username)\n\t\t\tif user.exists():\n\t\t\t\tquestions = Question.objects.filter()\n\t\t\t\tserializer = QuestionSerializer(questions, many=True)\n\t\t\t\treturn JsonResponse(serializer.data, safe=False)\n\t\t\telse:\n\t\t\t\treturn JsonResponse({\"error\":\"User does not exists\"})\n\t\telse:\n\t\t\treturn 
JsonResponse({})\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"510370491","text":"import os\nimport sys\nimport fnmatch\n#Check for existence of \"pageProperties.config\"\n #Number of Pages (Int)\n #Single Page (Bool)\n #Navbar Type (Must be parsed.)\n #Number Sections\n #Color Scheme Primary Color 1 (Color)\n #Color Scheme Primary Color 2 (Color)\n #Color Scheme Secondary Color 1 (Color)\n #Color Scheme Secondary Color 2 (Color)\n #Color Scheme Extra Color 1 (Color)\n #jsFadeIn (Bool)\n #jsScrollableNavbar (Bool)\n #jsLightableNavbar (Bool)\n #jsDissapearMobileButton (Bool)\n\ngetDocumentsGood = False\nmakeLocalVariables = False\n\nprint(\"paSetPageAttributes Script | Copyright Austin Fell 2016\")\nhomeFolder = os.getcwd()\nscriptFolder = os.path.dirname(os.path.realpath(sys.argv[0]))\n\nprint(\"Current directory is: \" + homeFolder)\nprint(\"If this is wrong, please check your directory and if it was installed correctly.\")\nprint('\\n')\n\ncssResults = []\nfor root, dirs, files in os.walk(homeFolder):\n for _file in files:\n if fnmatch.fnmatch(_file, '*.css'):\n cssResults.append(os.path.join(root, _file))\nprint(\"The following CSS documents were discovered.\")\nprint(cssResults)\nprint('\\n')\n\nHTMLResults = []\nfor root, dirs, files in os.walk(homeFolder):\n for _file in files:\n if fnmatch.fnmatch(_file, '*.html'):\n HTMLResults.append(os.path.join(root, _file))\nprint(\"The following HTML documents were discovered.\")\nprint(HTMLResults)\nprint('\\n')\n\nJSResults = []\nfor root, dirs, files in os.walk(homeFolder):\n for _file in files:\n if fnmatch.fnmatch(_file, '*.js'):\n JSResults.append(os.path.join(root, _file))\nprint(\"The following Javascript documents were discovered.\")\nprint(JSResults)\nprint('\\n')\n\nprint (\"Checking for existence of paths.config inside of SWDL directory.\")\nif (os.path.isfile(os.path.join(os.getcwd(), 'paths.config'))):\n print(\"File already exists.\")\nelse:\n print(\"File does not exist. 
Initializing data.\")\n pathsData = open('paths.config', 'w')\n pathsData.write(\"css.parseFiles\")\n pathsData.write(\"\\n\")\n for i in cssResults:\n pathsData.write(\" \" + i)\n pathsData.write(\"\\n\")\n pathsData.write(\"css.parseEnd\")\n pathsData.write(\"\\n\")\n pathsData.write(\"html.parseFiles\")\n pathsData.write(\"\\n\")\n for i in HTMLResults:\n pathsData.write(\" \" + i)\n pathsData.write(\"\\n\")\n pathsData.write(\"html.parseEnd\")\n pathsData.write(\"\\n\")\n pathsData.write(\"js.parseFiles\")\n pathsData.write(\"\\n\")\n for i in JSResults:\n pathsData.write(\" \" + i)\n pathsData.write(\"\\n\")\n pathsData.write(\"js.parseEnd\")\n pathsData.write(\"\\n\")\n getDocumentsGood = True\n\nif getDocumentsGood is True:\n if (os.path.isfile(os.path.join(os.getcwd(), 'swdlData.szi'))):\n print(\"File already exists.\")\n else:\n pathsData = open('project.config', 'w')\n pathsData.write(\"color PRIMARY_BG_COLOR_1 : UNDEFINED\")\n pathsData.write(\"\\n\")\n pathsData.write(\"color PRIMARY_BG_COLOR_2 : UNDEFINED\")\n pathsData.write(\"\\n\")\n pathsData.write(\"color PRIMARY_FG_COLOR_1 : UNDEFINED\")\n pathsData.write(\"\\n\")\n pathsData.write(\"color PRIMARY_FG_COLOR_2 : UNDEFINED\")\n pathsData.write(\"\\n\")\n pathsData.write(\"bool pathsInitialized : True\")\n pathsData.write(\"\\n\")\n pathsData.write(\"bool colorsInitialized : True\")\n pathsData.write(\"\\n\")\n pathsData.write(\"bool swdlDiagnosticsRan : False\")\n pathsData.write(\"\\n\")\n\nprint(\"Project file successfully written\")\n","sub_path":"target/release/plugins/std/pa-SetPageAttributes/pa-Set.py","file_name":"pa-Set.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"430902990","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\n#url input was made to 'https://fundamentus.com.br/detalhes.php'\nclass Fundamentus:\n def __init__(self, url):\n self.url = url\n self.ativos = []\n self.headers = {'User-Agent': 'Chrome/81.0.4044.138'\n }\n \n #the next method will return all possible tickers from Fundamentu's database\n def get_tickers(self):\n ticker_request = requests.get(self.url, headers=self.headers)\n tickers = pd.read_html(ticker_request.text)[0]\n \n for papeis in tickers['Papel']:\n self.ativos.append(papeis)\n \n #right here we make an instance of a DataFrame object for filling it afterwards\n #some developers would say it's not the bet option for handling DataFrames but+\n #considering it's an old script and it's functional, I didn't want to change it\n def init_dataframe(self):\n indx = [\"Ticker\",\n \"Tipo de Ação\",\n \"Cotação\",\n \"Valor de Mercado\",\n \"Valor da Firma\",\n \"Ativo\",\n \"Nro. de Ações\",\n \"Oscilação 2021\",\n \"Oscilação 2020\",\n \"Oscilação 2019\",\n \"P/L\",\n \"LPA\",\n \"VPA\",\n \"P/EBIT\",\n \"Marg. Bruta\",\n \"PSR\",\n \"Marg. EBIT\",\n \"P/Ativos\",\n \"Margem Líquida\",\n \"P/Cap. Giro\",\n \"EBIT/Ativo\",\n \"P/Ativo Cic. Liq.\",\n \"ROIC\",\n \"DY\",\n \"ROE\",\n \"EV/EBIT\",\n \"Div Br/Patrim.\",\n \"Cres Rec. 5a\",\n \"Giro Ativos\",\n \"Disponibilidade\",\n \"Div. Bruta\",\n \"Div. Líquida\",\n \"Patrim. Liq.\",\n \"Receita Liq. 1a\",\n \"EBIT 1a\",\n \"Lucro Liq. 
1a\"]\n df = pd.DataFrame(index=indx)\n \n self.retrieve_data(df=df)\n \n #this method will retrieve all data from all possible tickers, create an object+\n #with the returned values and append it to the dataframe\n def retrieve_data(self, df):\n for element in self.ativos:\n url = f'https://fundamentus.com.br/detalhes.php?papel={element}'\n ticker_request = requests.get(url, headers=self.headers)\n soup = BeautifulSoup(ticker_request.content, 'html.parser')\n\n try:\n ticker = element\n except:\n ticker = 'N/A'\n try:\n mbruta = soup.findAll('td', 'data')[22].span.text.replace('\\n', '')\n except:\n mbruta = 0\n try:\n pebit = soup.findAll('td', 'data w2')[5].span.text.replace('\\n', '')\n except:\n pebit = 0\n try:\n psr = soup.findAll('td', 'data')[24].span.text.replace('\\n', '')\n except:\n psr = 0\n try:\n mebit = soup.findAll('td', 'data')[25].span.text.replace('\\n', '')\n except:\n mebit = 0\n try:\n pativo = soup.findAll('td', 'data')[27].span.text.replace('\\n', '')\n except:\n pativo = 0\n try:\n margliq = soup.findAll('td', 'data')[28].span.text.replace('\\n', '')\n except:\n margliq = 0\n try:\n pcapg = soup.findAll('td', 'data')[30].span.text.replace('\\n', '')\n except:\n pcapg = 0\n try:\n ebitativo = soup.findAll('td', 'data')[31].span.text.replace('\\n', '')\n except:\n ebitativo = 0\n try:\n patcili = soup.findAll('td', 'data')[33].span.text.replace('\\n', '')\n except:\n patcili = 0\n try:\n roic = soup.findAll('td', 'data')[34].span.text.replace('\\n', '')\n except:\n roic = 0\n try:\n dy = soup.findAll('td', 'data')[36].span.text.replace('\\n', '')\n except:\n dy = 0\n try:\n roe = soup.findAll('td', 'data')[37].span.text.replace('\\n', '')\n except:\n roe = 0\n try:\n evebitda = soup.findAll('td', 'data')[39].span.text.replace('\\n', '')\n except:\n evebitda = 0\n try:\n liqcor = soup.findAll('td', 'data')[40].span.text.replace('\\n', '')\n except:\n liqcor = 0\n try:\n evebit = soup.findAll('td', 'data')[42].span.text.replace('\\n', '')\n except:\n evebit = 0\n try:\n divbr = soup.findAll('td', 'data')[43].span.text.replace('\\n', '')\n except:\n divbr = 0\n try:\n cres5a = soup.findAll('td', 'data')[45].span.text.replace('\\n', '')\n except:\n cres5a = 0\n try:\n giroa = soup.findAll('td', 'data')[46].span.text.replace('\\n', '')\n except:\n giroa = 0\n try:\n dispo = soup.findAll('td', 'data')[49].span.text.replace('\\n', '')\n except:\n dispo = 0\n try:\n ativoc = soup.findAll('td', 'data')[51].span.text.replace('\\n', '')\n except:\n ativoc = 0\n try:\n divb = soup.findAll('td', 'data')[48].span.text.replace('\\n', '')\n except:\n divb = 0\n try:\n divl = soup.findAll('td', 'data')[50].span.text.replace('\\n', '')\n except:\n divl = 0\n try:\n patliq = soup.findAll('td', 'data')[52].span.text.replace('\\n', '')\n except:\n patliq = 0\n try:\n recliq = soup.findAll('td', 'data')[53].span.text.replace('\\n', '')\n except:\n recliq = 0\n try:\n ebit = soup.findAll('td', 'data')[55].span.text.replace('\\n', '')\n except:\n ebit = 0\n try:\n lucliq = soup.findAll('td', 'data')[57].span.text.replace('\\n', '')\n except:\n lucliq = 0\n try:\n tipo = soup.findAll('td', 'data')[2].span.text\n except:\n tipo = 0\n try:\n cota = soup.find('td', 'data destaque w3').span.text\n except:\n cota = 0\n try:\n valorm = soup.findAll('td', 'data w3')[0].span.text\n except:\n valorm = 0\n try:\n valorf = soup.findAll('td', 'data w3')[1].span.text\n except:\n valorf = 0\n try:\n ativo = soup.findAll('td', 'data w3')[2].span.text\n except:\n ativo = 0\n try:\n nroa = 
soup.findAll('td', 'data')[13].span.text\n except:\n nroa = 0\n try:\n osc2021 = soup.findAll('td', 'data w1')[4].span.text\n except:\n osc2021 = 0\n try:\n osc2020 = soup.findAll('td', 'data w1')[5].span.text\n except:\n osc2020 = 0\n try:\n osc2019 = soup.findAll('td', 'data w1')[6].span.text\n except:\n osc2019 = 0\n try:\n pl = soup.findAll('td', 'data w2')[1].span.text\n except:\n pl = 0\n try:\n lpa = soup.findAll('td', 'data w2')[2].span.text\n except:\n lpa = 0\n try:\n pvp = soup.findAll('td', 'data w2')[3].span.text\n except:\n pvp = 0\n try:\n vpa = soup.findAll('td', 'data w2')[4].span.text\n except:\n vpa = 0\n\n data = {\"Ticker\": ticker,\n \"Tipo de Ação\": tipo,\n \"Cotação\": cota,\n \"Valor de Mercado\": valorm,\n \"Valor da Firma\": valorf,\n \"Ativo\": ativo,\n \"Nro. de Ações\": nroa,\n \"Oscilação 2021\": osc2021,\n \"Oscilação 2020\": osc2020,\n \"Oscilação 2019\": osc2019,\n \"P/L\": pl,\n \"LPA\": lpa,\n \"VPA\": vpa,\n \"P/EBIT\": pebit,\n \"Marg. Bruta\": mbruta,\n \"PSR\": psr,\n \"Marg. EBIT\": mebit,\n \"P/Ativos\": pativo,\n \"Margem Líquida\": margliq,\n \"P/Cap. Giro\": pcapg,\n \"EBIT/Ativo\": ebitativo,\n \"P/Ativo Cic. Liq.\": patcili,\n \"ROIC\": roic,\n \"DY\": dy,\n \"ROE\": roe,\n \"EV/EBIT\": evebit,\n \"Div Br/Patrim.\": divbr,\n \"Cres Rec. 5a\": cres5a,\n \"Giro Ativos\": giroa,\n \"Disponibilidade\": ativoc,\n \"Div. Bruta\": divb,\n \"Div. Líquida\": divl,\n \"Patrim. Liq.\": patliq,\n \"Receita Liq. 1a\": recliq,\n \"EBIT 1a\": ebit,\n \"Lucro Liq. 1a\": lucliq}\n\n df = pd.DataFrame.append(self=df, other=data, ignore_index=True)\n self.export_to_csv(df)\n\n #now we get the DataFrame completed, clean it and export to a csv file\n def export_to_csv(self, df):\n df = df.dropna(how='all')\n df = df.drop_duplicates(subset=['Ticker'])\n df.reset_index(inplace = True, drop = True)\n df.index = df.index + 1\n df.to_csv('compile.csv')\n\n #then, all you need to do is run the 'run_script' function for it to call+\n #all the other methods for getting it done\n def run_script(self):\n self.get_tickers()\n self.init_dataframe()\n\n\n#run the python script whenever you'd like and it'll do all the things automatically. 
Hope you enjoy :)\nif __name__ == '__main__':\n run = Fundamentus('https://fundamentus.com.br/detalhes.php')\n run.run_script()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"219532901","text":"from QUBEKit.ligand import Protein\n\nimport unittest\nimport os\nimport tempfile\nfrom shutil import copy\n\n\nclass TestProteins(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n Set up a protein testing class, make temp folder and copy capped leu file\n \"\"\"\n\n self.home = os.getcwd()\n self.test_folder = os.path.join(os.path.dirname(__file__), 'files')\n\n # Make the temp folder and move there with the required files\n with tempfile.TemporaryDirectory() as temp:\n os.chdir(temp)\n copy(os.path.join(self.test_folder, 'capped_leu.pdb'), 'capped_leu.pdb')\n self.molecule = Protein('capped_leu.pdb')\n\n def test_xml_generation(self):\n\n # Check all atoms are found in pdb file\n self.assertEqual(len(self.molecule.atoms), 31)\n\n # Check that every bond has been a length\n self.assertEqual(len(self.molecule.topology.edges), len(self.molecule.bond_lengths))\n\n # Check for angles and torsions too\n\n def tearDown(self):\n \"\"\"Remove the files produced during testing\"\"\"\n os.chdir(self.home)\n\n\nif __name__ == '__main__':\n\n unittest.main()\n","sub_path":"QUBEKit/tests/protein_tests.py","file_name":"protein_tests.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"29618626","text":"class Solution(object):\n def rotatedDigits(self, N):\n \"\"\"\n :type N: int\n :rtype: int\n \"\"\"\n count = 0\n for i in range(1, N+1):\n if any(x for x in str(i) if x in [\"3\",\"4\",\"7\"]):continue\n if any(x for x in str(i) if x in [\"2\",\"5\",\"6\",\"9\"]):\n count += 1\n return count\n\nsol = Solution()\nprint(sol.rotatedDigits(10))","sub_path":"LeetCodeContests/others/rotating.py","file_name":"rotating.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"138980836","text":"'''\nCopyright(C) 2016 Engineering Department, University of Cambridge, UK.\n\nLicense\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\nAuthor\n Gilles Degottex \n'''\n\nimport sys\nimport os\nimport numpy as np\nimport shutil\nimport glob\nimport datetime\n\nimport scipy.io\n\nfrom lib import sigproc as sp\nfrom . 
import resampling\n\nif sp.use_pysndfile:\n    import pysndfile.sndio  # needed by the pysndfile branches below, which the original module referenced without importing\n\ndef avread(filename):\n    '''\n    Read any audio file by first converting it to wav with avconv.\n    '''\n    tmpoutfname = sp.gentmpfile('avread.wav')\n    try:\n        os.system('avconv -i '+filename+' '+tmpoutfname+' >/dev/null 2>&1')\n        #os.system('avconv -i '+filename+' '+tmpoutfname)\n\n        wav, fs, enc = wavread(tmpoutfname)\n    except:\n        if os.path.exists(tmpoutfname): os.remove(tmpoutfname)\n        raise\n\n    if os.path.exists(tmpoutfname): os.remove(tmpoutfname)\n\n    return wav, fs, enc\n\ndef wavread(filename):\n    if sp.use_pysndfile:\n        wav, fs, enc = pysndfile.sndio.read(filename)\n    else:\n        fs, wav = scipy.io.wavfile.read(filename)\n        if wav.dtype!='float32' and wav.dtype!='float16':\n            wav = wav / float(np.iinfo(wav.dtype).max)\n        enc = wav.dtype\n\n    return wav, fs, enc\n\ndef wavgetfs(filename):\n    if sp.use_pysndfile:\n        _, fs, _ = pysndfile.sndio.read(filename)\n    else:\n        fs, _ = scipy.io.wavfile.read(filename)\n\n    return fs\n\ndef wavwrite(filename, wav, fs, enc=None, norm_max_ifneeded=False, norm_max=False, verbose=0):\n\n    if norm_max:\n        wav_max = np.max(np.abs(wav))\n        wav /= 1.05*wav_max\n\n    elif norm_max_ifneeded:\n        wav_max = np.max(np.abs(wav))\n        if wav_max>=1.0:\n            print('    WARNING: sigproc.wavwrite: waveform in file {} is clipping. Rescaling between [-1,1]'.format(filename))\n            wav /= 1.05*wav_max\n\n    if np.max(np.abs(wav))>=1.0:\n        print('    WARNING: sigproc.wavwrite: waveform in file {} is clipping.'.format(filename))\n\n    if sp.use_pysndfile:\n        if enc is None: enc = 'pcm16'\n        pysndfile.sndio.write(filename, wav, rate=fs, format='wav', enc=enc)\n    else:\n        if enc is None: enc = np.int16\n        elif enc=='pcm16':\n            enc = np.int16\n        elif enc=='float32':\n            raise ValueError('float not supported by scipy.io.wavfile')\n        wav = wav.copy()\n        wav = enc(np.iinfo(enc).max*wav)\n        scipy.io.wavfile.write(filename, fs, wav)\n    if verbose>0:\n        print('    Output: '+filename)\n
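# exportfile (below) applies the optional processing steps in a fixed order -- channel selection, resampling, high-pass filtering, level normalisation, delay alignment -- and falls back to a plain shutil.copy2 when no processing is requested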
def exportfile( srcf,                   # Source file to export\n                destf,                  # Destination path to export to\n                resample=None,          # [Hz] Resample the waveform to the given frequency (e.g. 44100Hz).\n                highpass_fcut=None,     # [Hz] High-pass the waveform with the given cutoff frequency\n                normalize=None,         # [dB] Normalise the overall file amplitude to the given level (e.g. -32dB)\n                aligndelayref=None,     # [filepath] Align the source waveform temporally to the given waveform file.\n                usepcm16=False,         # Save the waveform using the PCM16 sample format\n                channelid=0             # Use only the first channel (left) if multiple channels are found.\n                ):\n\n    orifs = None\n\n    if resample is None and normalize is None and not usepcm16 and aligndelayref is None and highpass_fcut is None:\n        # No processing requested: copy the original file as-is, without normalization\n        shutil.copy2(srcf, destf)\n    else:\n        wav, orifs, enc = wavread(srcf)\n        if len(wav.shape)>1:\n            wav = wav[:,channelid] # Keep only channelid in case multiple tracks are present.\n        wavfs = orifs\n        ##print(\"{:10.3f}\".format(len(wav)/float(wavfs))+'s '+str(wavfs)+'Hz '+enc)\n        if usepcm16:\n            enc = 'pcm16'\n\n        if resample is not None:\n            wav = resampling.resample(wav, wavfs, resample)\n            wavfs = resample\n\n        if highpass_fcut is not None:\n            import scipy.signal  # importing scipy.io at the top does not pull in scipy.signal\n            (b, a) = scipy.signal.butter(4, highpass_fcut/(0.5*wavfs), btype='high')\n            wav = scipy.signal.filtfilt(b, a, wav)\n\n        if normalize is not None:\n            wav_spn = sp.level_normalise(wav, wavfs, level=normalize, warn_onclip=False)\n            # wav_sv56, _ = interfaces.sv56demo(wav, wavfs, level=normalize)\n\n            if 0:\n                import matplotlib.pyplot as plt\n                plt.ion()\n                plt.plot(wav, 'k')\n                plt.plot(wav_sv56, 'b')\n                plt.plot(wav_spn, 'r')\n                from IPython.core.debugger import Pdb; Pdb().set_trace()\n\n            wav = wav_spn\n\n        if aligndelayref is not None:\n            # Re-load the reference waveform and align the source to it\n            refwav, refwavfs, refenc = wavread(aligndelayref)\n            wav = sp.align_delay(wav, wavfs, refwav, refwavfs)\n\n        wavwrite(destf, wav, fs=wavfs, enc=enc)\n\n    return orifs\n\ndef waveforms_statistics(dirs):\n\n    print('Scanning: {}'.format(dirs))\n\n    fss = dict()\n    durations = []\n\n    for indir in dirs:\n        print('Scanning {}'.format(indir))\n        for infile in glob.glob(indir+'/*'):\n            if os.path.isdir(infile): continue\n            print('\\r'+str(len(durations))+' files: '+os.path.basename(infile)+'                         \\r', end='')\n            sys.stdout.flush()\n            try:\n                # wavfs, wav = wavfile.read(infile)\n                wav, wavfs, _ = wavread(infile)\n\n                fss[wavfs] = 1\n\n                durations.append(len(wav)/float(wavfs))\n            except Exception:\n                pass  # skip unreadable files\n\n    print('\\r                                                 \\r', end='')\n\n    durations = np.array(durations)\n\n    print(\"Files: {}\".format(len(durations)))\n    print(\"Sampling frequencies: {}\".format(list(fss.keys())))\n    print(\"Durations in [{:0.4},{:0.4}]s\".format(np.min(durations), np.max(durations)))\n    print(\"Total duration: {:0.4f}s ({})\".format(np.sum(durations), datetime.timedelta(seconds=np.sum(durations))))\n","sub_path":"lib/sigproc/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":6341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"200082739","text":"import os\nimport setting\n# path: folder that holds the question files\n# path_num: total number of questions to select for this question type\n\nclass GetFile:\n\n    def __init__(self,path,path_num):\n        self.path = path\n        self.ll = [] # file names found in the folder\n        self.path_num = path_num\n        self.num = []\n        with os.scandir(self.path) as entries:\n            for entry in entries:\n                self.ll.append(entry.name)\n        print(self.ll)\n\n    def getMax_num(self,s):\n        # extract the question count N from a file name like \"xxx(N题...)\"\n        return int(s[s.index(\"(\")+1 : s.index(\"题\")])\n\n    def getEch_num(self): # number of questions to draw from each file\n        total = 0\n        for i in self.ll:\n            # print(GetFile.getMax_num(self,i))\n            total += GetFile.getMax_num(self,i)\n        print(\"Total number of questions: \" + str(total))\n        for k in self.ll:\n            self.num.append(round(GetFile.getMax_num(self,k)/total * self.path_num))\n        return self.num\n\n    def getEach_max_num(self): # maximum number of questions in each file\n        max_num = []\n        for i in self.ll:\n            max_num.append(GetFile.getMax_num(self,i))\n        return max_num\n\n    def getLL(self): # list of file names\n        return self.ll\n\n# if 
__name__ == '__main__':\n#     s = GetFile(setting.two,setting.two_num).getEch_num()\n#     print(s)","sub_path":"control/File_get.py","file_name":"File_get.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"612374299","text":"import time\nfrom queue import Queue, Empty\nfrom threading import Thread, Event\n\ndef read_data(queue_, should_stop):\n    while not should_stop.is_set():\n        # calculate/get cX, cY, angle (placeholder values here; replace with the real detection code)\n        cX, cY, angle = 0, 0, 0.0\n        queue_.put((cX, cY, angle))\n        time.sleep(0.03)  # sleep 30ms\n\ndef send_to_arduino(queue_, should_stop):\n    while not should_stop.is_set():\n        try:\n            cX, cY, angle = queue_.get(timeout=1)  # time out so the loop can re-check should_stop instead of blocking forever\n        except Empty:\n            continue\n        # update arduino with cX, cY, angle\n\ndef tell_when_to_stop(should_stop):\n    # detect when to stop, and set the Event. For example, we'll wait for 10 seconds and then ask all to stop\n    time.sleep(10)\n    should_stop.set()\n\n\nqueue_ = Queue()\nshould_stop = Event()\n\nthread_stop_decider = Thread(target=tell_when_to_stop, args=(should_stop,))\nthread_read = Thread(target=read_data, args=(queue_, should_stop))\nthread_arduino = Thread(target=send_to_arduino, args=(queue_, should_stop))\n\nthread_read.start()\nthread_arduino.start()\nthread_stop_decider.start()\n\ntry:\n    while thread_read.is_alive():\n        thread_read.join(1)\nexcept KeyboardInterrupt:\n    should_stop.set()\nthread_read.join()\nthread_arduino.join()\nthread_stop_decider.join()","sub_path":"backend.1/examplecode1.py","file_name":"examplecode1.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"497911343","text":"import json\nfrom keycloak.admin import KeycloakAdminBase\nfrom keycloak.utils import to_camel_case\nfrom collections import OrderedDict\nfrom .clientroles import ClientRoles\n\n__all__ = ('Client', 'Clients',)\n\n# https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_clientrepresentation\nCLIENTS_KWARGS = [\n    'access',\n    'adminUrl',\n    'attributes',\n    'authenticationFlowBindingOverrides',\n    'authorizationServicesEnabled',\n    'authorizationSettings',\n    'baseUrl',\n    'bearerOnly',\n    'clientAuthenticatorType',\n    'clientId',\n    'consentRequired',\n    'defaultClientScopes',\n    'defaultRoles',\n    'description',\n    'directAccessGrantsEnabled',\n    'enabled',\n    'frontchannelLogout',\n    'fullScopeAllowed',\n    'id',\n    'implicitFlowEnabled',\n    'name',\n    'nodeReRegistrationTimeout',\n    'notBefore',\n    'optionalClientScopes',\n    'origin',\n    'protocol',\n    'protocolMappers',\n    'publicClient',\n    'redirectUris',\n    'registeredNodes',\n    'registrationAccessToken',\n    'rootUrl',\n    'secret',\n    'serviceAccountsEnabled',\n    'standardFlowEnabled',\n    'surrogateAuthRequired',\n    'webOrigins',\n]\n\n\nclass Clients(KeycloakAdminBase):\n    _realm_name = None\n    _paths = {\n        'collection': '/auth/admin/realms/{realm}/clients'\n    }\n\n    def __init__(self, realm_name, *args, **kwargs):\n        self._realm_name = realm_name\n        super(Clients, self).__init__(*args, **kwargs)\n\n    def all(self):\n        return self._client.get(\n            self._client.get_full_url(\n                self.get_path('collection', realm=self._realm_name)\n            )\n        )\n\n    def by_id(self, id):\n        return Client(client=self._client, realm_name=self._realm_name, id=id)\n\n    def create(self, *args, **kwargs):\n        payload = OrderedDict()\n        for key in kwargs:\n            _key = to_camel_case(key)\n            if _key in CLIENTS_KWARGS:\n                payload[_key] = kwargs[key]\n\n        return self._client.post(\n            url=self._client.get_full_url(\n                self.get_path('collection', realm=self._realm_name)\n            ),\n            data=json.dumps(payload)\n        )\n\n\nclass Client(KeycloakAdminBase):\n    _id = None\n    _realm_name = None\n    _paths = {\n        'single': 
'/auth/admin/realms/{realm}/clients/{id}'\n }\n\n def __init__(self, realm_name, id, *args, **kwargs):\n self._id = id\n self._realm_name = realm_name\n super(Client, self).__init__(*args, **kwargs)\n\n @property\n def roles(self):\n return ClientRoles(client=self._client, client_id=self._id,\n realm_name=self._realm_name)\n\n def update(self, *args, **kwargs):\n payload = OrderedDict()\n for key in kwargs:\n _key = to_camel_case(key)\n if _key in CLIENTS_KWARGS:\n payload[_key] = kwargs[key]\n\n\n return self._client.put(\n url=self._client.get_full_url(\n self.get_path('single', realm=self._realm_name, id=self._id)\n ),\n data=json.dumps(payload)\n )\n\n def delete(self):\n\n return self._client.delete(\n url=self._client.get_full_url(\n self.get_path('single', realm=self._realm_name, id=self._id)\n )\n )\n\n\n\n\n","sub_path":"src/keycloak/admin/clients.py","file_name":"clients.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"128226166","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 16 11:04:00 2018\n\n@author: SDEDU\n\"\"\"\nimport pandas as pd\nimport numpy as np\n#1)\nfpe=pd.read_csv('effort.csv')\n\n#2)\nfpe.shape\n\n#3)\nfpe.describe()\nfpe.columns\nfpe.head()\n#4)\nfpe_array=fpe.as_matrix()\n#5)\nfpe.describe()\n\n#6)\nfpe.effort.mean()\n\n#7)\nfpe[fpe.effort==0]\n\n#8)\nfpe[fpe.country=='Chile']\n\n#9)\nfpe.loc[4:20,['setting','effort']]\n#10)\nfpe.query('effort==0 & change==(1,2)')\n","sub_path":"Python/test/3번.py","file_name":"3번.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"343522648","text":"# coding: utf-8\n\n\"\"\"\n \n\n [AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501\n\n OpenAPI spec version: 2.1.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom swagger_client.models.access_description import AccessDescription # noqa: F401,E501\n\n\nclass Provider(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'location': 'str',\n 'access_description': 'AccessDescription',\n 'supported': 'bool',\n 'type': 'str'\n }\n\n attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'location': 'location',\n 'access_description': 'accessDescription',\n 'supported': 'supported',\n 'type': 'type'\n }\n\n discriminator_value_class_map = {\n 'BankProvider': 'BankProvider'\n }\n\n def __init__(self, id=None, name=None, location=None, access_description=None, supported=None, type=None): # noqa: E501\n \"\"\"Provider - a model defined in Swagger\"\"\" # noqa: E501\n\n self._id = None\n self._name = None\n self._location = None\n self._access_description = None\n self._supported = None\n self._type = None\n self.discriminator = 'type'\n\n self.id = id\n self.name = name\n self.location = location\n if access_description is not None:\n 
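            # access_description is the only optional constructor argument, so it is assigned only when provided; the remaining fields are validated as required by their setters below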
self.access_description = access_description\n self.supported = supported\n self.type = type\n\n @property\n def id(self):\n \"\"\"Gets the id of this Provider. # noqa: E501\n\n UUID of this provider. A constant to identify provider even when e.g. their bank code changes (provided type is BankProvider) # noqa: E501\n\n :return: The id of this Provider. # noqa: E501\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this Provider.\n\n UUID of this provider. A constant to identify provider even when e.g. their bank code changes (provided type is BankProvider) # noqa: E501\n\n :param id: The id of this Provider. # noqa: E501\n :type: str\n \"\"\"\n if id is None:\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id\n\n @property\n def name(self):\n \"\"\"Gets the name of this Provider. # noqa: E501\n\n Name of this provider e.g. \\\"Hamburger Bank\\\" # noqa: E501\n\n :return: The name of this Provider. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this Provider.\n\n Name of this provider e.g. \\\"Hamburger Bank\\\" # noqa: E501\n\n :param name: The name of this Provider. # noqa: E501\n :type: str\n \"\"\"\n if name is None:\n raise ValueError(\"Invalid value for `name`, must not be `None`\") # noqa: E501\n\n self._name = name\n\n @property\n def location(self):\n \"\"\"Gets the location of this Provider. # noqa: E501\n\n Location of this provider e.g. \\\"Hamburg\\\" # noqa: E501\n\n :return: The location of this Provider. # noqa: E501\n :rtype: str\n \"\"\"\n return self._location\n\n @location.setter\n def location(self, location):\n \"\"\"Sets the location of this Provider.\n\n Location of this provider e.g. \\\"Hamburg\\\" # noqa: E501\n\n :param location: The location of this Provider. # noqa: E501\n :type: str\n \"\"\"\n if location is None:\n raise ValueError(\"Invalid value for `location`, must not be `None`\") # noqa: E501\n\n self._location = location\n\n @property\n def access_description(self):\n \"\"\"Gets the access_description of this Provider. # noqa: E501\n\n Description of the access for the account-setup e.g. UI-input-fields # noqa: E501\n\n :return: The access_description of this Provider. # noqa: E501\n :rtype: AccessDescription\n \"\"\"\n return self._access_description\n\n @access_description.setter\n def access_description(self, access_description):\n \"\"\"Sets the access_description of this Provider.\n\n Description of the access for the account-setup e.g. UI-input-fields # noqa: E501\n\n :param access_description: The access_description of this Provider. # noqa: E501\n :type: AccessDescription\n \"\"\"\n\n self._access_description = access_description\n\n @property\n def supported(self):\n \"\"\"Gets the supported of this Provider. # noqa: E501\n\n Whether this bank is supported by AHOI-API, i.e. whether you can use a connection of this provider. # noqa: E501\n\n :return: The supported of this Provider. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._supported\n\n @supported.setter\n def supported(self, supported):\n \"\"\"Sets the supported of this Provider.\n\n Whether this bank is supported by AHOI-API, i.e. whether you can use a connection of this provider. # noqa: E501\n\n :param supported: The supported of this Provider. 
# noqa: E501\n :type: bool\n \"\"\"\n if supported is None:\n raise ValueError(\"Invalid value for `supported`, must not be `None`\") # noqa: E501\n\n self._supported = supported\n\n @property\n def type(self):\n \"\"\"Gets the type of this Provider. # noqa: E501\n\n Discriminator for subtypes. At the moment only `BankProvider` is supported. # noqa: E501\n\n :return: The type of this Provider. # noqa: E501\n :rtype: str\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n \"\"\"Sets the type of this Provider.\n\n Discriminator for subtypes. At the moment only `BankProvider` is supported. # noqa: E501\n\n :param type: The type of this Provider. # noqa: E501\n :type: str\n \"\"\"\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type\n\n def get_real_child_model(self, data):\n \"\"\"Returns the real base class specified by the discriminator\"\"\"\n discriminator_value = data[self.discriminator].lower()\n return self.discriminator_value_class_map.get(discriminator_value)\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Provider):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"swagger_client/models/provider.py","file_name":"provider.py","file_ext":"py","file_size_in_byte":8325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"536116702","text":"import os\nimport sys\nimport subprocess\n#from tkinter import *\n#from tkinter import StringVar\n#from tkinter import filedialog\n#import tkinter.messagebox\nfrom pyzbar.pyzbar import decode\nfrom PIL import Image\nimport time\n\n\"\"\"\nTODO:\n- Implement some way of knowing the side of the specimen (eg ventral, dorsal, lateral).\n - This will involve keeping track of IDs that have been scanned X amount of times already, \n perhaps 1st scan is always dorsal, 2nd is always ventral, etc. 
This would have to be thought of / \n implemented during the actual phototaking sessions\n - Use of dictionary (ID mapped to occurance count) is probably best\n- Look into implementing some way of reading the genus and species from the picture and sort it accordingly\n√ Implement recursive option for user\n√ Implement GUI option for user\n\"\"\"\n\nold_new_paths = []\noccurrences = dict()\ncheckMGCL = False\nSCAN_TIME = '30000'\n\"\"\"\n##############################\n# ******** GUI CODE ******** #\nclass GUI:\n window = None\n target_dir = None\n recursively = False\n\n def __init__(self, window):\n self.window = self.InitWindow(window)\n\n def InitWindow(self, window):\n # ***** GENERAL WINDOW ***** #\n window.geometry(\"500x300\")\n window.title('FLMNH Data Matrix Tool')\n #window.config(background='seashell3')\n #window.config(background='dimgray')\n\n # ***** STATUS BAR ***** #\n status_message = StringVar()\n status = Label(window, textvariable=status_message, bd=1, relief=SUNKEN, anchor=W)\n status_message.set(\"Waiting...\")\n status.pack(side=BOTTOM, fill=X)\n \n # ***** TOP MENU ***** #\n menu = Menu(window)\n window.config(menu=menu)\n subMenu = Menu(menu)\n menu.add_cascade(label=\"Help\", menu=subMenu)\n subMenu.add_command(label=\"Usage\", command=self.HelpPromt)\n\n # ***** BUTTONS ***** #\n button = Button(window, text=\"Select Folder\", command=self.SelectFolder)\n button.pack()\n\n toggle = IntVar()\n recursion_checkbox = Checkbutton(window, text='Recursive', variable=toggle, command= lambda: self.ToggleRecursive(toggle.get()))\n recursion_checkbox.pack()\n\n # review_data_checkbox = Checkbutton(window, text='Review MGCL (Legacy Cleanup)', variable=toggle, command= lambda: self.ToggleRevision(toggle.get()))\n # review_data_checkbox.pack()\n\n run_button = Button(window, text=\"Run\", command= lambda: self.Run(status_message))\n run_button.pack()\n\n undo_button = Button(window, text=\"Undo Changes\", command= lambda: self.Undo(status_message))\n undo_button.pack()\n\n quit_button = Button(window, text='Quit', command=window.destroy)\n quit_button.pack()\n\n return window\n\n\n def mainloop(self):\n self.window.mainloop()\n\n\n def Run(self, status_message):\n if self.target_dir == None:\n tkinter.messagebox.showerror(title='User Error', message='You must select a path first.')\n return\n\n # check trailing slash\n if not self.target_dir.endswith('/') or not self.target_dir.endswith('\\\\'):\n self.target_dir += '/'\n\n # no errors up to this point, update status to Running\n status_message.set('Running...')\n\n # check method\n if not self.recursively:\n ProcessData(self.target_dir)\n elif self.recursively:\n RecursiveProcessData(self.target_dir)\n\n # finished successfully\n status_message.set('Finished...')\n\n\n def Undo(self, status_message):\n # call non-class Undo function\n message = Undo()\n\n # update status bar / pop up message for error\n if message == 'There is nothing to undo.':\n tkinter.messagebox.showerror(title='User Error', message=message)\n else:\n status_message.set(message)\n\n\n def ToggleRecursive(self, value):\n if value == 0:\n self.recursively = False\n elif value == 1: \n self.recursively = True\n\n\n def SelectFolder(self):\n self.target_dir = filedialog.askdirectory()\n\n\n def HelpPromt(self):\n prompt = str(\n \"This program will help you to automate the renaming of specimen images by automatically finding and \" \\\n \"decoding data matrices in the images. 
Simply select the target folder, select whether or not to run \" \\\n \"the algorithm recursively and then hit run.\\n\\nTo run the algorithm recursively means that in addition \" \\\n \"to the target directory (the folder you selected), every subdirectory (every folder within that folder) \" \\\n \"will also undergo the scanning and renaming process. For example, if you select a target folder path of \" \\\n \"/home/user/target/ and that filder contains a folder called random, running recursively will change files \" \\\n \"in both target and random (and any additional subfolders in random).\\n\\nAll changes are temporarily recorded \" \\\n \"in the program, so if you want to undo the script did just hit the undo button BEFORE you close the window!\"\n )\n tkinter.messagebox.showinfo('Usage Help', prompt)\n\"\"\"\n\n#############################\n# ******* MAIN CODE ******* #\ndef AskUsage():\n prompt = str(\n \"This program will help to automate the renaming of specimen images by automatically finding and \" \\\n \"decoding data matrices / barcodes in the images. On start, you will be prompted with whether or not \" \\\n \"to view this help message. After which, the program will begin in 10 seconds. You will enter the path \" \\\n \"to a folder containing the families of the collected speciment. On a mac, you may simply drag the folder \" \\\n \"into the terminal window. You will then have the option to run the program recursively (scanning all \" \\\n \"images in all subfolders) or standardly (scanning only in provided folder, no additional subfolders). \" \\\n \"All changes to file names are temporarily saved, so please review the changes when prompted. You will \" \\\n \"have the chance to undo the program's renaming ONLY WHEN PROMPTED, so it is important to check the results \" \\\n \"before closing / terminating the project\"\n )\n wanted = input(\"\\nDo you want to see the usage information?\\n [1]yes\\n [2]no\\n --> \")\n if wanted == '1' or wanted == 'y' or wanted == 'yes':\n print(prompt)\n time.sleep(10)\n\ndef GetDirs(path):\n subdirectories = []\n for folder in sorted(os.listdir(path)):\n if os.path.isdir(path + folder):\n subdirectories.append(folder)\n return subdirectories\n\n\ndef GetImages(path):\n global checkMGCL\n images = []\n for image in sorted(os.listdir(path)):\n if os.path.isfile(path + image):\n # if specified, do not rename images that already contain MGCL\n if \"MGCL\" not in image and checkMGCL == False:\n images.append(image)\n # default\n elif checkMGCL == True:\n images.append(image)\n return images\n \n\ndef RecursiveProcessData(path):\n for dir in GetDirs(path):\n RecursiveProcessData(path + dir + '/')\n ProcessData(path)\n\n\n\"\"\"\ntakes path to image, scans matrix, returns new name\n\"\"\"\ndef BarcodeRead(path):\n print(\"DMTX not found, looking for legacy barcode:\")\n decoder = decode(Image.open(path))\n try:\n name = str(decoder[0].data)\n except:\n name = \"nothing\"\n return name\n\ndef DMRead(path):\n # stop if nothing is found after 15 seconds (15000 milliseconds)\n print('cat ' + path + ' | dmtxread --stop-after=1 -m' + SCAN_TIME)\n p = subprocess.Popen('cat ' + path + ' | dmtxread --stop-after=1 -m' + SCAN_TIME, shell=True,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return str(p.stdout.readline())\n\ndef ProcessData(path):\n print(\"\\nWorking in... {}\\n\".format(path))\n \n global old_new_paths\n global occurrences\n\n for image in GetImages(path):\n # scanning\n ext = '.' 
+ image.split('.')[1]\n arg = path + image\n\n print(image)\n\n new_name = DMRead(arg)\n if \"MGCL\" not in new_name:\n new_name = BarcodeRead(arg)\n \n # Replace garbage characters read in\n new_name = str(new_name).replace(\"b\\'\", '').replace(' ', '_').replace('\\'', '')\n\n new_name = new_name.replace(\"b\\'\", '').replace(' ', '_').replace('\\'', '')\n\n # get and check specimen id\n scanned_id = int(new_name.split('_')[1])\n \n if \"lateral\" in image.lower() or \"lat\" in image.lower() or \"_l\" in image.lower():\n # Lateral\n new_name += '_L'\n \n \n else:\n if not occurrences or not scanned_id in occurrences:\n occurrences[scanned_id] = 1\n elif scanned_id in occurrences:\n occurrences[scanned_id] += 1\n\n if occurrences[scanned_id] == 1:\n # Dorsal\n if(\"_L\" in image or \"_V\" in image):\n print(\"Warning changed to Dorsal\")\n new_name += '_D'\n elif occurrences[scanned_id] == 2:\n # Ventral\n if(\"_L\" in image or \"_D\" in image):\n print(\"Warning changed to Ventral\")\n new_name += '_V'\n else:\n if(\"_L\" in image or \"_V\" in image or \"_D\" in image):\n print(\"Warning changed to Manual\")\n new_name += '_MANUAL'\n\n\n # renaming\n # os.rename(path + image, path + (new_name + ext))\n print ('Renaming {} as {}\\n'.format(path + image, path + new_name + ext))\n old_new_paths.append(tuple((path + image, path + new_name + ext)))\n\n\ndef Wait():\n wait = True\n print(\"Program completed... Please look over changes.\")\n\n while wait == True:\n undo = input(\"Do you wish to undo?\\n [1]yes\\n [2]no\\n --> \")\n if undo == '1' or undo == 'y' or undo =='yes':\n print(Undo())\n wait = False\n elif undo == '2' or undo == 'n' or undo == 'no':\n wait = False\n else:\n print('Input error. Invalid option.')\n continue\n\ndef Undo():\n global old_new_paths\n print('\\nUndoing changes...')\n for old_path,new_path in old_new_paths:\n #os.rename(new_path, old_path)\n print ('Renaming {} back to {}\\n'.format(new_path, old_path))\n return 'Success... Restored original state.'\n\n\ndef main():\n global SCAN_TIME \n global checkMGCL\n \n #interface = input(\"\\nWould you prefer to use a: \\n [1]command-line interface \\n [2]graphical interface \\n--> \")\n interface = '1'\n if interface == '1':\n # museum preformatted file names => MGCL_7digitnum\n AskUsage()\n path = input('\\nPlease enter the path to the folder of images: \\n --> ')\n\n new_time = input('\\nPlease enter the max amount of scan time to search for a matrix per image (in seconds): \\n --> ')\n while not new_time.isdigit():\n new_time = input('Input error. Please enter an integer. 
\\n --> ')\n SCAN_TIME = new_time + '000'\n\n askMGCL = 'nothing'\n while askMGCL == 'nothing':\n askMGCL = input('Would you like to scan images already containing (MGCL) in the filename \\n [1] Yes \\n [2] No \\n --> ')\n if askMGCL.lower() == \"yes\" or askMGCL.lower() == \"y\" or askMGCL == \"1\":\n checkMGCL = True\n elif askMGCL.lower() == \"no\" or askMGCL.lower() == \"n\" or askMGCL == \"2\":\n checkMGCL = False\n else:\n askMGCL = 'nothing'\n print('Please enter a correct value: ')\n\n # this check removes trailing whitespace, an occurrence when dragging a folder into the terminal prompt in MacOS\n if path.endswith(' '):\n path = path[:-1]\n\n # ensures trailing '/' is present\n if not path.endswith('/') or not path.endswith('\\\\'):\n path += '/'\n\n method = input(\"\\nChoose 1 of the following: \\n [1]Standard (All files \" \\\n \"in this directory level only) \\n [2]Recursive (All files in this \" \\\n \"directory level AND every level below) \\n--> \")\n\n if method == '1':\n ProcessData(path)\n Wait()\n elif method == '2':\n RecursiveProcessData(path)\n Wait()\n else:\n print(\"Input error.\")\n sys.exit(1)\n \n #elif interface == '2':\n # window = Tk()\n # my_gui = GUI(window)\n # my_gui.mainloop()\n \n else:\n print(\"Input error.\")\n sys.exit(1)\n\n print ('Program completed...\\n')\n\nif __name__ == '__main__':\n main()\n\n\n\"\"\"\nNotes / Bug report:\n- MacOS has a bug with tkinter revolving around a class (related to the folder finder dialog box)\n being defined twice. This is not a problem with tkinter, it is a problem with Apple. Regardless, \n it is just a warning not an error. Since the definitions are identical it does not actually matter \n which one is eventually chosen to be used by the OS.\n- There is a bug with tkinter and MacOS Mojave dark theme. The text on tkinter buttons / widgets is \n not visible if dark mode is enabled. 
No such errors exist when running on Linux platforms, however.\n\"\"\"\n","sub_path":"python/dm_reader.py","file_name":"dm_reader.py","file_ext":"py","file_size_in_byte":13423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"382503893","text":"import np_getdata\r\nimport np_model\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt \r\n\r\ntrain_data, test_data, train_label, test_label, data, labels, train_num = np_getdata.getData()\r\n\r\nnet = np_model.threeLayersNN(input_=train_data.shape[1], hidden=10, output=3, learning_rate=1e-2)\r\n_, _, _, weight1, weight2, bias1, bias2 = net.initialize()\r\n\r\ndef train_iris(num_epoch):\r\n train_loss_lis = []\r\n train_acc_lis = []\r\n test_loss_lis = []\r\n test_acc_lis = []\r\n PATH1_ = './weight1/'\r\n PATH2_ = './weight2/'\r\n PATH3_ = './bias1/'\r\n PATH4_ = './bias2/'\r\n for epoch in range(num_epoch):\r\n PATH1 = PATH1_ + str(epoch)\r\n PATH2 = PATH2_ + str(epoch)\r\n PATH3 = PATH3_ + str(epoch)\r\n PATH4 = PATH4_ + str(epoch)\r\n f1 = open(PATH1, 'w')\r\n f2 = open(PATH2, 'w')\r\n f3 = open(PATH3, 'w')\r\n f4 = open(PATH4, 'w')\r\n\r\n train_loss = train_acc = 0\r\n for row in range(train_num):\r\n if row == 0:\r\n new_weight1 = weight1\r\n new_weight2 = weight2\r\n new_bias1 = bias1\r\n new_bias2 = bias2\r\n\r\n train_data_new = train_data[row,:].reshape(-1, 1)\r\n ground_truth = train_label[row]\r\n\r\n #MSELoss, out, _, _, _, _, _, _, _, _ = net.forward(train_data_new, ground_truth, new_weight1, new_weight2, new_bias1, new_bias2)\r\n weight1_, weight2_, bias1_, bias2_, MSELoss, out = net.backpropagation(train_data_new, ground_truth, new_weight1, new_weight2, new_bias1, new_bias2)\r\n \r\n \r\n new_weight1 = weight1_\r\n new_weight2 = weight2_\r\n new_bias1 = bias1_\r\n new_bias2 = bias2_\r\n train_loss += MSELoss\r\n if out == ground_truth:\r\n train_acc += 1\r\n\r\n f1.write(str(weight1_))\r\n f1.write('\\n')\r\n f2.write(str(weight2_))\r\n f2.write('\\n')\r\n f3.write(str(bias1_))\r\n f3.write('\\n')\r\n f4.write(str(bias2_))\r\n f4.write('\\n')\r\n f1.close()\r\n f2.close()\r\n f3.close()\r\n f4.close()\r\n\r\n train_acc /= train_num\r\n train_loss /= train_num\r\n\r\n train_loss_lis.append(train_loss)\r\n train_acc_lis.append(train_acc)\r\n\r\n test_acc, test_loss = test_iris(new_weight1, new_weight2, new_bias1, new_bias2)\r\n test_acc_lis.append(test_acc)\r\n test_loss_lis.append(test_loss)\r\n\r\n print('epoch:', epoch, 'training loss:', '%0.6f'%train_loss, 'training accuracy:', '%0.3f'%train_acc, \\\r\n '\\t', 'testing loss:', '%0.6f'%test_loss, 'testing accuracy:', '%0.3f'%test_acc)\r\n \r\n print('final accuracy:', '%0.3f'%test_acc_lis[-1])\r\n return train_loss_lis, train_acc_lis, test_loss_lis, test_acc_lis\r\n\r\ndef test_iris(test_weight1, test_weight2, test_bias1, test_bias2):\r\n test_num = data.shape[0] - train_num\r\n test_loss = test_acc = 0\r\n for row in range(test_num):\r\n test_data_new = test_data[row,:].reshape(-1, 1)\r\n test_ground_truth = test_label[row]\r\n MSELoss, out, _, _, _, _, _, _, _, _, = net.forward(test_data_new, test_ground_truth, test_weight1, test_weight2, test_bias1, test_bias2)\r\n\r\n test_loss += MSELoss\r\n if out == test_ground_truth:\r\n test_acc += 1\r\n\r\n test_acc /= test_num\r\n test_loss /= test_num\r\n\r\n return test_acc, test_loss\r\n\r\n\r\ndef plot(train_loss_lis, train_acc_lis, test_loss_lis, test_acc_lis, num_epoch):\r\n epoch = []\r\n for i in range(num_epoch):\r\n epoch.append(i)\r\n\r\n plt.figure(1)\r\n 
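    # figure 1: training vs. testing loss per epoch (figure 2 below shows the corresponding accuracies)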
plt.plot(epoch, train_loss_lis, label = 'training loss')\r\n    plt.plot(epoch, test_loss_lis, label = 'testing loss')\r\n    plt.title('training and testing loss with epoch')\r\n    plt.legend()\r\n    plt.xlabel('epoch')\r\n    plt.ylabel('loss')\r\n\r\n    plt.figure(2)\r\n    plt.plot(epoch, train_acc_lis, label = 'training accuracy')\r\n    plt.plot(epoch, test_acc_lis, label = 'testing accuracy')\r\n    plt.title('training and testing accuracy with epoch')\r\n    plt.legend()\r\n    plt.xlabel('epoch')\r\n    plt.ylabel('accuracy')\r\n\r\n    plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    train_loss_lis, train_acc_lis, test_loss_lis, test_acc_lis = train_iris(1000)\r\n    plot(train_loss_lis, train_acc_lis, test_loss_lis, test_acc_lis, 1000)\r\n\r\n","sub_path":"homework2/np_iris.py","file_name":"np_iris.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548836402","text":"\"\"\"\nA mathematics-focused secondary school offers an application that tells applicants whether they are required to take the entrance exam. \nAsk the user to enter their mathematics grade and the average of all grades on their last report card. \nIf the applicant's grade average is lower than 1.8 and their mathematics grade is at worst a 2, print: \"You are admitted without the entrance exam.\". \nOtherwise print \"You must pass the entrance exam.\".\n\n3\n\"\"\"\n\nmarkAverage = float(input(\"Enter the grade average: \"))\nmarkMath = int(input(\"Enter the mathematics grade: \"))\nif markAverage < 1.8 and markMath <= 2:\n    print(\"You are admitted without the entrance exam.\")\nelse:\n    print(\"You must pass the entrance exam.\")","sub_path":"zaklady-programovani-2/reseni_19_04_2020/podminky-2/priklad_2.py","file_name":"priklad_2.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"565776846","text":"#!/usr/bin/env python\n\nimport ircbot\nimport sys\n\ndef main():\n    if len(sys.argv) != 4:\n        sys.exit(\"Use main.py <server> <channel> <nickname>\")\n    else:\n        irc = ircbot.IrcBot(*sys.argv[1:])\n        try:\n            irc.run()\n        except KeyboardInterrupt:\n            irc.quit(\"Closed by user.\")\n            sys.exit(\"Interrupted by user.\")\n\nif __name__ == \"__main__\":\n    main()","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"369190312","text":"no_OfNodes = int(input('Enter the no of nodes: '))\nfront = -1\nqueue = []\n\n\ndef enqueue(x):\n    global front, queue\n    if front == -1:\n        front = 0\n    queue.append(x)\n\n\ndef dequeue():\n    global front, no_OfNodes\n    front += 1\n    if front <= no_OfNodes - 1 and front < len(queue):\n        bfs(queue[front])\n\n\ndef bfs(x):\n    global graph, no_OfNodes\n    if x not in queue:\n        enqueue(x)\n\n    for i in range(no_OfNodes):\n        if graph[x][i] and i not in queue:\n            enqueue(i)\n\n    dequeue()\n\n\ngraph = [[0 for i in range(no_OfNodes)] for j in range(no_OfNodes)]\nvertices = input(\"enter the name of vertices: \")\nvertices = vertices.replace(\" \", \"\")\nwhile True:\n    print(\"enter the directed connected vertices\")\n    v1 = (input())[0]\n    v2 = (input())[0]\n    graph[vertices.index(v1)][vertices.index(v2)] = 1\n\n    resp = (input(\"More connected vertices?(Y or y / N or n): \"))[0]\n    if resp == 'n' or resp == 'N':\n        break\nprint(graph)\nsetOfInit = []\n\nfor i in range(no_OfNodes):\n    bfs(i)\n    print(len(queue))\n    if len(queue) == no_OfNodes:\n        setOfInit.append(vertices[i])\n    front = -1\n    
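    # reset the module-level BFS state (front pointer and shared queue) so the next candidate vertex starts a fresh traversal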
queue.clear()\n\nprint(setOfInit)","sub_path":"Assignment-1-Initiator_In_A_Graph/Find_Initiators.py","file_name":"Find_Initiators.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"82525205","text":"from text import Text\n\nclass Sentence(Text):\n def __init__(self, text, pos, index, boolean):\n self.index = index\n self.boolean = boolean\n self.pos = pos\n if text != \"\":\n if self.isElemental(text):\n color = 'blue'\n else:\n color = 'black'\n if boolean:\n self.renderedText = Text(text + \" : T\", color, True, True)\n else:\n self.renderedText = Text(text + \" : F\", color, True, True)\n self.realPos = [self.pos[0] - (self.renderedText.width // 2), self.pos[1]]\n else:\n self.renderedText = Text(\" \", 'black', True, False)\n self.realPos = [self.pos[0], self.pos[1]]\n if index:\n self.renderedIndex = Text(str(self.index), 'red', True, False)\n\n def update(self, newText):\n self.textParsed = self.parse(newText)\n self.renderedText = Text(self.textParsed, 'black', True, False)\n self.realPos = [self.pos[0] - (self.renderedText.width // 2), self.pos[1]]\n \n def isElemental(self, text):\n phrases = [\">\", \"<\", \".\", \",\", \"-\"]\n symbols = [\" → \", \" ↔ \", \" ∨ \", \" ∧ \", \"¬\"]\n if any(i in phrases for i in text):\n return False\n else:\n return True\n \n def render(self, scr):\n if self.index:\n scr.blit(self.renderedIndex.text, (self.realPos[0] - 20, self.realPos[1]))\n scr.blit(self.renderedText.text, self.realPos)\n","sub_path":"sentence.py","file_name":"sentence.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"123256057","text":"from netCDF4 import Dataset\nimport numpy as np\nimport math\nfrom sys import exit, path\nfrom os import getcwd\npath.insert(0, getcwd()+'/../helpers/genNetCDF')\nimport Topography as tg\nimport Forcing as fc\nimport ICARoptions as opt\n\n# Python program generates an ideal case\nclass IdealTest:\n # from ideal test\n sealevel_pressure = 100000.0 # pressure at sea level [Pa]\n dz_value = 500.0 # thickness of each model gridcell [m]\n # hill values currently do nothing\n hill_height = 1000.0 # height of the ideal hill(s) [m]\n n_hills = 1.0 # number of hills across the domain\n\n def __init__(self, nz=10, nx=2, ny=2, n_hills=1.0):\n rh = 0.9\n u_test_val = 0.5\n v_test_val = 0.5\n w_test_val = 0.0\n water_vapor_test_val = 0.001\n theta_test_val = 300.0\n\n self.forcing = fc.Forcing(nz, nx, ny, self.sealevel_pressure,\n rh, u_test_val, v_test_val, w_test_val,\n water_vapor_test_val, theta_test_val,\n dz_value=self.dz_value)\n\n self.init = tg.Topography(nz, nx, ny)\n\n\ndef main():\n # ICAR Options generate the ICAR namelist\n options = opt.ICARoptions()\n test = IdealTest(nz=40, nx=40, ny=40, n_hills=1.0)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tests/gen_ideal_test.py","file_name":"gen_ideal_test.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"593755424","text":"\"\"\"\ntest_cruAKtemp.py\n tests of the cruAKtemp component of permamodel\n\"\"\"\n\nimport cruAKtemp\nimport os\nimport numpy as np\nfrom ..tests import data_directory, examples_directory\nfrom nose.tools import (assert_is_instance, assert_greater_equal,\n assert_less_equal, assert_almost_equal,\n assert_greater, assert_in, assert_true,\n assert_equal)\nimport datetime\nfrom 
dateutil.relativedelta import relativedelta\n\n\n# ---------------------------------------------------\n# Tests that the frost_number module is importing\n# ---------------------------------------------------\ndef test_can_initialize_cruAKtemp_class():\n ct = cruAKtemp.cruAKtemp.CruAKtempMethod\n\ndef test_write_gridfile():\n \"\"\" Test that can write a gridfile to disk \"\"\"\n # Create a temperature grid with default structure\n grid_desc = cruAKtemp.cruAKtemp_utils.write_gridfile('temperature')\n\n # Create a temperature grid with described shape and type\n grid_desc = cruAKtemp.cruAKtemp_utils.write_gridfile('temperature',\n gridshape=(3,4),\n gridtype=np.float64)\n\n # Fail when attempting to create a grid with non-shape shape\n try:\n grid_desc = cruAKtemp.cruAKtemp_utils.write_gridfile('temperature',\n gridshape='notashape')\n except ValueError:\n pass\n\ndef test_write_default_temperature_cfg_file():\n \"\"\" test that util operation writes default cfg file \"\"\"\n cruAKtemp.cruAKtemp_utils.generate_default_temperature_run_cfg_file(\\\n SILENT=True)\n\ndef test_initialize_opens_temperature_netcdf_file():\n \"\"\" Test that temperature netcdf file is opened \"\"\"\n ct = cruAKtemp.cruAKtemp.CruAKtempMethod()\n ct.initialize_from_config_file()\n\ndef test_get_timestep_from_date():\n \"\"\" Test get timestep from a date \"\"\"\n ct = cruAKtemp.cruAKtemp.CruAKtempMethod()\n ct.initialize_from_config_file()\n\n # Timestep should initialize to zero\n this_timestep = 0\n assert_equal(this_timestep, ct._current_timestep)\n\n # Adding 10 years should make the current timestep 10\n number_of_years = 10\n ct.increment_date(number_of_years)\n assert_equal(10, ct._current_timestep)\n\n #...and make the date 10 days later\n this_timedelta = relativedelta(years=number_of_years)\n assert_equal(ct.first_date+this_timedelta, ct._current_date)\n\ndef test_time_index_yields_correct_values():\n \"\"\" Check that we get the expected index into the netcdf file\n for specified month and year \"\"\"\n ct = cruAKtemp.cruAKtemp.CruAKtempMethod()\n ct.initialize_from_config_file()\n\n # Test that first month yields index zero\n month = 1\n year = 1901\n idx = ct.get_time_index(month, year)\n assert_equal(idx, 0)\n\n # Test that a year later yields index 12\n month = 1\n year = 1902\n idx = ct.get_time_index(month, year)\n assert_equal(idx, 12)\n\n # Test that a century and a year later yields index 1212\n month = 1\n year = 2002\n idx = ct.get_time_index(month, year)\n assert_equal(idx, 1212)\n\ndef test_specific_netcdf_values():\n \"\"\" Test that indexing yields specific values chosen from file\n Values were hand-verified using panoply tables\"\"\"\n ct = cruAKtemp.cruAKtemp.CruAKtempMethod()\n ct.initialize_from_config_file()\n\n # Indexes here are based on the reduced-resolution grid, if used\n # Note: Panoply has 1-based indexes, so must add 1 to these\n # to get Panoply equivalent\n t_idx = 0\n x_idx = 0\n y_idx = 0\n assert_almost_equal(ct._temperature[t_idx, y_idx, x_idx], -26.1, places=5)\n\n t_idx = 20\n x_idx = 0\n y_idx = 0\n assert_almost_equal(ct._temperature[t_idx, y_idx, x_idx], -0.3, places=5)\n\n t_idx = 20\n x_idx = 0\n y_idx = 6\n assert_almost_equal(ct._temperature[t_idx, y_idx, x_idx], -1.9, places=5)\n\n\ndef test_getting_monthly_annual_temp_values():\n \"\"\" Test that prior_months and prior_year values are correct\n Values were hand-verified using panoply tables\"\"\"\n ct = cruAKtemp.cruAKtemp.CruAKtempMethod()\n ct.initialize_from_config_file()\n\n # Test prior months values\n # 
These are values starting from 2/1901\n #actualvalues = [-28.700001, -24.799999, -16.600000, -2.700000,\n # 7.800000, 11.000000, 7.100000, -0.300000,\n # -13.400000, -22.100000, -26.500000, -25.700001]\n #actualmean = -11.241668\n\n # These values start with 1/1902\n actualvalues = [-25.7, -27.0, -26.6, -16.9, -2.8, 8.1,\n 11.4, 7.3, -0.3, -13.6, -22.1, -28.9]\n actualmean = -11.425\n\n vallist = []\n\n for i in np.arange(0, 12):\n vallist.append(ct.T_air_prior_months[i][0, 0])\n\n for i in np.arange(0, 12):\n assert_almost_equal(vallist[i], actualvalues[i], places=5)\n\n # Test prior year value\n assert_almost_equal(ct.T_air_prior_year[0, 0], actualmean, places=5)\n\ndef test_can_increment_to_end_of_run():\n \"\"\" Test that we can get values for last timestep \"\"\"\n ct = cruAKtemp.cruAKtemp.CruAKtempMethod()\n ct.initialize_from_config_file()\n\n number_of_years = ct._last_timestep - ct._first_timestep\n ct.increment_date(number_of_years)\n ct.update_temperature_values()\n ct.T_air.tofile(\"end_T_air.dat\")\n # Note: nc time of 4000 corresponds to model date of Dec 15, 2010\n\ndef test_first_and_last_valid_dates():\n \"\"\" Test that first and last valid dates are read from netcdf file \"\"\"\n ct = cruAKtemp.cruAKtemp.CruAKtempMethod()\n ct.initialize_from_config_file()\n assert_equal(datetime.date(1901,1,1), ct._first_valid_date)\n assert_equal(datetime.date(2009,12,31), ct._last_valid_date)\n\ndef test_jan_jul_arrays():\n \"\"\" test that cruAKtemp provides Jan and Jul values as individual arrays \"\"\"\n ct = cruAKtemp.cruAKtemp.CruAKtempMethod()\n ct.initialize_from_config_file()\n expected_jan_val = -25.7\n expected_jul_val = 11.4\n\n assert_almost_equal(ct.T_air_prior_jan[0, 0], expected_jan_val, places=5)\n assert_almost_equal(ct.T_air_prior_jul[0, 0], expected_jul_val, places=5)\n","sub_path":"cruAKtemp/tests/test_cruAKtemp.py","file_name":"test_cruAKtemp.py","file_ext":"py","file_size_in_byte":6164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"120969331","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nfrom odoo.exceptions import ValidationError\nfrom datetime import datetime\nimport requests\nimport qrcode\nimport base64\nimport json\nfrom io import BytesIO\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\n# Retorna las cabezeras de la peticion HTTP\ndef _get_headers(config):\n HEADERS = {\n 'Authorization': config.api_token,\n 'Content-Type': 'application/json'\n }\n return HEADERS\n\n\nclass AccountInvoice(models.Model):\n _inherit = \"account.invoice\"\n\n # Campo para guardar respuesta de sunat\n sunat_response = fields.Text(string=\"Respuesta de Sunat\", readonly=True, copy=False)\n sunat_request = fields.Text(string=\"Solicitud a Sunat\", readonly=True, copy=False)\n sunat_status = fields.Selection([\n ('-', 'Pendiente de Envío'),\n ('0', 'Aceptado por Sunat'),\n ('1', 'Rechazado por Sunat'),\n ('2', 'Anulado en Sunat'),\n ('3', 'Error'),\n ('4', 'Aceptado por el Proveedor'),\n ('5', 'Anulacion no Recibida por Sunat')],\n\n string='Estado Sunat', help='Estado del Documento en Sunat', default='-', readonly=True, copy=False)\n sunat_description = fields.Text(string=\"Mensaje de Sunat\", readonly=True, copy=False)\n sunat_qr_code = fields.Binary(string=\"QR de Sunat\", copy=False)\n sunat_pdf = fields.Binary(string=\"PDF de Sunat\", copy=False)\n electronic_invoicing = fields.Boolean(string=\"Electronic Invoicing Peru\", compute=\"_compute_electronic_invoicing\",\n store=True, 
copy=False)\n\n # is_electronic_invoicing = fields.Boolean(string=\"Is Electronic Invoicing Peru\", default=False, copy=False)\n\n @api.depends('company_id.electronic_invoicing', 'document_type_code', 'type')\n def _compute_electronic_invoicing(self):\n for rec in self:\n rec.electronic_invoicing = rec.company_id.electronic_invoicing or False\n if rec.electronic_invoicing and rec.type not in ['out_invoice', 'out_refund']:\n rec.electronic_invoicing = False\n if rec.electronic_invoicing and rec.document_type_code not in ['01', '03', '07', '08']:\n rec.electronic_invoicing = False\n\n @api.multi\n def action_invoice_open(self):\n # OVERRIDE\n res = super(AccountInvoice, self).action_invoice_open()\n\n config = self.company_id or self.env.user.company_id\n\n if self.type and self.type in ['out_invoice', 'out_refund']:\n if self.sunat_serie.sequence_id and self.sunat_number and config.electronic_invoicing:\n resp = self.report_sunat(config)\n if not resp:\n self.sunat_serie.sequence_id.number_next = int(self.sunat_number)\n raise ValidationError(\"No se pudo validar el documento en Sunat\")\n\n return res\n\n # 0004 - Inicio\n @api.multi\n def anular_sunat(self, reason, config=False):\n if type(config) is not type(self.env['res.company']) or not config:\n config = self.company_id or self.env.user.company_id\n data = {}\n data['operacion'] = 'generar_anulacion'\n data['tipo_de_comprobante'] = 1\n data['serie'] = self.sunat_serie.name if self.sunat_serie else \"\"\n data['numero'] = self.sunat_number if self.sunat_serie else \"\"\n data['motivo'] = reason if reason else \"\"\n data['codigo_unico'] = self.id\n response = requests.post(url=config.api_url, headers=_get_headers(config), json=data)\n if response.status_code == 200:\n resp = response.json()\n if resp['aceptada_por_sunat']:\n self.sunat_status = '2'\n return True\n else:\n self.sunat_status = '5'\n return self.sunat_check_status(config)\n else:\n return False\n\n @api.multi\n def sunat_check_status(self, config=False):\n if type(config) is not type(self.env['res.company']) or not config:\n config = self.company_id or self.env.user.company_id\n for rec in self:\n rpta = False\n data = {}\n data['operacion'] = 'consultar_anulacion'\n data['tipo_de_comprobante'] = 1\n data['serie'] = rec.sunat_serie.name if rec.sunat_serie else \"\"\n data['numero'] = rec.sunat_number if rec.sunat_serie else \"\"\n data['codigo_unico'] = rec and rec.id or 0\n if rec.sunat_status == '5':\n response = requests.post(url=config.api_url, headers=_get_headers(config), json=data)\n if response.status_code == 200:\n resp = response.json()\n if resp['aceptada_por_sunat']:\n rec.sunat_status = '2'\n rpta = True\n return rpta\n\n @api.multi\n def report_sunat(self, config=False):\n respuesta = False\n if type(config) is not type(self.env['res.company']) or not config:\n config = self.company_id or self.env.user.company_id\n # -- JSON --\n data = {}\n seq = False\n number = False\n if self.sunat_serie.sequence_id and not self.sunat_number:\n seq = self.sunat_serie.sequence_id\n number = seq._next_do()\n if number:\n self.sunat_number = number\n if not number:\n number = self.sunat_number\n seq = self.sunat_serie.sequence_id\n if self.electronic_invoicing:\n documentos = {\n # 'out_invoice': 1, # FACTURA / Customer Invoice\n # 'out_invoice': 1, # FACTURA / Customer Invoice\n '01': 1, # FACTURA / Customer Invoice\n '03': 2, # BOLETA / Customer Invoice\n '07': 3, # NOTA DE CRÉDITO / Customer Credit Note\n '08': 4, # NOTA DE DÉBITO / Customer Invoice\n }\n 
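            # build the provider's JSON payload field by field; the 'documentos' mapping above translates the Odoo document type code ('01' factura, '03' boleta, '07' credit note, '08' debit note) into the provider's numeric 'tipo_de_comprobante'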
data['operacion'] = 'generar_comprobante'\n data['tipo_de_comprobante'] = documentos.get(self.document_type_code, 0)\n data['serie'] = self.sunat_serie.name if self.sunat_serie else \"\"\n data['numero'] = (self.sunat_number) if self.sunat_serie and self.sunat_number else \"\"\n data['sunat_transaction'] = int(self.operation_type_id.code) if self.operation_type_id.code else \"\"\n data['cliente_tipo_de_documento'] = \\\n int(self.partner_id.catalog_06_id.code) if self.partner_id.catalog_06_id else \"\"\n data['cliente_numero_de_documento'] = int(self.partner_id.vat) if self.partner_id.vat else \"\"\n data['cliente_denominacion'] = \\\n self.partner_id.registration_name if self.partner_id.registration_name else \"\"\n data['cliente_direccion'] = self.partner_id.street if self.partner_id.street else \"\"\n data['cliente_email'] = self.partner_id.email if self.partner_id.email else \"\"\n data['cliente_email_1'] = \"\"\n data['cliente_email_2'] = \"\"\n data['fecha_de_emision'] = self.date_invoice.strftime(\"%d-%m-%Y\") if self.date_invoice else \"\"\n data['fecha_de_vencimiento'] = self.date_due.strftime(\"%d-%m-%Y\") if self.date_due else \"\"\n monedas = {\n 'PEN': 1,\n 'USD': 2,\n 'EUR': 3\n }\n data['moneda'] = monedas.get(self.currency_id.name, \"\") if self.currency_id else \"\"\n data['tipo_de_cambio'] = self.exchange_rate if self.exchange_rate and self.exchange_rate != 1.0 else \"\"\n porcentaje_de_igv = \"\"\n taxes = list(filter(lambda line: not line.tax_id.tax_rate != 'igv', self.tax_line_ids))\n if len(taxes) > 0:\n porcentaje_de_igv = taxes[0].tax_id.amount\n data['porcentaje_de_igv'] = \"18.00\"\n data['descuento_global'] = \"\"\n data['total_descuento'] = round(self.total_discount, 10) if self.total_discount else \"\"\n data['total_anticipo'] = \"\"\n data['total_gravada'] = round(self.amount_untaxed, 10) if self.total_igv else \"\"\n data['total_inafecta'] = round(self.total_inafecto, 10) if self.total_inafecto else \"\"\n data['total_exonerada'] = round(self.total_exonerado, 10) if self.total_exonerado else \"\"\n if self.total_isc:\n data['total_isc'] = round(self.total_isc, 10) if self.total_isc else \"\"\n data['total_igv'] = round(self.total_igv, 10) if self.total_igv else \"\"\n data['total_gratuita'] = \"\"\n data['total_otros_cargos'] = round(self.inv_otros, 10) if self.inv_otros else \"\"\n data['total'] = round(self.amount_total, 10) if self.amount_total else \"\"\n data['percepcion_tipo'] = \"\"\n data['percepcion_base_imponible'] = \"\"\n data['total_percepcion'] = \"\"\n data['total_incluido_percepcion'] = \"\"\n data['detraccion'] = not self.hide_detraction\n data['observaciones'] = \"\"\n data['documento_que_se_modifica_tipo'] = documentos.get(self.refund_invoice_document_type_id.code, 0) \\\n if self.refund_invoice_document_type_id.code else ''\n data['documento_que_se_modifica_serie'] = self.refund_invoice_sunat_serie or ''\n data['documento_que_se_modifica_numero'] = int(self.refund_invoice_sunat_number) \\\n if self.refund_invoice_sunat_serie and self.refund_invoice_sunat_number else \"\"\n data['tipo_de_nota_de_credito'] = int(self.credit_note_type_id.code) if self.credit_note_type_id else ''\n data['tipo_de_nota_de_debito'] = self.debit_note_type or ''\n data['enviar_automaticamente_a_la_sunat'] = True\n data['enviar_automaticamente_al_cliente'] = False\n data['codigo_unico'] = self and self.id or 0\n data['condiciones_de_pago'] = self.payment_term_id.name if self.payment_term_id else \"\"\n data['medio_de_pago'] = \"\"\n data['placa_vehiculo'] = \"\"\n 
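            # NOTE: the header totals above are rounded to 10 decimal places; the same
            # convention is applied per line below when building data['items'] for the
            # provider.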
data['orden_compra_servicio'] = self.origin[:20] if self.origin else \"\"\n if not self.hide_detraction:\n data['detraccion_tipo'] = float(self.detrac_id.number) if self.detrac_id.number else \"\"\n data['detraccion_total'] = self.detraccion if self.detraccion else \"\"\n data['tabla_personalizada_codigo'] = \"\"\n data['formato_de_pdf'] = \"\"\n data['generado_por_contingencia'] = \"\"\n data['items'] = []\n # data['guias'] = []\n\n numorden = 0\n for line in self.invoice_line_ids:\n item = {}\n item['unidad_de_medida'] = line.uom_id.sunat_code if line.uom_id.sunat_code else \"\"\n item['codigo'] = \"\" # str(numorden).zfill(4)\n item['descripcion'] = str(line.product_id.name) if line.product_id else \"\"\n item['cantidad'] = line.quantity\n item['valor_unitario'] = round(line.price_unit, 10)\n item['precio_unitario'] = round((line.total_without_discount) / line.quantity, 10)\n item['descuento'] = round(line.total_discount, 10) if line.total_discount else \"\"\n item['subtotal'] = round(line.price_subtotal, 10)\n if line.total_isc and line.isc_type:\n item['tipo_de_isc'] = line.isc_type if line.isc_type else \"\"\n item['isc'] = round(line.total_isc, 10) if line.total_isc else 0\n item['tipo_de_igv'] = line.igv_type if line.igv_type else \"\"\n item['igv'] = round(line.total_igv, 10) if line.total_igv else 0\n item['total'] = round(line.price_total, 10)\n item['anticipo_regularizacion'] = False\n item['anticipo_documento_serie'] = \"\"\n item['anticipo_documento_numero'] = \"\"\n if line.product_id.sunat_product_id.code:\n item['codigo_producto_sunat'] = line.product_id.sunat_product_id.code\n data['items'].append(item)\n _logger.info(\"\\n\" + json.dumps(data, indent=3))\n try:\n # Realizamos la peticion HTTP y obtenemos la respuesta\n response = requests.post(url=config.api_url, headers=_get_headers(config), json=data)\n except:\n raise ValidationError(\"Error al comunicarse con el proveedor de facturacion electronica,\"\n \"Validar los datos configurados.\")\n # response = requests.get(url)\n _logger.info(response.json() if response.status_code else \"\")\n # Si la Respuesta es Correcta\n\n if response.status_code and response.status_code == 200:\n resp = response.json()\n if resp['cadena_para_codigo_qr'] and resp['enlace_del_pdf'] and resp['codigo_hash']:\n # Inicio - Generamos el codigo qr\n qr_code = qrcode.make(resp['cadena_para_codigo_qr'])\n buffered = BytesIO()\n qr_code.save(buffered, format=\"JPEG\")\n qr_base64 = base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n # Fin - Generamos el codigo qr\n if resp['aceptada_por_sunat']:\n sunat_status = \"0\"\n elif not resp['aceptada_por_sunat']:\n sunat_status = \"4\"\n else:\n sunat_status = \"3\"\n self.sunat_response = json.dumps(resp)\n self.sunat_request = json.dumps(data)\n self.sunat_status = sunat_status\n self.sunat_description = resp['sunat_description']\n self.sunat_qr_code = qr_base64\n respuesta = True\n elif response.status_code and response.status_code == 400:\n resp = response.json()\n self.sunat_response = json.dumps(resp)\n self.sunat_status = '1'\n self.sunat_request = json.dumps(data)\n if resp['errors']:\n raise ValidationError(resp['errors'])\n if resp['sunat_soap_error']:\n raise ValidationError(resp['sunat_soap_error'])\n respuesta = True\n else:\n self.sunat_response = str(response)\n self.sunat_status = '3'\n self.sunat_request = json.dumps(data)\n if self.sunat_status != '0' and self.sunat_status != '4':\n seq.number_next = int(number)\n self.sunat_number = 0\n return respuesta\n # 0004 - 
Inicio\n","sub_path":"Funcion_Digital/qa_electronic_billing/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":14695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"150998838","text":"from numpy import *\r\nfrom math import *\r\nx0=3.7\r\ne=1e-4\r\nn=0\r\ndef phi(x):\r\n return (8/5)*(log(x) + 1)\r\nwhile (True):\r\n n+=1\r\n x=(x0*phi(phi(x0))-phi(x0)*phi(x0))/(phi(phi(x0))-2*phi(x0)+x0)\r\n if abs(x0 - x) <= e:break\r\n x0=x\r\nprint(\"За {0} итераций получили корень {1} с точностью {2}\".format(n,x0,e))\r\nprint(\"Вектор невязки:{0}\".format(x0-(8/5)*(log(x0)+1)))","sub_path":"МЧА-4-семестр/Lab3/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"450644228","text":"\"\"\"guiamuseos URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom museos import views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.contrib.auth.views import login, logout\n\nadmin.autodiscover ()\n\nurlpatterns = [\n url(r'^$', views.main, name='home'),\n url(r'css_color', views.css_color),\n url(r'css_letra', views.css_letra),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^about', views.about),\n url(r'^login', views.auth_login),\n url(r'^museos/(\\d+)$', views.museo_pers),\n url(r'^museos', views.allmuseums),\n url(r'^logout', logout, {'next_page': '/'}),\n url(r'^register', views.register),\n url(r'^accesibilidad', views.accesibilidad),\n url(r'^(.*)/XML', views.XML),\n url(r'^(.*)$', views.personal),\n url(r'/', views.main, name='home'),\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"guiamuseos/guiamuseos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"240195044","text":"#!/usr/bin/env python\ndef main():\n import sys\n\n raw_data = load_csv(sys.argv[1])\n\n k_l = set()\n for k in raw_data:\n k_l.add(get_stencil_num(k))\n k_l = list(k_l)\n\n # for ts in ['Naive', 'Dynamic-Intra-Diamond'] \n for k in k_l:\n for is_dp in [1]:\n for t in [0, 1]:\n plot_lines(raw_data, k, is_dp, t)\n\n \ndef get_stencil_num(k):\n # add the stencil operator\n if k['Stencil Kernel coefficients'] in 'constant':\n if int(k['Stencil Kernel semi-bandwidth'])==4:\n stencil = 0\n else:\n stencil = 1\n elif 'no-symmetry' in k['Stencil Kernel coefficients']:\n stencil = 5\n elif 'sym' in k['Stencil Kernel coefficients']:\n if int(k['Stencil Kernel semi-bandwidth'])==1:\n stencil = 3\n else:\n stencil = 4\n else:\n stencil = 2\n return stencil\n \ndef plot_lines(raw_data, stencil_kernel, is_dp, t):\n from operator import itemgetter\n import matplotlib.pyplot as plt\n import matplotlib\n import pylab\n from pylab import arange,pi,sin,cos,sqrt\n\n 
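    # NOTE: 0.393701 is the cm-to-inch conversion factor, so the line below asks
    # matplotlib for a figure about 3.8 cm (~1.5 in) wide; figsize values are
    # specified in inches.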
fig_width = 3.8*0.393701 # inches\n fig_height = 1.0*fig_width #* 210.0/280.0#433.62/578.16\n\n fig_size = [fig_width,fig_height]\n params = {\n 'axes.labelsize': 7,\n 'axes.linewidth': 0.5,\n 'lines.linewidth': 0.75,\n 'text.fontsize': 7,\n 'legend.fontsize': 7,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'lines.markersize': 3,\n 'text.usetex': True,\n 'figure.figsize': fig_size}\n pylab.rcParams.update(params)\n\n ts_l = set()\n for k in raw_data:\n ts_l.add(k['Time stepper orig name'])\n ts_l = list(ts_l)\n\n th = set()\n for k in raw_data:\n th.add(int(k['OpenMP Threads']))\n th = list(th)\n\n tb_l = set()\n for k in raw_data:\n tb_l.add(k['Time unroll'])\n tb_l = list(tb_l)\n tb_l = map(int,tb_l)\n tb_l.sort()\n \n tgs_l = set()\n for k in raw_data:\n tgs_l.add(k['Thread group size'])\n tgs_l = list(tgs_l)\n tgs_l = map(int,tgs_l)\n tgs_l.sort()\n\n req_fields = [('Thread group size', int), ('WD main-loop RANK0 MStencil/s MAX', float), ('Time stepper orig name', str), ('OpenMP Threads', int), ('MStencil/s MAX', float), ('Time unroll',int), ('Sustained Memory BW', float)]\n data = []\n for k in raw_data:\n tup = {}\n # add the general fileds\n for f in req_fields:\n tup[f[0]] = map(f[1], [k[f[0]]] )[0]\n\n # add the stencil operator\n# if k['Stencil Kernel coefficients'] in 'constant':\n# if int(k['Stencil Kernel semi-bandwidth'])==4:\n# stencil = 0\n# else:\n# stencil = 1\n# elif 'no-symmetry' in k['Stencil Kernel coefficients']:\n# stencil = 5\n# elif 'sym' in k['Stencil Kernel coefficients']:\n# if int(k['Stencil Kernel semi-bandwidth'])==1:\n# stencil = 3\n# else:\n# stencil = 4\n# else:\n# stencil = 2\n# tup['stencil'] = stencil\n tup['stencil'] = get_stencil_num(k)\n\n # add the precision information\n if k['Precision'] in 'DP':\n p = 1\n else:\n p = 0\n tup['Precision'] = p\n data.append(tup)\n\n data = sorted(data, key=itemgetter('Time stepper orig name', 'Time unroll', 'Thread group size', 'OpenMP Threads'))\n# for i in data: print i\n\n max_single = 0\n# fig, ax1 = plt.subplots()\n# lns = []\n\n marker = 'o'\n x = []\n y = []\n y_m = []\n for k in data:\n if ( ('Naive' in k['Time stepper orig name']) and (k['stencil']==stencil_kernel) and (k['Precision']==is_dp)):\n if k['OpenMP Threads'] == 1 and max_single < k['MStencil/s MAX']/10**3: max_single = k['MStencil/s MAX']/10**3\n y_m.append(k['Sustained Memory BW']/10**3)\n x.append(k['OpenMP Threads'])\n y.append(k['MStencil/s MAX']/10**3)\n marker = 'o'\n col = 'g'\n ts2 = 'Spt.blk.'\n if(x) and t==0:\n plt.plot(x, y, color=col, marker=marker, linestyle='-', label=ts2)\n if(y_m) and t==1:\n plt.plot(x, y_m, color=col, marker=marker, linestyle='-', label=ts2)\n\n x = []\n y = []\n y_m = []\n perf_str = 'WD main-loop RANK0 MStencil/s MAX'\n for k in data:\n if ( ('Diamond' in k['Time stepper orig name']) and (k['Thread group size'] == 10) and (k['stencil']==stencil_kernel) and (k['Precision']==is_dp)):\n y_m.append(k['Sustained Memory BW']/10**3)\n x.append(k['OpenMP Threads'])\n y.append(k[perf_str]/10**3)\n marker = '*'\n markersize = 12\n col = 'm'\n ts2 = str(10) + 'WD'\n if(x) and t==0:\n plt.plot(x, y, color=col, marker=marker, markersize=markersize,linestyle='', label=ts2)\n if(y_m) and t==1:\n plt.plot(x, y_m, color=col, marker=marker, markersize=markersize,linestyle='', label=ts2)\n\n cols = {0:'y', 1:'k', 2:'b', 4:'c', 5:'r', 8:'m'}\n markers = {0:'.', 1:'^', 2:'v', 4:'.', 5:'x', 8:'.'}\n for tgs in [1,2, 4, 8, 5]:\n marker = markers[tgs]\n x = []\n y = []\n y_m = []\n for k in data:\n if ( ('Diamond' in k['Time 
stepper orig name']) and (k['Thread group size'] == tgs) and (k['stencil']==stencil_kernel) and (k['Precision']==is_dp) ):\n                if k['OpenMP Threads'] == 1 and max_single < k[perf_str]/10**3: max_single = k[perf_str]/10**3\n                y_m.append(k['Sustained Memory BW']/10**3)\n                x.append(k['OpenMP Threads'])\n                y.append(k[perf_str]/10**3)\n        col = cols[tgs]\n        ts2 = str(tgs) + 'WD'\n        if(x) and t==0:\n            plt.plot(x, y, color=col, marker=marker, linestyle='-', label=ts2)\n        if(y_m) and t==1:\n            plt.plot(x, y_m, color=col, marker=marker, linestyle='-', label=ts2)\n\n\n\n\n    # add limits\n    mem_limit=0\n#    sus_mem_bw = 36500 #SB\n    sus_mem_bw = 40 #IB\n\n    if stencil_kernel == 0:\n        mem_limit = sus_mem_bw/16 \n    elif stencil_kernel == 1:\n        mem_limit = sus_mem_bw/12\n    elif stencil_kernel == 2:\n        mem_limit = sus_mem_bw/20\n    if is_dp == 1: mem_limit = mem_limit / 2\n    if t == 0:\n        #plt.plot([1, len(th)], [mem_limit, mem_limit], color='g', linestyle='--', label='Spatial blk. limit')\n        pass\n\n    # add ideal scaling\n    ideal = [i*max_single for i in th]\n    if t == 0:\n        plt.plot(th, ideal, color='k', linestyle='--', label='Ideal scaling')\n\n    if t == 0:\n        title = '7_pt_var_all_methods_perf'\n#        plt.ylabel('GLUP/s')\n    else:\n        title = '7_pt_var_all_methods_bw'\n#        plt.ylabel('GBytes/s')\n\n    f_name = title.replace(' ', '_')\n\n    plt.xlabel('Threads')\n    \n    #if t == 0: plt.legend(loc='best')\n    plt.grid()\n    pylab.savefig(f_name+'.png', bbox_inches=\"tight\", pad_inches=0.04)\n    pylab.savefig(f_name+'.pdf', format='pdf', bbox_inches=\"tight\", pad_inches=0)\n\n    #plt.show() \n    plt.clf()\n    \ndef load_csv(data_file):\n    from csv import DictReader\n    with open(data_file, 'rb') as output_file:\n        data = DictReader(output_file)\n        data = [k for k in data]\n    return data\n    \n    \nif __name__ == \"__main__\":\n    main()\n","sub_path":"scripts/sisc/paper_plot_thread_scaling_7_pt_var_coeff.py","file_name":"paper_plot_thread_scaling_7_pt_var_coeff.py","file_ext":"py","file_size_in_byte":7265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"826345","text":"\"\"\"\nviews.py\n\nURL route handlers\n\nNote that any handler params must match the URL route params.\nFor example, the *say_hello* handler, handling the URL route '/hello/<username>',\n    must be passed *username* as the argument.\n\n\"\"\"\nfrom google.appengine.api import users\nfrom google.appengine.runtime.apiproxy_errors import CapabilityDisabledError\n\nfrom flask import request, render_template, flash, url_for, redirect\n\nfrom flask_cache import Cache\n\nfrom application import app\nfrom decorators import login_required, admin_required\nfrom forms import ExampleForm, DimensionForm\nfrom models import ExampleModel, DimensionModel\n\n\n# Flask-Cache (configured to use App Engine Memcache API)\ncache = Cache(app)\ndef home():\n    if not users.get_current_user():\n        return render_template('home.html')\n    return list_dimensions()\n\n\ndef say_hello(username):\n    \"\"\"Contrived example to demonstrate Flask's url routing capabilities\"\"\"\n    return 'Hello %s' % username\n\n@login_required\ndef list_dimensions():\n    \"\"\"List all dimensions\"\"\"\n    dimensions = DimensionModel.query()\n    form = DimensionForm()\n    if form.validate_on_submit():\n        dimension = DimensionModel(\n            dimension_name = form.dimension_name.data,\n            dimension_description = form.dimension_description.data,\n            dimension_value=form.dimension_value.data,\n            dimension_tolerance=form.dimension_tolerance.data,\n            added_by = users.get_current_user()\n        )\n        try:\n            dimension.put()\n            dimension_id = 
dimension.key.id()\n            flash(u'Dimension %s successfully saved.' % dimension_id, 'success')\n            return redirect(url_for('list_dimensions'))\n        except CapabilityDisabledError:\n            flash(u'App Engine Datastore is currently in read-only mode.', 'info')\n            return redirect(url_for('list_dimensions'))\n    return render_template('list_dimensions.html', dimensions=dimensions, form=form)\n\n\n@login_required\ndef edit_dimension(dimension_id):\n    dimension = DimensionModel.get_by_id(dimension_id)\n    form = DimensionForm(obj=dimension)\n    if request.method == \"POST\":\n        if form.validate_on_submit():\n            dimension.dimension_name = form.data.get('dimension_name')\n            dimension.dimension_description = form.data.get('dimension_description')\n            dimension.dimension_value=form.data.get('dimension_value')\n            dimension.dimension_tolerance=form.data.get('dimension_tolerance')\n            dimension.put()\n            flash(u'Dimension %s successfully saved.' % dimension_id, 'success')\n            return redirect(url_for('list_dimensions'))\n    return render_template('edit_dimension.html', dimension=dimension, form=form)\n\n\n@login_required\ndef delete_dimension(dimension_id):\n    \"\"\"Delete a dimension object\"\"\"\n    dimension = DimensionModel.get_by_id(dimension_id)\n    try:\n        dimension.key.delete()\n        flash(u'Dimension %s successfully deleted.' % dimension_id, 'success')\n        return redirect(url_for('list_dimensions'))\n    except CapabilityDisabledError:\n        flash(u'App Engine Datastore is currently in read-only mode.', 'info')\n        return redirect(url_for('list_dimensions'))\n\n\n@admin_required\ndef admin_only():\n    \"\"\"This view requires an admin account\"\"\"\n    return 'Super-seekrit admin page.'\n\n\n@cache.cached(timeout=60)\ndef cached_dimensions():\n    \"\"\"This view should be cached for 60 sec\"\"\"\n    dimensions = DimensionModel.query()\n    return render_template('list_dimensions_cached.html', dimensions=dimensions)\n\n\ndef warmup():\n    \"\"\"App Engine warmup handler\n    See http://code.google.com/appengine/docs/python/config/appconfig.html#Warming_Requests\n\n    \"\"\"\n    return ''\n\n","sub_path":"src/application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"414092530","text":"gift_query = \"insert into \\\"Gifts\\\" (name, url) values ('%s','%s');\\n\"\nquestion_query = \"insert into \\\"Questions\\\" (question) values ('%s');\\n\"\n\nif __name__ == '__main__':\n\tgifts = [line.strip().split(',') for line in open('gifts.csv', 'r') if line.strip()][1:]\n\tquestions = [line.strip() for line in open('questions.csv', 'r') if line.strip()][1:]\n\twith open('commands.sql', 'w') as file:\n\t\tfor gift in gifts:\n\t\t\tfile.write(gift_query % (gift[0], gift[3]))\n\t\tfor question in questions:\n\t\t\tfile.write(question_query % question)\n","sub_path":"src/main/resources/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"536835170","text":"import sys\n\ndef getComplement(seq):\n\treturn seq[::-1]\n\ndef getReverse(seq):\n\tcomp = getComplement(seq)\n\tcomp = comp.replace('a', 'i')\n\tcomp = comp.replace('t', 'a')\n\tcomp = comp.replace('i', 't')\n\tcomp = comp.replace('c', 'i')\n\tcomp = comp.replace('g', 'c')\n\tcomp = comp.replace('i', 'g')\n\treturn comp\n\ndef numPalindromes(seq, j):\n\tnum_palindromes = 0\n\tcurrent_seq = ''\n\tfor i in range(len(seq)):\n\t\tif i < j:\n\t\t\tcurrent_seq += 
seq[i]\n\t\telse:\n\t\t\tcomp_seq = getComplement(current_seq)\n\t\t\tif comp_seq == seq[i:i+j] or comp_seq == seq[i+1:i+j+1]:\n\t\t\t\tnum_palindromes += 1\n\t\t\tcurrent_seq = current_seq[1:] + seq[i]\n\treturn num_palindromes\n\ndef maxPalindrome(seq):\n\tmax_palindrome = 0\n\tfor j in range(3, len(seq)/2):\n\t\tif numPalindromes(seq, j) > 0:\n\t\t\tmax_palindrome = j\n\treturn max_palindrome\n\ndef numReverseLocal(seq, j):\n\tnum_reverse = 0\n\tcurrent_seq = ''\n\tfor i in range(len(seq)/2):\n\t\tif i < j:\n\t\t\tcurrent_seq += seq[i]\n\t\telse:\n\t\t\trev_seq = getReverse(current_seq)\n\t\t\tif rev_seq in seq[i:]:\n\t\t\t\tnum_reverse += 1\n\t\t\t\treturn num_reverse \n\t\t\tcurrent_seq = current_seq[1:] + seq[i]\n\treturn num_reverse \n\ndef maxReverseLocal(seq):\n\tmax_reverse_local = 0\n\tfor j in range(7, len(seq)):\n\t\tif numReverseLocal(seq, j) > 0:\n\t\t\tmax_reverse_local = j\n\treturn max_reverse_local\n\ndef numReverseAcross(seq, seq2, j):\n\tnum_reverse = 0\n\tcurrent_seq = ''\n\tfor i in range(len(seq)):\n\t\tif i < j:\n\t\t\tcurrent_seq += seq[i]\n\t\telse:\n\t\t\trev_seq = getReverse(current_seq)\n\t\t\tif rev_seq in seq2:\n\t\t\t\tnum_reverse += 1\n\t\t\t\treturn num_reverse \n\t\t\tcurrent_seq = current_seq[1:] + seq[i]\n\treturn num_reverse \n\ndef maxReverseAcross(seq, seq2):\n\tmax_reverse_across = 0\n\tfor j in range(7, len(seq)):\n\t\tif numReverseAcross(seq, seq2, j) > 0:\n\t\t\tmax_reverse_across = j\n\treturn max_reverse_across\n\ndef numDirectLocal(seq, j):\n\tnum_direct = 0\n\tcurrent_seq = ''\n\tfor i in range(len(seq)/2):\n\t\tif i < j:\n\t\t\tcurrent_seq += seq[i]\n\t\telse:\n\t\t\tif current_seq in seq[i:]:\n\t\t\t\tnum_direct += 1\n\t\t\t\treturn num_direct\n\t\t\tcurrent_seq = current_seq[1:] + seq[i]\n\treturn num_direct\n\ndef maxDirectLocal(seq):\n\tmax_direct_local = 0\n\tfor j in range(7, len(seq)):\n\t\tif numDirectLocal(seq, j) > 0:\n\t\t\tmax_direct_local = j\n\treturn max_direct_local\n\ndef numDirectAcross(seq, seq2, j):\n\tnum_direct = 0\n\tcurrent_seq = ''\n\tfor i in range(len(seq)):\n\t\tif i < j:\n\t\t\tcurrent_seq += seq[i]\n\t\telse:\n\t\t\tif current_seq in seq2:\n\t\t\t\tnum_direct += 1\n\t\t\t\treturn num_direct\n\t\t\tcurrent_seq = current_seq[1:] + seq[i]\n\treturn num_direct\n\ndef maxDirectAcross(seq, seq2):\n\tmax_direct_across = 0\n\tfor j in range(7, len(seq)):\n\t\tif numDirectAcross(seq, seq2, j) > 0:\n\t\t\tmax_direct_across = j\n\treturn max_direct_across\n\ndef AT(seq):\n\tnum_a = seq.count('a')\n\tnum_t = seq.count('t')\n\ta_t = (num_a + num_t)/15.0\n\treturn a_t\n\ndef distToIntegrase(start, end, accn, file_name):\n\tmini = 10000000\n\tdist = 0\n\tgff = open(file_name + '.RefSeq.gff', 'r')\n\tfor row in gff:\n\t\tcol = row.strip().split('\\t')\n\t\tif col[0] == accn:\n\t\t\tif 'integrase' in col[8] or 'recombinase' in col[8]:\n\t\t\t\tif int(col[3]) > start and int(col[3]) < end:\n\t\t\t\t\tdist1 = abs(start - int(col[3]))\n\t\t\t\t\tdist2 = abs(end - int(col[4]))\n\t\t\t\t\tif dist1 < dist2:\n\t\t\t\t\t\tdist = dist1\n\t\t\t\t\telse:\n\t\t\t\t\t\tdist = dist2\n\t\t\t\t\tif dist < mini:\n\t\t\t\t\t\tmini = dist\n\tgff.close()\n\treturn mini\n\ninput = open(sys.argv[1], 'r')\noutput = open('features', 'w')\nprev_fna = ''\ndna_seq = {}\nfor row in input:\n\tcolumns = row.strip().split('\\t')\n\tif columns[0] != prev_fna:\n\t\tfna = open(columns[0] + '.fna', 'r')\n\t\t### Read Frags and DNA seqs\n\t\tcurrent_accn = '<'\n\t\tcurrent_dna_seq = ''\n\t\tdna_seq = {}\n\t\tfor r in fna:\n\t\t\tr = r.strip()\n\t\t\tif len(r) > 0 
and r[0] == '>':\n\t\t\t\tdna_seq[current_accn] = current_dna_seq\n\t\t\t\tcurrent_accn = r.split()[0][1:]\n\t\t\t\tcurrent_dna_seq = ''\n\t\t\telse:\n\t\t\t\tcurrent_dna_seq += r\n\t\tdna_seq[current_accn] = current_dna_seq\n\t\t### Finish reading frags and DNA Seqs ###\n\t\tfna.close()\n\tprev_fna = columns[0]\n\n\taccn = columns[1]\n\tstart = int(columns[2])\n\tend = int(columns[3])\n\n\tstart_seq = dna_seq[accn][start - 100: start + 115]\n\tend_seq = dna_seq[accn][end - 100: end + 115]\n \n\ta_t = round(AT(start_seq), 2)\n\tnum_palindromes_L = numPalindromes(start_seq, 3)\t\t\t\n\tnum_palindromes_R = numPalindromes(end_seq, 3)\n\n\tmax_palindromes_L = maxPalindrome(start_seq)\n\tmax_palindromes_R = maxPalindrome(end_seq)\n\n\tnum_direct_local_L = numDirectLocal(start_seq, 7)\n\tnum_direct_local_R = numDirectLocal(end_seq, 7)\n\n\tnum_direct_across_L = numDirectAcross(start_seq, end_seq, 7)\n\tnum_direct_across_R = numDirectAcross(end_seq, start_seq, 7)\n\n\tnum_reverse_local_L = numReverseLocal(start_seq, 7)\n\tnum_reverse_local_R = numReverseLocal(end_seq, 7)\n\n\tnum_reverse_across_L = numReverseAcross(start_seq, end_seq, 7)\n\tnum_reverse_across_R = numReverseAcross(end_seq, start_seq, 7)\n\n\tmax_direct_local_L = 0#maxDirectLocal(start_seq)\n\tmax_direct_local_R = maxDirectLocal(end_seq)\n\n\tmax_reverse_local_L = 0#maxReverseLocal(start_seq)\n\tmax_reverse_local_R = maxReverseLocal(end_seq)\n\n\tmax_direct_across_L = 0#maxDirectAcross(start_seq, end_seq)\n\tmax_direct_across_R = maxDirectAcross(end_seq, start_seq)\n\n\tmax_reverse_across_L = 0#maxReverseAcross(start_seq, end_seq)\n\tmax_reverse_across_R = maxReverseAcross(end_seq, start_seq)\n\n\tdist_to_integrase = distToIntegrase(start, end, accn, columns[0])\n\toutput.write(str(start) + '\\t' + str(end) + '\\t' + str(a_t) + '\\t' + str(num_palindromes_L) + '\\t' + str(num_palindromes_R) + '\\t' + str(max_palindromes_L) + '\\t' + str(max_palindromes_R) + '\\t')\n\toutput.write(str(num_direct_local_L) + '\\t' + str(num_direct_local_R) + '\\t' + str(num_direct_across_L) + '\\t' + str(num_direct_across_R) + '\\t')\n\toutput.write(str(num_reverse_local_L) + '\\t' + str(num_reverse_local_R) + '\\t' + str(num_reverse_across_L) + '\\t' + str(num_reverse_across_R) + '\\t')\n\toutput.write(str(max_direct_local_R) + '\\t' + str(max_reverse_local_R) + '\\t')\n\toutput.write(str(max_direct_across_R) + '\\t' + str(max_reverse_across_R) + '\\n') #'\\t' + str(dist_to_integrase) + '\\n')\n\noutput.close()\t\ninput.close()\n","sub_path":"repeatFeatures.py","file_name":"repeatFeatures.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"160162312","text":"#####################################################\n# remove duplicate rows of a csv file\n#\n# how to use (make sure file is in the current dir):\n# python3 remove_dup_csv.py input.csv output.csv\n#####################################################\nimport os\nimport sys\n\nwith open(sys.argv[1],'r') as input_file, open(sys.argv[2],'w') as output_file:\n seen = set() # set for fast O(1) amortized lookup\n for line in input_file:\n if line in seen: continue # skip duplicate\n seen.add(line)\n output_file.write(line)\n","sub_path":"remove_dup_csv.py","file_name":"remove_dup_csv.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"182568883","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl 
= 'https://www.worldometers.info/coronavirus'\npage = requests.get(url)\nsoup = BeautifulSoup(page.text, 'html.parser')\ninfo=soup.find_all('div',class_=\"maincounter-number\")\n\nprint(\"Total Case:\",str(info[0].text).strip())\nprint(\"Total Death:\",str(info[1].text).strip())\nprint(\"Total Recover:\",str(info[2].text).strip())\n","sub_path":"Python/Covid info scraper.py","file_name":"Covid info scraper.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"545063011","text":"\"\"\"\nEjemplo de una clase para manejar fracciones\n(de numerador y denominador entero y positivo)\n\"\"\"\n\n\nclass Fraccion:\n \"\"\" Clase para manejar fracciones de numeros enteros positivos \"\"\"\n\n def __init__(self, numerador, denominador):\n if type(numerador) != int or type(denominador) != int:\n raise ValueError('Solo numeros enteros aceptados')\n if numerador < 0 or denominador < 1:\n raise ValueError('Solo numeros positivos aceptados')\n \n self._numerador = numerador\n self._denominador = denominador\n self._simplificar()\n\n @property\n def numerador(self):\n return self._numerador\n \n @numerador.setter\n def numerador(self, num):\n if type(num) != int:\n raise ValueError('Tipo de dato no admitido para numerador')\n if num < 1:\n raise ValueError('Valor de numerador no admitido')\n self._numerador = num\n self._simplificar()\n\n @property\n def denominador(self):\n return self._denominador\n \n @denominador.setter\n def denominador(self, den):\n if type(den) != int:\n raise ValueError('Tipo de dato no admitido para denominador')\n if den <= 0:\n raise ValueError('Valor de denominador no admitido')\n self._denominador = den\n self._simplificar()\n\n def _simplificar(self):\n \"\"\" Simplificar la fraccion a los numeros mas bajos posibles \"\"\"\n men = min(self._numerador, self._denominador)\n for n in range(men, 1, -1):\n # Si este numero divide a los dos, entonces los divido\n if self._numerador % n == 0 and self._denominador % n == 0:\n self._numerador = int(self._numerador / n)\n self._denominador = int(self._denominador / n)\n break\n\n def __add__(self, otro):\n \"\"\" Sumar fracciones\"\"\"\n if type(otro) != Fraccion:\n raise ValueError('Solo suma aceptada entre fracciones')\n \n nuevo_denominador = self._denominador * otro.denominador\n nuevo_numerador = self._numerador * otro.denominador + otro.numerador * self._denominador\n\n return Fraccion(nuevo_numerador, nuevo_denominador)\n\n def __eq__(self, otro):\n if type(otro) != Fraccion:\n raise ValueError('No son objetos iguales')\n\n return self._numerador == otro.numerador and self._denominador == otro.denominador\n\n def __str__(self):\n return f'({self._numerador} / {self._denominador})'\n\n def __repr__(self):\n return f''\n\n\n# Pruebas de funcionamiento\n\nassert Fraccion(2, 3) + Fraccion(1, 3) == Fraccion(1, 1)\nassert Fraccion(4, 5) + Fraccion(3, 5) == Fraccion(7, 5)\nassert Fraccion(4, 5) + Fraccion(6, 5) == Fraccion(2, 1)\nassert Fraccion(5, 12) + Fraccion(4, 19) == Fraccion(143, 228)\nassert Fraccion(3, 2) + Fraccion(8, 11) == Fraccion(49, 22)\n\n# Probar la simplificacion al inicio\nassert Fraccion(8, 4) == Fraccion(2, 1)\n\nf1 = Fraccion(2, 4)\nassert f1 == Fraccion(1, 2)\n\nf1.numerador = 2\nassert f1 == Fraccion(1, 1), f'{f1} no es igual a {Fraccion(1, 1)}'\n\nf1.denominador = 8\nassert f1 == Fraccion(1, 8)\n\nprint('TODO 
OK')\n","sub_path":"code/01-basics/class-05.py","file_name":"class-05.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"118036372","text":"from swampdragon.serializers.model_serializer import ModelSerializer\nfrom swampdragon.testing.dragon_testcase import DragonTestCase\nfrom .models import TextModel, SDModel\nfrom datetime import datetime\nfrom django.db import models\n\n\nclass DateModel(SDModel):\n    date = models.DateTimeField()\n\n\nclass DateModelSerializer(ModelSerializer):\n    class Meta:\n        model = DateModel\n        publish_fields = ('date',)\n        update_fields = ('date',)\n\n\nclass TextModelSerializer(ModelSerializer):\n    class Meta:\n        model = TextModel\n        publish_fields = ('text',)\n        update_fields = ('text',)\n\n\nclass TestModelSerializer(DragonTestCase):\n    def test_deserialize_model(self):\n        data = {'text': 'foo'}\n        serializer = TextModelSerializer(data)\n        model_instance = serializer.save()\n        self.assertEqual(model_instance.text, data['text'])\n\n    def test_passing_invalid_data(self):\n        foo = 'text'\n        with self.assertRaises(Exception):\n            TextModelSerializer(foo)\n\n    def test_ignore_non_model_fields(self):\n        data = {'text': 'foo', 'random_field': 'val'}\n        serializer = TextModelSerializer(data)\n        model_instance = serializer.deserialize()\n        self.assertEqual(model_instance.text, data['text'])\n\n    def test_deserialize_field(self):\n        date = datetime.now()\n        data = {'date': str(date)}\n        serializer = DateModelSerializer(data)\n        object = serializer.save()\n        self.assertEqual(object.date, date)\n","sub_path":"tests/test_model_serializer_deserialize.py","file_name":"test_model_serializer_deserialize.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"65376692","text":"import os\nimport sys\nimport glob\nfrom PIL import Image\n\nif __name__ == '__main__':\n    if len(sys.argv) != 2:\n        quit(\"usage : > python3 file3Mdecimate.py [directoryName]\") \n    for file in glob.glob(sys.argv[1] + '/*.*'):\n        if os.path.getsize(file) > 3000000:\n            im = Image.open(file)\n            for q in range(100, 1, -1):\n                savefile = file + str(q) + \".jpg\"\n                im.save(savefile, 'JPEG', quality = q)\n                if os.path.getsize(savefile) <= 3000000:\n                    os.remove(file)\n                    break\n                else:\n                    os.remove(savefile)\n# os.remove(file)\n","sub_path":"file3Mdecimate.py","file_name":"file3Mdecimate.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"85915723","text":"from django.core.urlresolvers import reverse\n\ndef generar_urls(app, objeto):\n    # URL generator; keep in mind that all the CRUD URLs must be defined\n    urls = {\n        'agregar':'/web/'+app+'/agregar/'+objeto+'/',\n        'editar':'/web/'+app+'/editar/'+objeto+'/',\n        'listar':'/web/'+app+'/listar/'+objeto+'/',\n        'eliminar':'/web/'+app+'/eliminar/'+objeto+'/',\n        'ver':'/web/'+app+'/ver/'+objeto+'/',\n    }\n    return urls","sub_path":"flay/flay/apps/funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"214345734","text":"from config.configure import config\r\nimport classifying.fast_text_utils as ftu\r\n\r\nimport utils.array_utils as au\r\n\r\n\r\nlabel2value = ftu.binary_label2value\r\nkorea_model_file = config.korea_ft_model_file\r\n\r\n\r\nclass ClassifyK:\r\n    def __init__(self, 
ft_model_file):\r\n        self.ft_model = None\r\n        if ft_model_file is not None:\r\n            self.load_fasttext_model(ft_model_file)\r\n    \r\n    def load_fasttext_model(self, ft_model_file):\r\n        self.ft_model = ftu.load_model(ft_model_file)\r\n\r\n\r\ndef predict(textarr, threshold=0.5):\r\n    \"\"\" returns value/value array given input text/text array; value(s) are dependent on the threshold \"\"\"\r\n    model = ftu.get_model(korea_model_file)\r\n    pred_value_arr = ftu.binary_predict(textarr, model, threshold)\r\n    return pred_value_arr\r\n\r\n\r\ndef train(train_file, model_file):\r\n    model = ftu.FastText()\r\n    model.train_supervised(input=train_file, epoch=20, lr=1, wordNgrams=1, verbose=2, minCount=10)\r\n    ftu.save_model(model_file, model)\r\n\r\n\r\ndef test(test_file, model_file):\r\n    textarr, labelarr = list(), list()\r\n    with open(test_file) as testfp:\r\n        lines = testfp.readlines()[:20]\r\n        for line in lines:\r\n            label, text = line.strip().split(' ', 1)\r\n            textarr.append(text)\r\n            labelarr.append(label)\r\n    # for idx, line in enumerate(testlines):\r\n    #     if pu.is_empty_string(line):\r\n    #         continue\r\n    #     label, text = line.split(' ', 1)\r\n    #     print(label, model.predict(text, threshold=0.5), text)\r\n    pred_value_arr = predict(textarr)\r\n    label = [label2value[label] for label in labelarr]\r\n    print(au.score(label, pred_value_arr, 'auc'))\r\n\r\n\r\nif __name__ == '__main__':\r\n    train_file = '/home/nfs/cdong/tw/seeding/NorthKorea/data/train'\r\n    test_file = '/home/nfs/cdong/tw/seeding/NorthKorea/data/test'\r\n    \r\n    # train(train_file, korea_model_file)\r\n    test(test_file, korea_model_file)\r\n","sub_path":"classifying/k/_deprecate_classifier.py","file_name":"_deprecate_classifier.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"431533414","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup as parse\nfrom xml.etree import ElementTree\nimport re\nimport datetime\n\n\nprice_regex = re.compile('(?P<price>\d+[,.]\d{2}) ?€')\nremove_notes_regex = re.compile('\((\w+,?)+\w+\)')\nextract_notes_regex = re.compile('((?<=,)|(?<=\())\w{1,2}')\n\nurl = 'https://www.studierendenwerk-mainz.de/speiseplan/frontend/index.php'\n\ncanteenLegend = {\n    # API Extraction: https://github.com/kreativmonkey/jgu-mainz-openmensa/issues/1\n    '0' : 'Alle Mensas',\n    '1' : 'Zentralmensa',\n    '2' : 'Mensa Georg Foster',\n    '3' : 'Café Rewi',\n    '4' : 'Mensa Bingen',\n    '5' : 'Mensa K3',\n    '6' : 'Mensa Holzstraße',\n    '7' : 'Mens@rium',\n    '8' : 'Café Bingen Rochusberg',\n    '9' : 'Mensablitz'\n}\n\ndisplay = {\n    '0' : 'Today',\n    '1' : 'Aktuelle Woche',\n    '2' : 'Nächste Woche'\n}\n\nroles = ('student', 'other', 'employee')\n\nextraLegend = {\n    # Source: https://www.studierendenwerk-mainz.de/essentrinken/speiseplan/\n    '1': 'mit Farbstoff',\n    '2': 'mit Konservierungsstoff',\n    '3': 'mit Antioxidationsmittel',\n    '4': 'mit Geschmacksverstärker',\n    '5': 'geschwefelt',\n    '6': 'geschwärzt',\n    '7': 'gewachst',\n    '8': 'Phosphat',\n    '9': 'mit Süßungsmitteln',\n    '10': 'enthält eine Phenylalaninquelle',\n    'S' : 'Schweinefleisch',\n    'G' : 'Geflügelfleisch',\n    'R' : 'Rindfleisch',\n    'Gl' : 'Gluten',\n    'We' : 'Weizen (inkl. 
Dinkel)',\n    'Ro' : 'Roggen',\n    'Ge' : 'Gerste',\n    'Haf': 'Hafer',\n    'Kr' : 'Krebstiere und Krebstiererzeugnisse',\n    'Ei' : 'Eier und Eiererzeugnisse',\n    'Fi' : 'Fisch und Fischerzeugnisse',\n    'En' : 'Erdnüsse und Erdnusserzeugnisse',\n    'So' : 'Soja und Sojaerzeugnisse',\n    'La' : 'Milch und Milcherzeugnisse',\n    'Sl' : 'Sellerie und Sellerieerzeugnisse',\n    'Sf' : 'Senf und Senferzeugnisse',\n    'Se' : 'Sesamsamen und Sesamsamenerzeugnisse',\n    'Sw' : 'Schwefeldioxid und Sulfite > 10mg/kg',\n    'Lu' : 'Lupine und Lupinerzeugnisse',\n    'Wt' : 'Weichtiere und Weichtiererzeugnisse',\n    'Nu' : 'Schalenfrüchte',\n    'Man': 'Mandel',\n    'Has': 'Haselnüsse',\n    'Wa' : 'Walnüsse',\n    'Ka' : 'Kaschunüsse',\n    'Pe' : 'Pecanüsse',\n    'Pa' : 'Paranüsse',\n    'Pi' : 'Pistatien',\n    'Mac':'Macadamianüsse',\n\t'icon:vegan.png' : 'Vegan',\n\t'icon:La.png' : 'Lammfleisch'\n}\n\n\ndef parse_meals(canteen, url, display):\n\tcontent = urlopen(url + '?building_id=' + canteen + '&display_type=' + display).read().decode('utf-8', errors='ignore')\n\tdocument = parse(content, features='lxml')\n\tspeiseplan = document.find('div', class_='speiseplan')\n\t\n\tif speiseplan is None:\n\t\treturn 0\n\t\n\tspeisen = \"\"\n\t\n\t# Extract the menu data. Sketch of the scraped markup; the tags are\n\t# reconstructed from the class names handled in the loop below, and\n\t# \"...\" marks detail that was lost:\n\t#<div class=\"speiseplan_date\">Montag, 12. August 2020</div>\n\t#\t\t<div class=\"speiseplan_bldngall_name\">...</div>\n\t#\t\t\t<div class=\"speiseplancounter\">Ausgabe 2</div>\n\t#\t\t\t\t<div class=\"menuspeise\">\n\t#\t\t\t\t\t<div class=\"speiseplanname\">Quinoa Bratling (Gl) mit Reis und veganem Joghurt-Kräuter-Dip (3,Gl,So,Sf,Ge)</div>\n\t#\t\t\t\t\t<span class=\"tooltip\" title=\"...\"></span>\n\t#\t\t\t\t\t<img title=\"...\" />\n\t#\t\t\t\t\t...\n\t#\t\t\t\t\t3,40 € / 5,65 €\n\t#\t\t\t\t</div>\n\t#\t\t\t</div>
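	# NOTE: the loop below dispatches on the first CSS class of each <div>;
	# price strings are split with price_regex, e.g.
	# price_regex.findall('3,40 € / 5,65 €') -> ['3,40', '5,65'], where the
	# first value is presumably the student price and the second the guest price.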
\n\tfor v in speiseplan.find_all('div'):\n\t if not v.has_attr('class'):\n\t\t continue\n\n\t if v['class'][0] == 'speiseplan_date':\n\t\t # Print the String of Date\n\t\t # Format: Montag, 12. August 2020\n\t\t speisen += \" \\n\"\n\t\t \t\t \n\t if v['class'][0] == 'speiseplan_bldngall_name':\n\t\t # Get Mensa Name\n\t\t speisen += \"\\t \\n\"\n\t\t \n\t if v['class'][0] == 'speiseplancounter':\n\t\t # Print the Counter\n\t\t # It is the category in the elements\n\t\t # Format: Ausgabe X (X = Number)\n\t\t # str(v.string).strip()\n\t\t speisen += \"\\t\" + str(v.string).strip() + \" \\n\"\n\t\t \n\t if v['class'][0] == 'menuspeise':\n\t\t # Name des Gerichts\n\t\t name = str(v.find('div', class_=\"speiseplanname\").string).strip()\n\t\t \t\t \n\t\t \n\t\t # Remove the notes from Mealname and delete unnecessary spaces\n\t\t name = ' '.join(re.sub(r'\\((\\w+,?)+\\w+\\)', '', name).split())\n\t\t if len(name) > 250:\n name = name[:245] + '...' \n\t\t \t\t\n\t\t notes = [span['title'] for span in meal_data.find_all('span', 'tooltip')] \t\n\t\t notes += [img['title'] for img in meal_data.find_all('img')]\n\t\t \n\t\t # Preis aus v extrahieren\n\t\t # 3,40 € / 5,65 €\n\t\t prices = price_regex.findall(str(v))\n\t\t speisen += \"\\t\\t\\t\" + prices[0].replace(',', '.') + \" \\n\"\n\t\t speisen += \"\\t\\t\\t\" + prices[1].replace(',', '.') + \" \\n\"\n\t\t \n\treturn speisen\n\n\t# Find and convert Legend\n\nspeisen = parse_meals('0', url, '2')\n\nprint(speisen)\n","sub_path":"mainz.py","file_name":"mainz.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"411495695","text":"import os\nimport nibabel\nfrom pypreprocess.subject_data import SubjectData\nfrom nose.tools import assert_equal, assert_true, assert_false\nimport nose\nfrom ._test_utils import create_random_image\n\nDATA_DIR = \"test_tmp_data\"\nif not os.path.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n\n\ndef test_sujectdata_init():\n sd = SubjectData(anat='/tmp/anat.nii.gz', func='/tmp/func.nii.gz')\n assert_equal(sd.anat, \"/tmp/anat.nii.gz\")\n assert_equal(sd.func, \"/tmp/func.nii.gz\")\n\n\ndef test_sujectdata_sanitize():\n def _make_sd(ext=\".nii.gz\"):\n func = create_random_image(ndim=4)\n anat = create_random_image(ndim=3)\n func_filename = '%s/func%s' % (DATA_DIR, ext)\n anat_filename = '%s/anat%s' % (DATA_DIR, ext)\n nibabel.save(func, func_filename)\n nibabel.save(anat, anat_filename)\n sd = SubjectData(anat=anat_filename,\n func=func_filename,\n output_dir=\"/tmp/titi\")\n\n return sd\n\n sd = _make_sd(ext=\".nii.gz\")\n sd.sanitize()\n assert_equal(os.path.basename(sd.func[0]), \"func.nii.gz\")\n assert_equal(os.path.basename(sd.anat), \"anat.nii.gz\")\n\n sd = _make_sd(ext=\".nii.gz\")\n sd.sanitize(niigz2nii=True)\n assert_equal(os.path.basename(sd.func[0]), \"func.nii\")\n assert_equal(os.path.basename(sd.anat), \"anat.nii\")\n\n sd = _make_sd(ext=\".nii\")\n sd.sanitize()\n assert_equal(os.path.basename(sd.func[0]), \"func.nii\")\n assert_equal(os.path.basename(sd.anat), \"anat.nii\")\n\n sd = _make_sd(ext=\".nii\")\n sd.sanitize(niigz2nii=True)\n assert_equal(os.path.basename(sd.func[0]), \"func.nii\")\n assert_equal(os.path.basename(sd.anat), \"anat.nii\")\n\n# run all tests\nnose.runmodule(config=nose.config.Config(\n verbose=2,\n nocapture=True,\n 
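    # NOTE: running this file directly executes every test in the module through
    # nose, with verbose output (verbose=2) and stdout capture disabled
    # (nocapture=True).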
))\n","sub_path":"pypreprocess/tests/test_subject_data.py","file_name":"test_subject_data.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621201657","text":"\n# Write custom data generator for training and testing our CNN network for crop classification using label smoothing\n#\n######\n\nimport numpy as np\n#np.random.seed(100)\nimport os, glob\nimport pandas as pd\nfrom random import shuffle\nimport tensorflow as tf\nfrom tensorflow.python.keras.utils import to_categorical\n#from visualize import visualize_time_series\nfrom sklearn.preprocessing import StandardScaler\nfrom PIL import Image\nimport pandas as pd\nfrom pathlib import Path\nimport sys\n\ndef one_hot_encode(class_names, datapoint):\n # class_names is a list\n # data point is a string\n #print(datapoint, class_names)\n class_names.sort()\n class_vector = np.zeros(len(class_names))\n class_index = class_names.index(datapoint)\n class_vector[class_index] = 1\n return(class_vector)\n\ndef get_all_files(data_path):\n \"\"\"\n Extract all class names\n Input: data_path: str, path to the data folder containing all the classes \n \"\"\"\n classes_paths = glob.glob(os.path.join(data_path, \"*\"))\n classes_names = [os.path.basename(x) for x in classes_paths]\n classes_names.sort()\n all_data = {}\n for class_name in classes_names:\n curr_class_imgs = glob.glob(os.path.join(data_path, class_name, \"*.csv\"))\n for curr_img in curr_class_imgs:\n all_data[os.path.abspath(curr_img)] = int(class_name)\n return all_data \n\ndef label_smooth(num_classes, epsilon, encoded_data_point):\n \"\"\"\n Simple label smoothing \n Input: num_classes: number of classes\n epsilon: epsilon value \n encoded_data_point: one hot encoded data point\n \"\"\"\n smoothed_point = encoded_data_point * [1. - epsilon] + (1. - encoded_data_point) * (epsilon/float(num_classes - 1.))\n return smoothed_point\n\ndef replace_nans(data):\n \"\"\"\n Replace any nans with nearest values\n Source: https://stackoverflow.com/questions/9537543/replace-nans-in-numpy-array-with-closest-non-nan-value\n \"\"\"\n mask = np.isnan(data)\n data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), data[~mask])\n return(data)\n\n\ndef preprocess_batches(image_batch, class_names, num_classes, epsilon, resize_width_and_height, mode):\n return_x_img = []\n return_x_ts = []\n return_y = []\n for image_path in image_batch:\n curr_class = int(os.path.basename(os.path.dirname(image_path)))\n curr_file_name = os.path.basename(image_path).split('.')[0]\n curr_ts_path = os.path.join(str(Path(image_path).parent.parent.parent)+'-ts', mode, str(curr_class), '{}.csv'.format(curr_file_name))\n curr_ts = pd.read_csv(curr_ts_path)\n if curr_ts.empty:\n print(\"{} seems to be empty: pls check \".format(ts_path))\n sys.exit(0)\n curr_ts_data = curr_ts['NDVI'].values\n curr_ts_data = replace_nans(curr_ts_data)\n curr_ts_data = curr_ts_data[:, np.newaxis]\n curr_ts_data = StandardScaler().fit_transform(curr_ts_data)\n curr_class_encoded = one_hot_encode(class_names, curr_class) \n curr_class_label_smoothed = label_smooth(num_classes = num_classes, epsilon = epsilon, encoded_data_point = curr_class_encoded)\n return_y.append(curr_class_label_smoothed)\n curr_img = Image.open(image_path).resize(resize_width_and_height, Image.NEAREST)\n curr_img = np.array(curr_img)\n curr_img = curr_img * (1./255.) 
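        # NOTE: quick sanity check of the smoothing above, assuming num_classes=6
        # and epsilon=0.1 (the generators below default to epsilon=0, which
        # degenerates to plain one-hot labels):
        #   onehot = np.array([0., 0., 1., 0., 0., 0.])
        #   label_smooth(6, 0.1, onehot)
        #   -> array([0.02, 0.02, 0.9 , 0.02, 0.02, 0.02])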
\n return_x_img.append(curr_img)\n return_x_ts.append(curr_ts_data)\n return_x_ts = np.array(return_x_ts, dtype=np.float32)\n assert len(return_x_img) == len(return_y), \"TrainingX images and Y lengths mismatch!\"\n assert len(return_x_ts) == len(return_y), \"TrainingX time series and Y lengths mismatch!\"\n return [return_x_img, return_x_ts, return_y] \n\ndef preprocess_test_img(image_path, class_names, num_classes, epsilon, resize_width_and_height, mode):\n return_x_img = []\n return_x_ts = []\n return_y = []\n curr_class = int(os.path.basename(os.path.dirname(image_path)))\n curr_file_name = os.path.basename(image_path).split('.')[0]\n curr_ts_path = os.path.join(str(Path(image_path).parent.parent.parent)+'-ts', mode, str(curr_class), '{}.csv'.format(curr_file_name))\n curr_ts = pd.read_csv(curr_ts_path)\n if curr_ts.empty:\n print(\"{} seems to be empty: pls check \".format(ts_path))\n sys.exit(0)\n curr_ts_data = curr_ts['NDVI'].values\n curr_ts_data = replace_nans(curr_ts_data)\n curr_ts_data = curr_ts_data[:, np.newaxis]\n curr_ts_data = StandardScaler().fit_transform(curr_ts_data)\n return_y.append(curr_class)\n curr_img = Image.open(image_path).resize(resize_width_and_height, Image.NEAREST)\n curr_img = np.array(curr_img) \n curr_img = curr_img * (1./255.)\n return_x_img.append(curr_img)\n return_x_ts.append(curr_ts_data)\n return_x_ts = np.array(return_x_ts, dtype=np.float32)\n assert len(return_x_img) == len(return_y), \"TrainingX images and Y lengths mismatch!\"\n assert len(return_x_ts) == len(return_y), \"TrainingX time series and Y lengths mismatch!\"\n return [return_x_img, return_x_ts, return_y] \n\n\n\"\"\"\ndef preprocess_ts_batches(ts_batch, class_names, num_classes, epsilon):\n return_x = []\n return_y = []\n for ts_path in ts_batch:\n curr_class = int(os.path.basename(os.path.dirname(ts_path)))\n curr_class_encoded = one_hot_encode(class_names, curr_class) \n curr_class_label_smoothed = label_smooth(num_classes = num_classes, epsilon = epsilon, encoded_data_point = curr_class_encoded)\n return_y.append(curr_class_label_smoothed)\n #print(os.path.isfile(ts_path))\n curr_ts = pd.read_csv(ts_path)\n if curr_ts.empty:\n print(\"{} seems to be empty: pls check \".format(ts_path))\n curr_data = curr_ts['NDVI'].values\n curr_data = replace_nans(curr_data)\n curr_data = curr_data[:, np.newaxis]\n curr_data = StandardScaler().fit_transform(curr_data)\n return_x.append(curr_data)\n return_x = np.array(return_x, dtype=np.float32)\n #print(return_x)\n #return_x = return_x[:, :, np.newaxis]\n return_y = np.array(return_y, dtype = np.float32)\n assert return_x.shape[0] == return_y.shape[0], \"TrainingX and Y lengths mismatch!\"\n return [return_x, return_y] \n\"\"\"\n \ndef crop_generator(input_path, batch_size=32, mode=\"train\", num_classes =6, epsilon = 0, resize_params = (224, 224), do_shuffle=True):\t\n \"\"\"\n Simple data generator that reads all images based on mode, picks up corresponding time series, returns entire list\n \"\"\"\n data_path = os.path.join(input_path, mode)\n all_images = glob.glob(os.path.join(data_path, \"**/*.jpg\"))\n #all_ts = glob.glob(os.path.join(data_path, \"**/*.csv\"))\n print(\"Found {} files for {}\".format(len(all_images), mode))\n if do_shuffle:\n shuffle(all_images)\n curr_idx = 0\n while True:\n # create random batches first\n #batch_paths = np.random.choice(a= all_images, size = batch_size)\n # initialize our batches of images and labels\n imgs = []\n ts = []\n labels = [] \n if curr_idx > len(all_images): # reset if you've parsed all 
data\n curr_idx = 0\n curr_batch = all_images[curr_idx: (curr_idx + batch_size)]\n _, ts, labels = preprocess_batches(image_batch= curr_batch, class_names = [0,1,2,3,4,5], num_classes = num_classes, epsilon = epsilon, resize_width_and_height=resize_params, mode=mode) \n ts = np.array(ts)\n labels = np.array(labels)\n curr_idx += batch_size \n yield (ts, labels)\n\ndef test_crop_generator(input_path, batch_size=1, mode=\"test\", num_classes =6, epsilon = 0, resize_params = (224, 224), do_shuffle=True):\t\n \"\"\"\n Simple data generator that reads all images based on mode, picks up corresponding time series, returns entire list\n \"\"\"\n data_path = os.path.join(input_path, mode)\n all_images = glob.glob(os.path.join(data_path, \"**/*.jpg\"))\n print(\"Found {} files for {}\".format(len(all_images), mode))\n if do_shuffle:\n shuffle(all_images)\n curr_idx = 0\n while curr_idx < len(all_images):\n # create random batches first\n #batch_paths = np.random.choice(a= all_images, size = batch_size)\n # initialize our batches of images and labels\n #print(all_images[curr_idx])\n imgs = []\n ts = []\n labels = [] \n curr_batch = all_images[curr_idx]\n _, ts, labels = preprocess_test_img(image_path= curr_batch, class_names = [0,1,2,3,4,5], num_classes = num_classes, epsilon = epsilon, resize_width_and_height=resize_params, mode=mode) \n #imgs = np.array(imgs)\n ts = np.array(ts)\n labels = np.array(labels)\n curr_idx += batch_size\n yield (ts, labels, curr_batch)\n\n# res = crop_generator('/mnt/data/kgadira/food-water-energy/filtered-extracts-subset')\n# for a in res:\n# print(a)\n\n\n","sub_path":"lstm/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":8891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"49689268","text":"# -*- coding: utf-8 -*-\r\nimport scrapy\r\nimport hashlib\r\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\r\nfrom scrapy.contrib.linkextractors import LinkExtractor\r\nfrom scrapy.selector import Selector\r\nfrom forum.items import PostItemsList\r\nimport logging\r\nimport lxml.html\r\nfrom lxml.etree import ParserError\r\nfrom lxml.cssselect import CSSSelector\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\nimport urlparse\r\nimport urllib\r\nimport string\r\nimport dateparser\r\nimport time\r\n\r\n\r\n## LOGGING to file\r\n#import logging\r\n#from scrapy.log import ScrapyFileLogObserver\r\n\r\n#logfile = open('testlog.log', 'w')\r\n#log_observer = ScrapyFileLogObserver(logfile, level=logging.DEBUG)\r\n#log_observer.start()\r\n\r\n\r\nclass ForumsSpider(CrawlSpider):\r\n name = \"hepc_hepmag_spider\"\r\n _allowed_domain = {\"forums.hepmag.com\" }\r\n start_urls = [\r\n \"http://forums.hepmag.com/index.php?board=31.0\"\r\n ]\r\n\r\n rules = (\r\n # Rule to go to the single product pages and run the parsing function\r\n # Excludes links that end in _W.html or _M.html, because they point to \r\n # configuration pages that aren't scrapeable (and are mostly redundant anyway)\r\n Rule(LinkExtractor(\r\n restrict_xpaths='//span[contains(@id,\"msg_\")]/a',\r\n canonicalize=True,\r\n ), callback='parsePostsList'),\r\n # Rule to follow arrow to next product grid\r\n Rule(LinkExtractor(\r\n restrict_xpaths='//a[@class=\"navPages\"]',\r\n canonicalize=True,\r\n deny=(r'profile',)\r\n ), follow=True),\r\n )\r\n \r\n def getDate(self,date_str):\r\n # date_str=\"Fri Feb 12, 2010 1:54 pm\"\r\n try:\r\n date = dateparser.parse(date_str)\r\n epoch = int(date.strftime('%s'))\r\n create_date = 
time.strftime(\"%Y-%m-%d'T'%H:%M%S%z\", time.gmtime(epoch))\r\n return create_date\r\n except Exception:\r\n #logging.error(\">>>>>\"+date_str)\r\n return date_str\r\n\r\n def urlRemove(self,url,keyToFind):\r\n url_parts = list(urlparse.urlparse(url))\r\n query = dict(urlparse.parse_qsl(url_parts[4]))\r\n \r\n for q in query.keys():\r\n if q == keyToFind:\r\n query.pop(q,None)\r\n url_parts[4] = urllib.urlencode(query)\r\n return urlparse.urlunparse(url_parts)\r\n \r\n\r\n def cleanText(self,text,printableOnly=True):\r\n soup = BeautifulSoup(text,'html.parser')\r\n text = soup.get_text();\r\n text = re.sub(\"(-+| +|\\n|\\r|\\t|\\0|\\x0b|\\xa0|\\xbb|\\xab)+\",' ',text).strip()\r\n if(printableOnly):\r\n return filter(lambda x: x in string.printable, text)\r\n return text \r\n \r\n # https://github.com/scrapy/dirbot/blob/master/dirbot/spiders/dmoz.py\r\n # https://github.com/scrapy/dirbot/blob/master/dirbot/pipelines.py\r\n def parsePostsList(self,response):\r\n condition =\"hepatitis-c\"\r\n try:\r\n document = lxml.html.fromstring(response.body)\r\n document.make_links_absolute(base_url=response.url, resolve_base_href=True)\r\n except ParserError:\r\n return\r\n items =[]\r\n postWrappers = CSSSelector('.post_wrapper')(document)\r\n for postWrapper in postWrappers:\r\n post = PostItemsList()\r\n keyinfo = postWrapper.cssselect(\".keyinfo\")[0]\r\n poster = postWrapper.cssselect(\".poster\")[0]\r\n post['author'] = poster.xpath(\"./h4/a/text()\")[0]\r\n post['author_link'] = poster.xpath(\"./h4/a/@href\")[0]\r\n post['condition'] = condition\r\n create_date = self.cleanText(\" \".join(keyinfo.cssselect('.smalltext')[0].xpath(\"text()\")))\r\n post['create_date'] = self.getDate(create_date)\r\n item['domain'] = \"\".join(self.allowed_domains)\r\n post['topic'] = keyinfo.cssselect('h5')[0].xpath(\"./a/text()\")[0]\r\n post['post'] = self.cleanText(\" \".join(postWrapper.cssselect(\".post\")[0].xpath(\"./div/text()\")))\r\n post['url'] = self.urlRemove(response.url,\"PHPSESSID\")\r\n items.append(post)\r\n return items\r\n \r\n","sub_path":"forum/spiders/hepc_hepmag_spider.py","file_name":"hepc_hepmag_spider.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"179929951","text":"import tensorflow as tf\nimport numpy as np\n\nsess = tf.Session()\n# put multiple ops on the same computational graph\n\n# create the data\nmy_array = np.array([\n [1., 3., 5., 7., 8.],\n [-1., 3., 0., 9., 2.],\n [-2., 3., 4., 3., 3.],\n])\nx_vals = np.array([my_array, my_array + 1])\n# 2 * 3 * 5\n\n# x_data = tf.placeholder(tf.float32, shape=(3, 5))\nx_data = tf.placeholder(tf.float32, shape=(3, None))\n\n# create the constant for matrix mul and add\nm1 = tf.constant([[1.0], [0.], [-1.], [2.], [4.]])\nm2 = tf.constant([[2.]])\na1 = tf.constant([[10.]])\n\n# declare te ops and add them to the graph\n\nprod1 = tf.matmul(x_data, m1)\nprod2 = tf.matmul(prod1, m2)\nadd1 = tf.add(prod2, a1)\n\n# feed the data\nfor x_val in x_vals:\n print(sess.run(add1, feed_dict={x_data: x_val}))\n\n","sub_path":"Tensorflow/2_the_tensorflow_way/2_layering_nested_ops.py","file_name":"2_layering_nested_ops.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"649863392","text":"import json\nimport argparse\nimport os\n\n\ndef create_parser():\n parser = argparse.ArgumentParser(description='Поиск бара')\n parser.add_argument(\n 'longitude',\n 
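        # NOTE: positional CLI arguments; both coordinates are coerced to float
        # below, so an illustrative call is: python bars.py 37.62 55.75 bars.json
        # (longitude first, then latitude, then the path to the JSON data file).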
type=float,\n        help='Your longitude coordinate'\n    )\n    parser.add_argument(\n        'latitude',\n        type=float,\n        help='Your latitude coordinate'\n    )\n    parser.add_argument(\n        'filepath',\n        type=str,\n        help='Path to a JSON file with bar data'\n    )\n    return parser\n\n\ndef load_json_data_from_file(filepath):\n    with open(filepath, 'r', encoding='utf-8') as json_file:\n        json_data = json.loads(json_file.read())\n    return json_data\n\n\ndef get_biggest_bar(bars_data):\n    biggest_bar = max(\n        bars_data,\n        key=lambda bar: bar['properties']['Attributes']['SeatsCount']\n    )\n    return biggest_bar\n\n\ndef get_smallest_bar(bars_data):\n    smallest_bar = min(\n        bars_data,\n        key=lambda bar: bar['properties']['Attributes']['SeatsCount']\n    )\n    return smallest_bar\n\n\ndef get_distance(user_lon, user_lat, longitude, latitude):\n    distance = abs(user_lon - longitude) + abs(user_lat - latitude)\n    return distance\n\n\ndef get_closest_bar(bars_data, longitude, latitude):\n    closest_bar = min(\n        bars_data,\n        key=lambda bar:\n        get_distance(\n            bar['geometry']['coordinates'][0],\n            bar['geometry']['coordinates'][1],\n            longitude,\n            latitude\n        )\n    )\n    return closest_bar\n\n\nif __name__ == '__main__':\n    parser = create_parser()\n    args = parser.parse_args()\n    if os.path.exists(args.filepath):\n        try:\n            json_data = load_json_data_from_file(args.filepath)\n            bars = json_data['features']\n            print(\n                'Smallest bar',\n                get_smallest_bar(bars)['properties']['Attributes']['Name']\n            )\n            print(\n                'Biggest bar',\n                get_biggest_bar(bars)['properties']['Attributes']['Name']\n            )\n            print(\n                'Closest bar',\n                get_closest_bar(\n                    bars,\n                    args.longitude,\n                    args.latitude\n                )['properties']['Attributes']['Name']\n            )\n        except ValueError:\n            print('Error parsing the JSON file')\n    else:\n        print('File {} not found!'.format(args.filepath))\n","sub_path":"bars.py","file_name":"bars.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"260011210","text":"# Set spark environments\nimport os\n# os.environ[\"SPARK_HOME\"] = '/home/ypang6/anaconda3/lib/python3.7/site-packages/pyspark'\n# os.environ[\"PYTHONPATH\"] = '/home/ypang6/anaconda3/bin/python3.7'\n# os.environ['PYSPARK_PYTHON'] = '/home/ypang6/anaconda3/bin/python3.7'\n# os.environ['PYSPARK_DRIVER_PYTHON'] = '/home/ypang6/anaconda3/bin/python3.7'\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\nimport glob\nimport pandas as pd\n\nspark = SparkSession \\\n    .builder \\\n    .appName(\"Terminal_Area_Flight_Data_Query\") \\\n    .config(\"spark.ui.port\", \"4041\") \\\n    .getOrCreate()\n\nmyschema = StructType([\n    StructField(\"recType\", ShortType(), True),  #1  //track point record type number\n    StructField(\"recTime\", StringType(), True),  #2  //seconds since midnight 1/1/70 UTC\n    StructField(\"fltKey\", LongType(), True),  #3  //flight key\n    StructField(\"bcnCode\", IntegerType(), True),  #4  //digit range from 0 to 7\n    StructField(\"cid\", IntegerType(), True),  #5  //computer flight id\n    StructField(\"Source\", StringType(), True),  #6  //source of the record\n    StructField(\"msgType\", StringType(), True),  #7\n    StructField(\"acId\", StringType(), True),  #8  //call sign\n    StructField(\"recTypeCat\", IntegerType(), True),  #9\n    StructField(\"lat\", DoubleType(), True),  #10\n    StructField(\"lon\", DoubleType(), True),  #11\n    StructField(\"alt\", DoubleType(), True),  #12  //in 100s of feet\n    StructField(\"significance\", ShortType(), True),  #13 //digit range from 1 to 10\n    StructField(\"latAcc\", DoubleType(), True), 
#14\n    StructField(\"lonAcc\", DoubleType(), True),  #15\n    StructField(\"altAcc\", DoubleType(), True),  #16\n    StructField(\"groundSpeed\", IntegerType(), True),  #17 //in knots\n    StructField(\"course\", DoubleType(), True),  #18  //in degrees from true north\n    StructField(\"rateOfClimb\", DoubleType(), True),  #19  //in feet per minute\n    StructField(\"altQualifier\", StringType(), True),  #20  //Altitude qualifier (the “B4 character”)\n    StructField(\"altIndicator\", StringType(), True),  #21  //Altitude indicator (the “C4 character”)\n    StructField(\"trackPtStatus\", StringType(), True),  #22  //Track point status (e.g., ‘C’ for coast)\n    StructField(\"leaderDir\", IntegerType(), True),  #23  //int 0-8 representing the direction of the leader line\n    StructField(\"scratchPad\", StringType(), True),  #24\n    StructField(\"msawInhibitInd\", ShortType(), True),  #25 // MSAW Inhibit Indicator (0=not inhibited, 1=inhibited)\n    StructField(\"assignedAltString\", StringType(), True),  #26\n    StructField(\"controllingFac\", StringType(), True),  #27\n    StructField(\"controllingSec\", StringType(), True),  #28\n    StructField(\"receivingFac\", StringType(), True),  #29\n    StructField(\"receivingSec\", StringType(), True),  #30\n    StructField(\"activeContr\", IntegerType(), True),  #31  // the active control number\n    StructField(\"primaryContr\", IntegerType(), True),  #32  //The primary(previous, controlling, or possible next)controller number\n    StructField(\"kybrdSubset\", StringType(), True),  #33  //identifies a subset of controller keyboards\n    StructField(\"kybrdSymbol\", StringType(), True),  #34  //identifies a keyboard within the keyboard subsets\n    StructField(\"adsCode\", IntegerType(), True),  #35  //arrival departure status code\n    StructField(\"opsType\", StringType(), True),  #36  //Operations type (O/E/A/D/I/U)from ARTS and ARTS 3A data\n    StructField(\"airportCode\", StringType(), True),  #37\n    StructField(\"trackNumber\", IntegerType(), True),  #38\n    StructField(\"tptReturnType\", StringType(), True),  #39\n    StructField(\"modeSCode\", StringType(), True)  #40\n])\n\n\ndef take_arrival_flights(date, point):\n    # load csv into dataframe\n    file_path = glob.glob(\"/media/ypang6/paralab/Research/data/ATL/IFF_ATL+ASDEX_{}*.csv\".format(date))[0]\n    df = spark.read.csv(file_path, header=False, sep=\",\", schema=myschema)\n    cols = ['recType', 'recTime', 'acId', 'lat', 'lon', 'alt']\n    df = df.select(*cols).filter(df['recType']==3).withColumn(\"recTime\", df['recTime'].cast(IntegerType()))\n\n    # separate arrival flights and departure flights\n    cs_dep = []\n    cs_arr = []\n    cs_unknown = []\n    for x in df.select('acId').distinct().collect():\n        temp_df = df.filter(df['acId'] == x['acId'])\n        if temp_df.select(['alt']).take(1)[0][0] == 10.06:\n            cs_dep.append(x['acId'])\n        elif temp_df.orderBy(temp_df.recTime.desc()).select('alt').take(1)[0][0] == 10.06:\n            cs_arr.append(x['acId'])\n        else:\n            cs_unknown.append(x['acId'])\n\n    # create arrival flights dataframe\n    df_arr = df.filter(df.acId.isin(cs_arr) == True)\n\n    # filter points close to point and save into csv\n    faf9rflight = df_arr.filter(df_arr['lat']>=point[0]-radius).filter(df_arr['lat']<=point[0]+radius).\\\n        filter(df_arr['lon']>=point[1]-radius).filter(df_arr['lon']<=point[1]+radius)\n    faf9rflight.coalesce(1).write.csv('./faf9rflights/{}'.format(date))\n\n\nif __name__ == '__main__':\n\n    timestamp = 1567346400  # 2PM\n    FAF_9L = (33.63465, -84.54984166666667)  # waypoint NIVII (FAF of KATL runway 9L)\n    FAF_9R = (33.63172777777777, -84.54940555555555)  # waypoint BURNY (FAF of KATL runway 9R)\n    IF_9R = 
(33.631397222222226, -84.71883611111112)  # waypoint GGUYY (IF of KATL runway 9R)\n    IAF_9L = (33.63394722222222, -84.86316388888888)  # waypoint RYENN (IAF of KATL runway 9L)\n    IAF_9R = (33.63093611111111, -84.86295)  # waypoint ANDIY (IAF of KATL runway 9R)\n    IF_27R = (33.63430555555556, -84.12904722222221)  # waypoint MAASN (IF of KATL runway 27R)\n    IAF_27R = (33.633874999999996, -83.99111666666667)  # waypoint YOUYU (IAF of KATL runway 27R)\n    radius = 0.001\n\n    start_date = 20190801\n    end_date = 20190831\n\n    for date in range(start_date, end_date+1):\n        take_arrival_flights(date, FAF_9R)\n        print(\"finish processing date {}\".format(date))\n\n","sub_path":"QueryTerminal_31days.py","file_name":"QueryTerminal_31days.py","file_ext":"py","file_size_in_byte":5921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551166787","text":"import numpy as np\nfrom Sbox import sbox, rbox, rcon\n\ndef mul2(r):\n    b = [0 for i in range(4)]\n    for c in range(0, 4):\n        h = (r[c] >> 7) & 1\n        b[c] = r[c] << 1\n        b[c] ^= h * 0x1B\n    return b\n\ndef mul3(r):\n    b = mul2(r);\n    for c in range(0, 4):\n        b[c] = b[c] ^ r[c]\n    return b\n\ndef mul9(r):\n    b = list.copy(r)\n    for i in range(0, 3):\n        b = mul2(b)\n    for c in range(0, 4):\n        b[c] ^= r[c]\n    return b\n\ndef mul11(r):\n    b = list.copy(r)\n    b = mul2(b)\n    b = mul2(b)\n    for c in range(0, 4):\n        b[c] ^= r[c]\n    b = mul2(b)\n    for c in range(0, 4):\n        b[c] ^= r[c]\n    return b\n\ndef mul13(r):\n    b = list.copy(r)\n    b = mul2(b)\n    for c in range(0, 4):\n        b[c] ^= r[c]\n    b = mul2(b)\n    b = mul2(b)\n    for c in range(0, 4):\n        b[c] ^= r[c]\n    return b \n\ndef mul14(r):\n    b = list.copy(r)\n    b = mul2(b)\n    for c in range(0, 4):\n        b[c] ^= r[c]\n    b = mul2(b)\n    for c in range(0, 4):\n        b[c] ^= r[c]\n    b = mul2(b)\n    return b\n\ndef rotWord(r):\n    r[0], r[1], r[2], r[3] = r[1], r[2], r[3], r[0]\n    return r\n\ndef keyExpansion(key):\n    retkey = []\n    retkey.append(list.copy(key))\n    for i in range(0, 10):\n        newkey = [];\n        interkey = list.copy(retkey[-1]) # 4x4 array\n        interkey = np.transpose(interkey)\n        interkey = interkey.tolist()\n        rconarr = [rcon[i], 0, 0, 0]\n        workingarr = list.copy(interkey[-1]) # 1x4 array\n        workingarr = rotWord(workingarr)\n        for q in range(0, 4):\n            workingarr[q] = sbox[workingarr[q]]\n        for j in range(0, len(workingarr)):\n            workingarr[j] = workingarr[j] ^ interkey[0][j] ^ rconarr[j]\n        newkey.append(list.copy(workingarr))\n        for k in range(1, 4):\n            for j in range(0, 4):\n                workingarr[j] = workingarr[j] ^ interkey[k][j]\n            newkey.append(list.copy(workingarr))\n        newkey = np.transpose(newkey)\n        newkey = newkey.tolist()\n        retkey.append(newkey)\n        \n        # FOR PRINTING\n\n        # for v in range(0, 4):\n        #     for u in range(0, 4):\n        #         print(\"{:0x}\".format(newkey[v][u]), end=\" \");\n        #     print()\n        # print(\"______________________\")\n\n    return retkey\n    \n\n","sub_path":"aesUtils.py","file_name":"aesUtils.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"2212116","text":"# Necessary dependency\r\nimport random\r\n# Name of class\r\nclass Encryption(object):\r\n    # Initialization function\r\n    def __init__(self,key_seed,layers,characters):\r\n        # Setting the random seed for key generation\r\n        self.key_seed = key_seed\r\n        # All of the potential characters to be used in input string and key\r\n        self.alphabet = list(\"\"\"{}\"\"\".format(characters))\r\n        # Right now the key is nothing\r\n        self.key = None\r\n        # The number of layers represents the number of occurrences of 
recursive obfuscation\r\n        # Can be seen as a secondary key\r\n        self.layers = layers\r\n        # Temporary variable to hold text\r\n        self.iterText = None\r\n        # Encryption dictionary\r\n        self.encrypt = {}\r\n        # Decryption dictionary\r\n        self.decrypt = {}\r\n\r\n    # Function to generate keys randomly based off of the random seed, used in an iterative manner\r\n    def __iterGenKey(self,key_seed):\r\n        # first we create a temporary key - which is a copy of the alphabet set in the init function\r\n        key = self.alphabet.copy()\r\n        # then we randomize that temporary key based on the key seed passed in in this function specifically\r\n        # the key seed is dynamic throughout this program\r\n        random.Random(key_seed).shuffle(key)\r\n        # then we set the key to self.key\r\n        self.key = key\r\n        # then we set the encryption and decryption dictionaries to use to encrypt and decrypt the message\r\n        for i in range(len(self.alphabet)):\r\n            self.encrypt[self.alphabet[i]] = self.key[i]\r\n        for i in range(len(self.alphabet)):\r\n            self.decrypt[self.key[i]] = self.alphabet[i]\r\n\r\n    # this function takes in some text, and encodes it based on the information in the encryption dictionary\r\n    # think of a randomized Caesar cipher\r\n    def __iterEncode(self,text):\r\n        text = list(text)\r\n        encoded_text = []\r\n        for i in text:\r\n            encoded_text.append(self.encrypt[i])\r\n        return ''.join(encoded_text)\r\n\r\n    # this function takes in some text, and decodes it based on the information in the decryption dictionary\r\n    # again - it is similar to a randomized Caesar cipher\r\n    def __iterDecode(self,text):\r\n        text = list(text)\r\n        decoded_text = []\r\n        for i in text:\r\n            decoded_text.append(self.decrypt[i])\r\n        return ''.join(decoded_text)\r\n\r\n    # this function takes in text, and for every single character in the text, generates a new random key\r\n    # and places that new letter in a new list which is then joined to a string\r\n    def __charEncode(self,text):\r\n        self.iterText = text\r\n        hypertext = []\r\n        for j in list(range(len(self.iterText))):\r\n            self.__iterGenKey(self.key_seed*j)\r\n            #print(''.join(self.key))\r\n            #print()\r\n            hypertext.append(self.__iterEncode(self.iterText[j]))\r\n        return \"\".join(hypertext)\r\n\r\n    # This function reverses the process from the last function\r\n    def __charDecode(self,text):\r\n        self.iterText = text\r\n        hypertext = []\r\n        for j in list(range(len(self.iterText)))[::-1]:\r\n            self.__iterGenKey(self.key_seed*j)\r\n            #print(self.key)\r\n            #print()\r\n            hypertext.insert(0,self.__iterDecode(self.iterText[j]))\r\n        return \"\".join(hypertext)\r\n\r\n    # This function obfuscates the encrypted text by iteratively repeating the entire process up to this point\r\n    # to the number of layers specified\r\n    def encode(self,text):\r\n        self.iterText = text\r\n        for i in range(self.layers):\r\n            self.iterText = self.__charEncode(self.iterText)\r\n        return self.iterText\r\n\r\n    # This function un-obfuscates the process by iteratively repeating the entire process up to this point in reverse\r\n    def decode(self,text):\r\n        self.iterText = text\r\n        for i in range(self.layers):\r\n            self.iterText = self.__charDecode(self.iterText)\r\n        return self.iterText\r\n\r\n# Essentially what this class does, is take in text, generate a totally random key (based on random seed), that varies\r\n# for each letter. 
It then does this whole process for the encrypted text repeatedly through as many layers as you want.\r\n# To decrypt the text, simply reverse the process.\r\n","sub_path":"encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"163653849","text":"from data_process.dataset import tokenizer_pt, tokenizer_en\n\nclass Config():\n    num_layers = 6\n    d_model = 512\n    dff = 1024\n    num_heads = 8\n    input_vocab_size = tokenizer_pt.vocab_size + 2\n    target_vocab_size = tokenizer_en.vocab_size + 2\n    dropout_rate = 0.1\n    max_length = 40\n    buffer_size = 20000\n    batch_size = 64","sub_path":"model_config.py","file_name":"model_config.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"491262616","text":"\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom .utils import categorical_to_one_hot\n\n# def segmentation_metrics(pred, target, smooth)\n\n\ndef cal_dice(pred_logit, target, smooth = 1e-8): # target is one hot\n    pred = pred_logit.max(dim=1)[1]\n    n_classes = pred_logit.shape[1]\n    pred_one_hot = categorical_to_one_hot(pred, dim=1, expand_dim=True, n_classes=n_classes)\n    \n    dice = torch.zeros(n_classes)\n    for i_class in range(n_classes):\n        dice[i_class] = dice_perclass(pred_one_hot[:,i_class], target[:, i_class], smooth=smooth)\n    return dice\n\ndef dice_perclass(pred, target, smooth = 1e-8): # both one hot\n    intersection = (pred * target).sum()\n    return (2. * intersection) / (pred.sum().float() + target.sum() + smooth)\n\ndef cal_iou(pred_logit, target, smooth = 1e-8): # target is one hot\n    pred = pred_logit.max(dim=1)[1]\n    n_classes = pred_logit.shape[1]\n    pred_one_hot = categorical_to_one_hot(pred, dim=1, expand_dim=True, n_classes=n_classes)\n    \n    iou = torch.zeros(n_classes)\n    for i_class in range(n_classes):\n        iou[i_class] = iou_perclass(pred_one_hot[:,i_class], target[:, i_class], smooth=smooth)\n    return iou\n\ndef iou_perclass(pred, target, smooth = 1e-8):\n    intersection = (pred * target).sum()\n    return intersection / (pred.sum().float() + target.sum() - intersection + smooth)\n\n\n\n# def dice_coeff_perclass(pred, target, smooth = 1e-8): # both one hot\n#     batch_size = pred.shape[0]\n#     m1 = pred.view(batch_size, -1)  # Flatten\n#     m2 = target.view(batch_size, -1)  # Flatten\n#     intersection = (m1 * m2).sum()\n    \n\n#     return (2. 
* intersection) / (m1.sum().float() + m2.sum() + smooth)\n\n# def iou_perclass(pred, target, smooth = 1e-8):\n# batch_size = pred.shape[0]\n# m1 = pred.view(batch_size, -1) # Flatten\n# m2 = target.view(batch_size, -1) # Flatten\n# intersection = (m1 * m2).sum()\n# union = ((m1 + m2)>0).sum().float()\n# # if union==0:\n# # return 0\n# return intersection / (union+smooth)\n\n\n\n\n# def iou(pred_logit, target):\n# n_classes = pred_logit.shape[1]\n# pred = pred_logit.max(dim=1, keepdim=True)[1]\n# pred_one_hot = categorical_to_one_hot(pred, dim=1, n_classes=n_classes).cpu().numpy().astype(bool)\n# y = y.cpu().numpy().astype(bool)\n\n# # y = y.cpu().numpy().astype(bool)\n# iou = np.zeros((batch_size, n_classes))\n# for i_instance in range(batch_size):\n# for i_class in range(n_classes):\n# if y[i_instance,i_class].sum()==0:\n# iou[i_instance, i_class] = 0\n# else: \n# iou[i_instance, i_class] = ((pred_one_hot[i_instance,i_class] & y[i_instance,i_class]).sum() / \n# (pred_one_hot[i_instance,i_class] | y[i_instance,i_class]).sum())\n# iou = iou.mean(0)\n\n","sub_path":"mylib/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"116963063","text":"from . import interface\n\n\ndef Text(data):\n\tif data is None:\n\t\treturn None\n\n\treturn interface.Text(\n\t\tcontent=data['content'],\n\t\tbegin_offset=data['beginOffset'],\n\t\tend_offset=data['endOffset'],\n\t)\n\n\ndef Token(data):\n\tif data is None:\n\t\treturn None\n\n\treturn interface.Token(\n\t\ttext=Text(data['text']),\n\t\tpart_of_speech_tag=data['partOfSpeechTag'],\n\t\tlemma=data['lemma'],\n\t)\n\n\ndef Relation(data):\n\tif data is None:\n\t\treturn None\n\n\treturn interface.Relation(\n\t\tsubject=data['subject'],\n\t\tverb=data['verb'],\n\t\tobject=data['object'],\n\t\tadverbial_phrase=data['adverbialPhrase'],\n\t)\n\n\ndef Sentence(data):\n\tif data is None:\n\t\treturn None\n\n\tif data.get('tokens') is None:\n\t\ttokens = None\n\telse:\n\t\ttokens = tuple(Token(i) for i in data['tokens'])\n\n\tif data.get('relations') is None:\n\t\trelations = None\n\telse:\n\t\trelations = tuple(Relation(i) for i in data['relations'])\n\n\tif data.get('text') is None:\n\t\ttext = None\n\telse:\n\t\ttext = Text(data['text'])\n\n\treturn interface.Sentence(\n\t\ttokens=tokens,\n\t\ttext=text,\n\t\trelations=relations,\n\t)\n","sub_path":"intellexer/linguistic_processor/constructors.py","file_name":"constructors.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"526805881","text":"# coding: utf-8\n\nimport unittest\n\nfrom textwrap import dedent\nfrom multiprocessing import Pool\nfrom uuid import uuid4\nfrom time import sleep, time\n\nimport zmq\n\nfrom pypelinin import Job, Pipeline, PipelineManager, PipelineForPipeliner\n\n\nclass JobTest(unittest.TestCase):\n def test_worker_name(self):\n self.assertEqual(Job('ABC').worker_name, 'ABC')\n\n def test_should_start_with_no_data(self):\n self.assertEqual(Job('ABC').data, None)\n\n def test_repr(self):\n self.assertEqual(repr(Job('ABC')), \"Job('ABC')\")\n\n def test_equal_not_equal_and_hash(self):\n job_1 = Job('qwe')\n job_2 = Job('qwe')\n job_3 = Job('bla')\n self.assertTrue(job_1 == job_2)\n self.assertTrue(job_2 == job_1)\n self.assertTrue(job_1 != job_3)\n self.assertTrue(job_3 != job_1)\n self.assertEqual(hash(job_1), hash(job_2))\n 
self.assertNotEqual(hash(job_1), hash(job_3))\n\n def test_serialize_and_deserialize(self):\n with self.assertRaises(ValueError):\n Job.deserialize({}) # no key 'worker_name'\n\n job = Job('test')\n expected = tuple({'worker_name': 'test'}.items())\n self.assertEqual(job.serialize(), expected)\n self.assertEqual(Job.deserialize(expected), job)\n\n job_with_data = Job('testing', data={'python': 42, 'spam': 'eggs'})\n expected_with_data = {'worker_name': 'testing',\n 'data': tuple({'python': 42,\n 'spam': 'eggs'}.items())}\n expected_with_data = tuple(expected_with_data.items())\n self.assertEqual(job_with_data.serialize(), expected_with_data)\n self.assertEqual(Job.deserialize(expected_with_data), job_with_data)\n self.assertEqual(Job.deserialize(job_with_data.serialize()).serialize(),\n job_with_data.serialize())\n\nclass PipelineTest(unittest.TestCase):\n def test_only_accept_Job_objects(self):\n with self.assertRaises(ValueError):\n Pipeline({'test': 123})\n\n def test_jobs(self):\n result = Pipeline({Job('A'): [Job('B')],\n Job('B'): [Job('C'), Job('D'), Job('E')],\n Job('Z'): [Job('W')],\n Job('W'): Job('A')}).jobs\n expected = (Job('A'), Job('B'), Job('C'), Job('D'), Job('E'), Job('W'),\n Job('Z'))\n self.assertEqual(set(result), set(expected))\n\n def test_get_starters(self):\n result = Pipeline({Job('A'): []}).starters\n expected = (Job('A'),)\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({Job('A'): [], Job('B'): []}).starters\n expected = (Job('A'), Job('B'))\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({Job('A'): [Job('B')], Job('B'): []}).starters\n expected = (Job('A'),)\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({Job('A'): [Job('B')],\n Job('B'): [Job('C'), Job('D'), Job('E')],\n Job('Z'): [Job('W')]}).starters\n expected = (Job('A'), Job('Z'))\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({(Job('A'), Job('B'), Job('C')): Job('D')}).starters\n expected = [Job('A'), Job('B'), Job('C')]\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({(Job('A'), Job('B'), Job('C')): [Job('D')],\n Job('E'): (Job('B'), Job('F'))}).starters\n expected = (Job('A'), Job('C'), Job('E'))\n self.assertEqual(set(result), set(expected))\n\n def test_normalize(self):\n result = Pipeline({Job('A'): Job('B')})._graph\n expected = [(Job('A'), Job('B'))]\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({Job('A'): [Job('B')]})._graph\n expected = [(Job('A'), Job('B'))]\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({(Job('A'),): (Job('B'),)})._graph\n expected = [(Job('A'), Job('B'))]\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({(Job('A'), Job('C')): Job('B')})._graph\n expected = [(Job('A'), Job('B')), (Job('C'), Job('B'))]\n self.assertEqual(set(result), set(expected))\n\n graph = {(Job('A'), Job('C')): [Job('B'), Job('D'), Job('E')]}\n result = Pipeline(graph)._graph\n expected = [(Job('A'), Job('B')), (Job('A'), Job('D')),\n (Job('A'), Job('E')), (Job('C'), Job('B')),\n (Job('C'), Job('D')),\n (Job('C'), Job('E'))]\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({Job('ABC'): []})._graph # problem here if use string\n expected = [(Job('ABC'), None)]\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({Job('A'): [], Job('B'): []})._graph\n expected = [(Job('A'), None), (Job('B'), None)]\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({Job('A'): [Job('B')], Job('B'): 
[]})._graph\n expected = [(Job('A'), Job('B')), (Job('B'), None)]\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({Job('QWE'): [Job('B')],\n Job('B'): [Job('C'), Job('D'), Job('E')],\n Job('Z'): [Job('W')]})._graph\n expected = [(Job('QWE'), Job('B')), (Job('B'), Job('C')),\n (Job('B'), Job('D')), (Job('B'), Job('E')),\n (Job('Z'), Job('W'))]\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({(Job('A'), Job('B'), Job('C')): [Job('D')]})._graph\n expected = [(Job('A'), Job('D')), (Job('B'), Job('D')),\n (Job('C'), Job('D'))]\n self.assertEqual(set(result), set(expected))\n\n result = Pipeline({(Job('A'), Job('B'), Job('C')): [Job('D')],\n Job('E'): (Job('B'), Job('F'))})._graph\n expected = [(Job('A'), Job('D')), (Job('B'), Job('D')),\n (Job('C'), Job('D')), (Job('E'), Job('B')),\n (Job('E'), Job('F'))]\n self.assertEqual(set(result), set(expected))\n\n def test_validate_graph(self):\n #should have at least one starter node\n with self.assertRaises(ValueError):\n Pipeline({Job('A'): Job('A')})\n with self.assertRaises(ValueError):\n Pipeline({Job('A'): [Job('B')], Job('B'): [Job('A')]})\n\n #should not have cycles\n with self.assertRaises(ValueError):\n Pipeline({Job('A'): [Job('B')], Job('B'): [Job('C')],\n Job('C'): [Job('B')]})._graph\n with self.assertRaises(ValueError):\n Pipeline({Job('A'): [Job('B')], Job('B'): [Job('C')],\n Job('C'): [Job('D')], Job('D'): [Job('B')]})\n\n def test_dot(self):\n result = Pipeline({(Job('A'), Job('B'), Job('C')): [Job('D')],\n Job('E'): (Job('B'), Job('F'))}).to_dot().strip()\n expected = dedent('''\n digraph graphname {\n \"Job('A')\";\n \"Job('C')\";\n \"Job('B')\";\n \"Job('E')\";\n \"Job('D')\";\n \"Job('F')\";\n \"Job('A')\" -> \"Job('D')\";\n \"Job('C')\" -> \"Job('D')\";\n \"Job('B')\" -> \"Job('D')\";\n \"Job('E')\" -> \"Job('B')\";\n \"Job('E')\" -> \"Job('F')\";\n }\n ''').strip()\n\n self.assertEqual(result, expected)\n\n def test_pipeline_should_propagate_data_among_jobs(self):\n job_1 = Job('w1')\n job_2 = Job('w2')\n job_3 = Job('w3')\n pipeline_data = {'python': 42}\n pipeline = Pipeline({job_1: job_2, job_2: job_3}, data=pipeline_data)\n self.assertEqual(pipeline.data, pipeline_data)\n self.assertEqual(job_1.data, pipeline_data)\n self.assertEqual(job_2.data, pipeline_data)\n self.assertEqual(job_3.data, pipeline_data)\n self.assertEqual(job_1.pipeline, pipeline)\n self.assertEqual(job_2.pipeline, pipeline)\n self.assertEqual(job_3.pipeline, pipeline)\n\n def test_pipeline_add_finished_job(self):\n job_1 = Job('w1')\n job_2 = Job('w2')\n job_3 = Job('w3')\n pipeline_data = {'python': 42}\n pipeline = PipelineForPipeliner({job_1: job_2, job_2: job_3},\n data=pipeline_data)\n job_4 = Job('w4')\n\n self.assertFalse(pipeline.finished_job(job_1))\n self.assertFalse(pipeline.finished_job(job_2))\n self.assertFalse(pipeline.finished_job(job_3))\n\n pipeline.add_finished_job(job_1)\n self.assertTrue(pipeline.finished_job(job_1))\n self.assertFalse(pipeline.finished_job(job_2))\n self.assertFalse(pipeline.finished_job(job_3))\n\n pipeline.add_finished_job(job_2)\n self.assertTrue(pipeline.finished_job(job_1))\n self.assertTrue(pipeline.finished_job(job_2))\n self.assertFalse(pipeline.finished_job(job_3))\n\n pipeline.add_finished_job(job_3)\n self.assertTrue(pipeline.finished_job(job_1))\n self.assertTrue(pipeline.finished_job(job_2))\n self.assertTrue(pipeline.finished_job(job_3))\n\n with self.assertRaises(ValueError):\n pipeline.add_finished_job(job_4) # job not in pipeline\n with 
self.assertRaises(RuntimeError):\n pipeline.add_finished_job(job_3) # already finished\n\n def test_pipeline_finished(self):\n job_1 = Job('w1')\n job_2 = Job('w2')\n job_3 = Job('w3')\n pipeline_data = {'python': 42}\n pipeline = PipelineForPipeliner({job_1: job_2, job_2: job_3},\n data=pipeline_data)\n\n self.assertFalse(pipeline.finished())\n pipeline.add_finished_job(job_1)\n self.assertFalse(pipeline.finished())\n pipeline.add_finished_job(job_2)\n self.assertFalse(pipeline.finished())\n pipeline.add_finished_job(job_3)\n self.assertTrue(pipeline.finished())\n\n def test_default_attributes(self):\n pipeline = Pipeline({Job('test'): None})\n self.assertEqual(pipeline.data, None)\n self.assertEqual(pipeline.id, None)\n self.assertEqual(pipeline.jobs, (Job('test'),))\n self.assertEqual(pipeline.sent_jobs, set())\n\n def test_available_jobs(self):\n job_1 = Job('w1')\n job_2 = Job('w2')\n job_3 = Job('w3')\n pipeline_data = {'python': 42}\n pipeline = PipelineForPipeliner({job_1: job_2, job_2: job_3},\n data=pipeline_data)\n\n expected = [job_1]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_1)\n expected = [job_2]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_2)\n expected = [job_3]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_3)\n self.assertEqual(pipeline.available_jobs(), set())\n\n job_4, job_5, job_6, job_7 = Job('w4'), Job('w5'), Job('w6'), Job('w7')\n job_8, job_9, job_10 = Job('8'), Job('9'), Job('10')\n job_11, job_12, job_13 = Job('11'), Job('12'), Job('13')\n job_14, job_15, job_16 = Job('14'), Job('15'), Job('16')\n pipeline_data = {'python': 42}\n pipeline = PipelineForPipeliner({job_1: (job_2, job_3),\n job_2: (job_4, job_16),\n job_3: job_4,\n job_4: job_5,\n job_5: (job_6, job_7, job_8, job_9),\n (job_6, job_7, job_8): job_10,\n (job_10, job_11): (job_12, job_13,\n job_14),\n job_15: None},\n data=pipeline_data)\n\n expected = [job_1, job_11, job_15]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n self.assertEqual(pipeline.available_jobs(), set(pipeline.starters))\n\n pipeline.add_finished_job(job_1)\n expected = [job_11, job_15, job_2, job_3]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_2)\n expected = [job_11, job_15, job_3, job_16]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_3)\n expected = [job_11, job_15, job_4, job_16]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_16)\n expected = [job_11, job_15, job_4]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_4)\n expected = [job_11, job_15, job_5]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_11)\n expected = [job_15, job_5]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_5)\n expected = [job_15, job_6, job_7, job_8, job_9]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_6)\n expected = [job_15, job_7, job_8, job_9]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_15)\n expected = [job_7, job_8, job_9]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_7)\n expected = [job_8, job_9]\n self.assertEqual(pipeline.available_jobs(), 
set(expected))\n\n pipeline.add_finished_job(job_9)\n expected = [job_8]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_8)\n expected = [job_10]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_10)\n expected = [job_12, job_13, job_14]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_13)\n expected = [job_12, job_14]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_12)\n expected = [job_14]\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n pipeline.add_finished_job(job_14)\n expected = []\n self.assertEqual(pipeline.available_jobs(), set(expected))\n\n self.assertTrue(pipeline.finished())\n\n def test_serialize(self):\n job_1, job_2, job_3, job_4 = (Job('spam'), Job('eggs'), Job('ham'),\n Job('python'))\n pipeline = Pipeline({job_1: job_2, job_2: (job_3, job_4)})\n result = pipeline.serialize()\n expected = {'graph': ((job_1.serialize(), job_2.serialize()),\n (job_2.serialize(), job_3.serialize()),\n (job_2.serialize(), job_4.serialize())),\n 'data': None}\n expected = tuple(expected.items())\n\n result = dict(result)\n expected = dict(expected)\n result['graph'] = dict(result['graph'])\n expected['graph'] = dict(expected['graph'])\n self.assertEqual(result, expected)\n\n pipeline = Pipeline({job_1: job_2}, data={'python': 42})\n self.assertEqual(pipeline, Pipeline.deserialize(pipeline.serialize()))\n\n def test_deserialize(self):\n job_1, job_2, job_3, job_4, job_5 = (Job('spam'), Job('eggs'),\n Job('ham'), Job('python'),\n Job('answer_42'))\n pipeline = Pipeline({job_1: job_2, job_2: (job_3, job_4), job_5: None},\n data={'key': 42})\n serialized = pipeline.serialize()\n new_pipeline = Pipeline.deserialize(serialized)\n self.assertEqual(pipeline, new_pipeline)\n self.assertEqual(serialized, new_pipeline.serialize())\n\n def test_equal_not_equal_hash(self):\n job_1, job_2, job_3, job_4 = (Job('spam'), Job('eggs'), Job('ham'),\n Job('python'))\n pipeline_1 = Pipeline({job_1: job_2, job_2: (job_3, job_4)})\n pipeline_2 = Pipeline({job_1: job_2, job_2: (job_3, job_4)})\n pipeline_3 = Pipeline({job_1: job_2, job_2: job_3, job_3: job_4})\n self.assertTrue(pipeline_1 == pipeline_2)\n self.assertTrue(pipeline_2 == pipeline_1)\n self.assertTrue(pipeline_1 != pipeline_3)\n self.assertTrue(pipeline_3 != pipeline_1)\n\n my_set = set([pipeline_1, pipeline_2, pipeline_3]) #test __hash__\n self.assertIn(pipeline_1, my_set)\n self.assertIn(pipeline_2, my_set)\n self.assertIn(pipeline_3, my_set)\n\n pipeline_with_data = Pipeline({job_1: job_2, job_2: (job_3, job_4)},\n data={'python': 42})\n pipeline_with_data_2 = Pipeline({job_1: job_2, job_2: (job_3, job_4)},\n data={'python': 42})\n self.assertTrue(pipeline_with_data == pipeline_with_data_2)\n self.assertTrue(pipeline_with_data_2 == pipeline_with_data)\n self.assertTrue(pipeline_1 != pipeline_with_data)\n self.assertTrue(pipeline_with_data != pipeline_1)\n\ndef run_in_parallel(function, args=tuple()):\n pool = Pool(processes=1)\n result = pool.apply_async(function, args)\n return result, pool\n\ndef send_pipeline():\n pipeline = Pipeline({Job(u'worker_1'): Job(u'worker_2'),\n Job(u'worker_2'): Job(u'worker_3')})\n pipeline_manager = PipelineManager(api='tcp://localhost:5550',\n broadcast='tcp://localhost:5551')\n before = pipeline.id\n pipeline_id = pipeline_manager.start(pipeline)\n pipeline_manager.disconnect()\n return before, pipeline_id, 
pipeline.id\n\ndef send_pipeline_and_wait_finished():\n import time\n\n pipeline = Pipeline({Job(u'worker_1'): Job(u'worker_2'),\n Job(u'worker_2'): Job(u'worker_3')})\n pipeline_manager = PipelineManager(api='tcp://localhost:5550',\n broadcast='tcp://localhost:5551')\n pipeline_manager.start(pipeline)\n start = time.time()\n while not pipeline_manager.finished(pipeline):\n time.sleep(0.1)\n end = time.time()\n pipeline_manager.disconnect()\n return {'duration': pipeline.duration, 'real_duration': end - start}\n\ndef verify_PipelineManager_exceptions():\n pipeline_1 = Pipeline({Job(u'worker_1'): Job(u'worker_2'),\n Job(u'worker_2'): Job(u'worker_3')})\n pipeline_2 = Pipeline({Job(u'worker_1'): Job(u'worker_2')})\n pipeline_manager = PipelineManager(api='tcp://localhost:5550',\n broadcast='tcp://localhost:5551')\n pipeline_manager.start(pipeline_1)\n raise_1, raise_2 = False, False\n try:\n pipeline_manager.start(pipeline_1)\n except ValueError:\n raise_1 = True\n try:\n pipeline_manager.finished(pipeline_2)\n except ValueError:\n raise_2 = True\n\n pipeline_manager.disconnect()\n return {'raise_1': raise_1, 'raise_2': raise_2,\n 'started_at': pipeline_1.started_at}\n\nclass PipelineManagerTest(unittest.TestCase):\n def setUp(self):\n self.context = zmq.Context()\n self.start_router_sockets()\n self.pipeline = Pipeline({Job(u'worker_1'): Job(u'worker_2'),\n Job(u'worker_2'): Job(u'worker_3')})\n\n def tearDown(self):\n self.close_sockets()\n self.context.term()\n\n def start_router_sockets(self):\n self.api = self.context.socket(zmq.REP)\n self.broadcast = self.context.socket(zmq.PUB)\n self.api.bind('tcp://127.0.0.1:5550')\n self.broadcast.bind('tcp://127.0.0.1:5551')\n\n def close_sockets(self):\n self.api.close()\n self.broadcast.close()\n\n def test_should_send_add_pipeline_with_serialized_pipeline(self):\n result, pool = run_in_parallel(send_pipeline)\n message = self.api.recv_json()\n received = Pipeline.deserialize(message['pipeline']).serialize()\n expected = self.pipeline.serialize()\n self.assertEqual(set(message.keys()), set(['command', 'pipeline']))\n self.assertEqual(message['command'], 'add pipeline')\n self.assertEqual(received, expected)\n\n pipeline_id = uuid4().hex\n self.api.send_json({'answer': 'pipeline accepted',\n 'pipeline id': pipeline_id})\n result.get()\n pool.terminate()\n\n def test_should_save_pipeline_id_on_pipeline_object(self):\n result, pool = run_in_parallel(send_pipeline)\n message = self.api.recv_json()\n pipeline_id = uuid4().hex\n self.api.send_json({'answer': 'pipeline accepted',\n 'pipeline id': pipeline_id})\n received = result.get()\n pool.terminate()\n self.assertEqual(received, (None, pipeline_id, pipeline_id))\n\n def test_should_subscribe_to_broadcast_to_wait_for_finished_pipeline(self):\n result, pool = run_in_parallel(send_pipeline_and_wait_finished)\n message = self.api.recv_json()\n pipeline_id = uuid4().hex\n self.api.send_json({'answer': 'pipeline accepted',\n 'pipeline id': pipeline_id})\n sleep(1)\n self.broadcast.send('pipeline finished: id={}, duration=1.23456'\\\n .format(pipeline_id))\n received = result.get()\n pool.terminate()\n self.assertEqual(received['duration'], 1.23456)\n self.assertTrue(received['real_duration'] > 1)\n\n def test_should_raise_ValueError_in_some_cases(self):\n result, pool = run_in_parallel(verify_PipelineManager_exceptions)\n message = self.api.recv_json()\n pipeline_id = uuid4().hex\n self.api.send_json({'answer': 'pipeline accepted',\n 'pipeline id': pipeline_id})\n start_time = time()\n received = 
result.get()\n pool.terminate()\n self.assertTrue(received['raise_1'])\n self.assertTrue(received['raise_2'])\n started_at = received['started_at']\n self.assertTrue(start_time - 0.1 <= started_at <= start_time + 0.1)\n","sub_path":"tests/test_pipeline.py","file_name":"test_pipeline.py","file_ext":"py","file_size_in_byte":22414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"212406833","text":"\"\"\"\nDjango settings for mysite project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\ndef patch_broken_pipe_error():\n \"\"\"Monkey Patch BaseServer.handle_error to not write\n a stacktrace to stderr on broken pipe.\n http://stackoverflow.com/a/22618740/362702\"\"\"\n import sys\n from SocketServer import BaseServer\n from wsgiref import handlers\n\n handle_error = BaseServer.handle_error\n log_exception = handlers.BaseHandler.log_exception\n\n def is_broken_pipe_error():\n type, err, tb = sys.exc_info()\n return repr(err) == \"error(32, 'Broken pipe')\"\n\n def my_handle_error(self, request, client_address):\n if not is_broken_pipe_error():\n handle_error(self, request, client_address)\n\n def my_log_exception(self, exc_info):\n if not is_broken_pipe_error():\n log_exception(self, exc_info)\n\n BaseServer.handle_error = my_handle_error\n handlers.BaseHandler.log_exception = my_log_exception\n\npatch_broken_pipe_error()\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'jrc+hcry)#^$42=_1!9lb65shd9dxi-2=ixxlb8w-1*3f*1*_$'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\nTHUMBNAIL_DEBUG = True\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'polls',\n 'photo',\n 'signups',\n 'debug_toolbar',\n 'easy_thumbnails',\n 'easy_thumbnails.optimize',\n #'south',\n #'sorl.thumbnail',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'mysite.urls'\n\nWSGI_APPLICATION = 'mysite.wsgi.application'\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (50, 50), 'crop': True},\n 'small': {'size': (100, 100), 'crop': True},\n 'medium': {'size': (150, 150), 'crop': True},\n 'large': {'size': (200, 200), 'crop': True}\n },\n }\n\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',\n 
'LOCATION': 'my_cache_table',\n    }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'userfiles')\nMEDIA_URL = '/photos/'\n\nTEMPLATE_DIRS = (\n    os.path.join(os.path.dirname(BASE_DIR), \"static\", \"templates\"),\n    \n)\n\nif DEBUG:\n    MEDIA_URL='/media/'\n    MEDIA_ROOT=os.path.join(os.path.dirname(BASE_DIR), \"static\", \"media\")\n    STATIC_ROOT=os.path.join(os.path.dirname(BASE_DIR), \"static\", \"static-only\")\n    STATICFILES_DIRS=(\n    os.path.join(os.path.dirname(BASE_DIR), \"static\", \"static\"),\n    \n)\n","sub_path":"mysite/mysite/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"462603035","text":"from django.db import models\nfrom django.contrib.auth.models import User\n# Create your models here.\n\nclass Link(models.Model):\n    STATUS_NORMAL = 1\n    STATUS_DELETE = 0\n    # STATUS_DRAFT = 2\n    STATUS_ITEMS = (\n        (STATUS_NORMAL, 'Normal'),\n        (STATUS_DELETE, 'Deleted'),\n        # (STATUS_DRAFT, 'Draft')\n    )\n    title = models.CharField(max_length=50, verbose_name=\"Title\")\n    href = models.URLField(verbose_name=\"Link\") # default max_length is 200\n    status = models.PositiveIntegerField(default=STATUS_NORMAL,\n                                         choices=STATUS_ITEMS, verbose_name=\"Status\")\n    weight = models.PositiveIntegerField(default=1, choices=zip(range(1, 6),\n                                         range(1,6)),\n                                         verbose_name=\"Weight\",\n                                         help_text=\"Higher weight means higher position\"\n                                         )\n    owner = models.ForeignKey(User, verbose_name=\"Author\")\n    create_time = models.DateTimeField(auto_now_add=True, verbose_name=\"Created time\")\n\n    class Meta():\n        verbose_name = verbose_name_plural = 'Friend link'\n        # ordering = ['-id'] # sort by id in descending order\n\n\nclass SideBar(models.Model):\n    STATUS_SHOW = 1\n    STATUS_HIDE = 0\n    # STATUS_DRAFT = 2\n    STATUS_ITEMS = (\n        (STATUS_SHOW, 'Show'),\n        (STATUS_HIDE, 'Hide'),\n        # (STATUS_DRAFT, 'Draft')\n    )\n    SIDE_TYPE = (\n        (1, 'HTML'),\n        (2, 'Latest posts'),\n        (3, 'Hottest posts'),\n        (4, 'Recent comments'),\n    )\n    title = models.CharField(max_length=50, verbose_name=\"Title\")\n    display_type = models.PositiveIntegerField(default=1, choices=SIDE_TYPE, verbose_name=\"Display type\")\n    content = models.CharField(max_length=500, blank=True, verbose_name=\"Content\", help_text=\"Can be left empty if the display type is not HTML\")\n    status = models.PositiveIntegerField(default=STATUS_SHOW,\n                                         choices=STATUS_ITEMS, verbose_name=\"Status\")\n    owner = models.ForeignKey(User, verbose_name=\"Author\")\n    create_time = models.DateTimeField(auto_now_add=True, verbose_name=\"Created time\")\n\n    class Meta():\n        verbose_name = verbose_name_plural = 'Sidebar'\n        # ordering = ['-id'] # sort by id in descending order\n\n","sub_path":"config/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"223006350","text":"\n\ndef get_score(self, document, index, average_idf):\n    score = 0\n    for word in document:\n        if (word not in self.f[index]):\n            continue\n        idf = (self.idf[word] if (self.idf[word] >= 0) else (EPSILON * average_idf))\n        score += (((idf * self.f[index][word]) * (PARAM_K1 + 1)) / (self.f[index][word] + (PARAM_K1 * ((1 - PARAM_B) + ((PARAM_B * len(document)) / self.avgdl)))))\n    return score\n","sub_path":"Data 
Set/bug-fixing-2/24a692b2f1160ff699f65ac26fc840d115d17155--fix.py","file_name":"24a692b2f1160ff699f65ac26fc840d115d17155--fix.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"649849123","text":"# -*- coding: utf-8 -*-\n# @time : 10/12/2019 8:31 AM\n# @author : ssdcxy\n# @email : 18379190862@163.com\n# @description: \n\n\norigin_data_path = \"data/phase2_train.csv\"\nreal_data_path = \"data/real/\"\nimg_path = \"img/\"","sub_path":"TimeSerialAnalysis/KPI/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"356314131","text":"# -------------------------------\n# Group Name: ??\n# source mapping\n# 11032020\n# Modified from Week10_starterscript.py\n# Modified from Hull_HW10_maps.py\n# -------------------------------\n# %%\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nfrom shapely.geometry import Point\n# import contextily as ctx\nfrom pprint import pprint\nimport group_functions as gf\n\n\n# %%\n# 1) create a df of files, filepath, and name\nfilepath = '../../spatial_data_nongit/'\ngpd_df = pd.DataFrame(columns=['names', 'file', 'gpd'])\n\nnames = ['gages', 'rivers', 'gwsi', 'huc', 'az']\n\ngpd_df['names'] = names\n\nfilenames = ['gagesII_9322_point_shapefile/gagesII_9322_sept30_2011.shp',\n             'USA_Rivers_and_Streams-shp/'\n             '9ae73184-d43c-4ab8-940a-c8687f61952f2020328-1-r9gw71.0odx9.shp',\n             'GWSI_ZIP_10162020/Shape/GWSI_SITES.shp',\n             'Shape/WBDHU10.shp',\n             'tl_2016_04_cousub/tl_2016_04_cousub.shp']\n\ngpd_df['file'] = filenames\ngpd_df['file'] = filepath + gpd_df['file']\n\n# Import data into df and add to dataframe\nfor i in range(len(gpd_df)):\n    gpd_df.iat[i, 2] = gpd.read_file(gpd_df['file'].iloc[i])\n\n# %%\n# 2) Add some points\n# Stream gauge: 34.44833333, -111.7891667\npoint_list = np.array([[-111.7891667, 34.44833333]])\n\n# extract crs from huc (desired to apply)\ncrs_in = gpd_df['gpd'].iloc[\n    gpd_df.index[\n        gpd_df['names'\n               ] == 'huc'].tolist()[0]\n    ].crs\n\n# add a line containing point_list to gpd_df including data frame of points\ngpd_df = gf.add_pt_gdf(point_list, crs_in, gpd_df, 'points_df')\n\n# %%\n# 3) fix any crs issues\n# 4) Clip data extent for all layers based on extent of arizona\n\n# clip set\nclip_set = gpd_df['gpd'].iloc[gpd_df.index[gpd_df['names'] ==\n                                           'az'].tolist()[0]]\n\n# crs set\n# extract crs to set all to (from gages)\ncrs_set = clip_set.crs\n\n# loop through all gdf and (a) fix crs issues and (b) clip domains\nfor i in range(len(gpd_df)):\n    gpd_df['gpd'].iloc[i].to_crs(crs_set, inplace=True)\n    pprint(gpd_df['gpd'].iloc[i].crs)\n    gpd_df.iat[i, 2] = gpd.clip(gpd_df['gpd'].iloc[i], clip_set, False)\n\n\n# %%\n# 6) Make a map of just Verde River Area\nextent = gpd_df['gpd'].iloc[1][gpd_df['gpd'].iloc[1]['Name'] == 'Verde River']\nsalt = gpd_df['gpd'].iloc[1][gpd_df['gpd'].iloc[1]['Name'] == 'Salt River']\n\nrangef = (extent.total_bounds[2] - extent.total_bounds[0])\n# clip extent\nxmin, xmax, ymin, ymax = extent.total_bounds[0]-rangef, \\\n    extent.total_bounds[2]+rangef, \\\n    extent.total_bounds[1]-rangef, \\\n    extent.total_bounds[3]+rangef\n\n# create plot\nfig, ax = plt.subplots(figsize=(10, 10))\nax.set_xlim(xmin, xmax)\nax.set_ylim(ymin, ymax)\n\ncolorList = ['red', 'blue', 'green', 'grey', 'black', 'yellow']\nalphaList = [1, 1, 0.2, 0.5, 0.7, 1]\nzorderList = [6, 4, 3, 2, 1, 
7]\nnameList = ['USGS Stream Gages', 'Arizona Major Rivers',\n 'USGS Groundwater Sites', 'HUC 10 Watersheds',\n 'AZ', 'Verde River Stream Gage']\nmarkerList = ['X', '_', '.', '_', '_', '*']\n\n# loop through all gdps:\nfor i in range(len(gpd_df)):\n if gpd_df['names'].iloc[i] == 'huc':\n gpd_df['gpd'].iloc[i].boundary.plot(ax=ax,\n label=nameList[i],\n zorder=zorderList[i],\n edgecolor=colorList[i],\n alpha=alphaList[i])\n\n else:\n gpd_df['gpd'].iloc[i].plot(ax=ax,\n label=nameList[i],\n zorder=zorderList[i],\n color=colorList[i],\n alpha=alphaList[i],\n marker=markerList[i]\n )\n\n# plot just Verde River\nextent.plot(ax=ax,\n label='Verde River',\n zorder=5,\n color='purple',\n linestyle='-.',\n linewidth=5)\n\n# plot just Salt River\nsalt.plot(ax=ax,\n label='Salt River',\n zorder=5,\n color='orange',\n linestyle=':',\n linewidth=5,\n alpha=0.5)\n\nax.set_title('Arizona Hydrologic Features')\nax.set_xlabel('Longitude')\nax.set_ylabel('Latitude')\nax.legend(loc='lower left')\nplt.show()\n\n# %%\n","sub_path":"team_forecast/source_MAP.py","file_name":"source_MAP.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"392268179","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 11 10:38:13 2018\r\n\r\n@author: zmohamadazri\r\n\"\"\"\r\nimport os\r\nimport shutil\r\nfrom os.path import join, dirname, abspath, exists\r\nimport pandas as pd\r\nfrom requests.exceptions import HTTPError, ConnectionError, ReadTimeout\r\nfrom selenium.common.exceptions import TimeoutException, WebDriverException\r\nimport urllib3\r\nurllib3.disable_warnings()\r\nimport datetime\r\n\r\nimport sourcecode\r\nimport packagetools.urlaccess as urlaccess\r\nimport packagetools.convertfiles as convert\r\nfrom packagetools.sendemail import SendEmail\r\nfrom packagetools.datachanges import CheckingResult, ExcelChanges, consolidate\r\n\r\ndef run_url_checking(masterfile):\r\n starttime = datetime.datetime.now()\r\n print(starttime.strftime(\"%d %b %Y %I:%M:%S %p\"))\r\n \r\n #define all related files\r\n masterfolder = dirname(abspath(__file__))\r\n \r\n excel1 = join(masterfolder, masterfile)\r\n excel2 = join(masterfolder, \"Report.xlsx\")\r\n excel3 = join(masterfolder, \"Email.xlsx\")\r\n htmlfile = join(masterfolder, \"Email.html\")\r\n mdbfile = join(masterfolder, \"Automation.mdb\")\r\n\r\n #convert masterfile to dataframe\r\n df1 = pd.read_excel(excel1, sheet_name='Sheet1')\r\n df12 = pd.read_excel(excel1, sheet_name = 'Sheet2', index_col=0, header=None)\r\n \r\n #create dataframe for use in report attachment\r\n df2 = pd.DataFrame(columns=['Source','STP Name', 'New Timepoint','Previous Timepoint', 'Changes Type', \r\n 'Key', 'Frequency', 'Level', 'System ID', 'Method', 'Remark', 'Requested Time'])\r\n \r\n #clear columns value on the masterfile\r\n df1['TimePoint Source'] = None\r\n df1['Changes Type'] = None\r\n df1['Status'] = None\r\n df1['Last Timepoint'] = None\r\n \r\n fromaddress = str(df12.loc['From'].get(1)).strip()\r\n toaddress = str(df12.loc['To'].get(1)).strip()\r\n ccaddress = str(df12.loc['CC'].get(1)).strip()\r\n countrycode = str(df12.loc['Country Code'].get(1)).strip()\r\n \r\n consolfolder = join(masterfolder, 'Consolidated Report')\r\n if not exists(consolfolder):\r\n os.makedirs(consolfolder)\r\n else:\r\n pass\r\n excel4 = join(consolfolder, countrycode+' Consolidated Report.xlsx')\r\n \r\n #open selenium webdriver\r\n driver = urlaccess.openwebdriver()\r\n \r\n #iterate row by row 
through the dataframe\r\n    for i in df1.index:\r\n        url = df1.loc[i, 'URL']\r\n        indicator_name = df1.loc[i, 'Indicator']\r\n        stpname = df1.loc[i, 'STP Name']\r\n        ref = df1.loc[i, 'Ref']\r\n        timepoint1 = df1.loc[i, 'Current TimePoint']\r\n        save_path = join(masterfolder, 'file')\r\n        \r\n        c = CheckingResult(i, df1, df2)\r\n        \r\n        try:\r\n            #check the latest timepoint\r\n            last_update = sourcecode.checkupdate(url, indicator_name, stpname, save_path, driver, timepoint1, ref)\r\n            \r\n            df1.loc[i, 'TimePoint Source'] = str(last_update)\r\n            \r\n            #check whether there is a new update or a failure and make changes in the dataframe\r\n            if df1.loc[i, 'TimePoint Source'] == '' or df1.loc[i, 'TimePoint Source'] == None:\r\n                c.failed('Fail - Website Layout Change/Server Down')\r\n            elif df1.loc[i, 'Current TimePoint'] != df1.loc[i, 'TimePoint Source']:\r\n                c.updatedetected()\r\n                c.updatemdb(mdbfile, countrycode)\r\n            else:\r\n                c.uptodate()\r\n            \r\n        #error handler\r\n        except AttributeError:\r\n            c.failed('Fail - Website Layout Change/Server Down')\r\n        except NameError:\r\n            c.failed('Fail - Website Layout Change/Server Down')\r\n        except HTTPError:\r\n            c.failed('Fail - Website Layout Change/Server Down')\r\n        except ConnectionError:\r\n            c.failed('Fail - Connection unstable')\r\n        except ReadTimeout:\r\n            c.failed('Fail - Website Layout Change/Server Down')\r\n        except TimeoutException:\r\n            c.failed('Fail - Connection unstable')\r\n        except WebDriverException:\r\n            c.failed('Fail - Website Layout Change/Server Down')\r\n        except Exception:\r\n            c.failed('Fail - Website Layout Change/Server Down')\r\n        \r\n        print(str(i+1)+' '+str(df1.loc[i,'STP Name'])+'\\n'+str(df1.loc[i,'Changes Type'])+'\\n')\r\n        \r\n    #close selenium webdriver\r\n    driver.quit()\r\n    \r\n    #count number of url, new releases, and failed\r\n    newreleases = len(df1[df1['Changes Type'] == 'New Detected'])\r\n    failedreleases = len(df1[df1['Changes Type'] != 'Up to date']) - len(df1[df1['Changes Type'] == 'New Detected'])\r\n    allurl = len(df1)\r\n    \r\n    #convert dataframe back to excel masterfile\r\n    convert.dftomasterfile(excel1, df1)\r\n    \r\n    #write and convert email excel body to html if new releases or failed and send email\r\n    serverhost = 'ceicdata-com.mail.protection.outlook.com'\r\n    email = SendEmail(serverhost, fromaddress, toaddress, ccaddress, 10)\r\n    \r\n    if newreleases != 0 or failedreleases != 0:\r\n        convert.dftoreport(excel2, df2.drop(['Requested Time'], axis=1))#convert dataframe to excel report attachment\r\n        ec = ExcelChanges(excel2, excel3, df1)\r\n        ec.reporttoemail()#write email body in excel\r\n        convert.exceltohtml(excel3, htmlfile)#convert excel email body to html\r\n        \r\n        if newreleases != 0:\r\n            email.sendmail(excel2, htmlfile, newreleases, failedreleases, allurl, 'Alert! 
| '+countrycode+'_Release Detected_')\r\n            consolidate(df2, excel4)\r\n        else:\r\n            email.sendmail(excel2, htmlfile, newreleases, failedreleases, allurl, 'Failed | '+countrycode+'_No Release Detected_')\r\n        \r\n    else:\r\n        htmlfile = None\r\n        email.sendmail(excel2, htmlfile, newreleases, failedreleases, allurl, 'All Up To Date | '+countrycode+'_No Release Detected_')\r\n    \r\n    #remove report attachment, excel and html email body\r\n    urlaccess.deletefile(excel2)\r\n    urlaccess.deletefile(excel3)\r\n    urlaccess.deletefile(htmlfile)\r\n    \r\n    endtime = datetime.datetime.now()\r\n    print(endtime.strftime(\"%d %b %Y %I:%M:%S %p\"))\r\n    \r\n    print('\\nTotal Running Time: ' + str(endtime-starttime))\r\n    \r\nif __name__ == \"__main__\":\r\n    masterfile = \"URL Checking.xlsx\"\r\n    run_url_checking(masterfile)\r\n    \r\n    user = os.getlogin()\r\n    tempfolder = 'C:\\\\Users\\\\'+ user+'\\\\AppData\\\\Local\\\\Temp'\r\n    \r\n    for allfiles in os.listdir(tempfolder):\r\n        if allfiles.startswith('scoped_dir'):\r\n            file_path = os.path.join(tempfolder, allfiles)\r\n            try:\r\n                shutil.rmtree(file_path)\r\n            except Exception as e:\r\n                pass\r\n","sub_path":"HKG1/mainfile.py","file_name":"mainfile.py","file_ext":"py","file_size_in_byte":6680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"109266304","text":"from rest_framework import serializers\nfrom apps.users.models import User\nfrom apps.markets.models import Market\n\nfrom .models import Report\n\n\nclass ReportSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = Report\n        fields = ('report_content', 'time_stamp', 'market', 'reported_user')\n\n    def create(self, validated_data):\n        validated_data['user'] = self.context.get('request').user\n        report = Report.objects.create(**validated_data)\n        return report\n\n    def to_representation(self, instance):\n        repr = super(ReportSerializer, self).to_representation(instance)\n        repr['market'] = str(Market.objects.get(pk=repr['market']))\n        repr['reported_user'] = str(User.objects.get(pk=repr['reported_user']))\n        return repr","sub_path":"webserver/apps/reports/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"325868477","text":"import function.neural_network as nn\nimport numpy as np\nimport importlib\nimport minst.open as minst\nimport time\n\n\nif __name__ == \"__main__\":\n\n    Act = 'LReLU'\n    Out = 'softmax'\n\n    TrainI = minst.readMINIST('minst/train-images.idx3-ubyte')\n    TrainImage = TrainI.getImage()\n    TrainImage = minst.image(TrainImage)\n    TrainL = minst.readMINIST('minst/train-labels.idx1-ubyte')\n    TrainLabel = TrainL.getLabel()\n    TrainLabel = minst.hot_encoding(TrainLabel)\n\n    TestI = minst.readMINIST('minst/t10k-images.idx3-ubyte')\n    TestImage = TestI.getImage()\n    TestImage = minst.image(TestImage)\n    TestL = minst.readMINIST('minst/t10k-labels.idx1-ubyte')\n    TestLabel = TestL.getLabel()\n    TestLabel = minst.hot_encoding(TestLabel)\n\n    Path = 'model/cnn_double_dropout_0.2_shuffle.npy'\n    DeepNet = nn.load(Path)\n    DeepNet.method(shuffle=False)\n\n    try:\n\n        Para = input('learning rate and epochs\\n').split()\n        Rate = float(Para[0])\n        Epoch = int(Para[1])\n\n        while 1:\n\n            TrainError = DeepNet.cnn_train(TrainImage, TrainLabel, Rate, Epoch)\n            time1 = time.time()\n            Result = DeepNet.predict(TestImage, TestLabel)\n            time2 = time.time()\n            print(Result[1], Result[2])\n            print(Rate, Epoch)\n            print(time2 - time1)\n\n            nn.save(DeepNet, Path)\n\n            
Argv = input('want to end?\\n')\n if Argv == '1':\n break\n else:\n Para = Argv.split()\n Rate = float(Para[0])\n Epoch = int(Para[1])\n\n except KeyboardInterrupt:\n nn.save(DeepNet, Path)\n Result = DeepNet.predict(TestImage, TestLabel)\n print(Result[1], Result[2])\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"320502668","text":"from sample.settings import *\nimport torch.nn as nn\nfrom sample.helpers import load_dataset,weights_init\nfrom sample.Discriminator import Discriminator\nfrom sample.Generator import Generator\nimport torch.optim as optim\n\nclass FaceGan() :\n\n def __init__(self):\n self.dataset, self.dataloader, self.device = load_dataset()\n\n self.netG = Generator(ngpu).to(self.device)\n self.netD = Discriminator(ngpu).to(self.device)\n\n # Initialise Weights\n self.netG.apply(weights_init)\n self.netD.apply(weights_init)\n\n\n # define loss function\n self.criterion = nn.BCELoss()\n\n # We create a fixed subset of random for the latent variable, this way we can evaluate our progress.\n self.fixed_noise = torch.randn(n_visualisation, nz, 1, 1, device=self.device)\n\n # Establish convention for real and fake labels during training\n self.real_label = 1\n self.fake_label = 0\n\n # Setup Adam optimizers for both G and D\n self.optimizerD = optim.Adam(self.netD.parameters(), lr=lr, betas=(beta1, 0.999))\n self.optimizerG = optim.Adam(self.netG.parameters(), lr=lr, betas=(beta1, 0.999))\n\n\n # Fixed noise for visualisation\n self.fixed_noise = torch.randn(64, nz, 1, 1, device=self.device)\n\n def train(self):\n # Training Loop\n\n # Lists to keep track of progress\n img_list = []\n G_losses = []\n D_losses = []\n iters = 0\n\n print(\"Starting Training Loop...\")\n # For each epoch\n for epoch in range(num_epochs):\n # For each batch in the dataloader\n for i, data in enumerate(self.dataloader, 0):\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n\n ## Reset the grad for the Discriminator Network\n self.netD.zero_grad()\n\n # Format batch\n real_cpu = data[0].to(self.device) # Load the batch to gpu\n\n # Create the vector of the input size with only True = [real_label]*b_size\n b_size = real_cpu.size(0)\n label = torch.full((b_size,), self.real_label, device=self.device)\n\n # Forward pass real batch through D -> Get predictions\n output = self.netD(real_cpu).view(-1)\n # Calculate loss on all-real batch\n errD_real = self.criterion(output, label)\n\n # Calculate gradients for D in backward pass, WAIT to optimize !!!\n errD_real.backward()\n D_x = output.mean().item() # Accuracy of the classifier for True examples\n\n # Train with all-fake batch\n # Generate batch of latent vectors using noise, same number of noise than real examples\n noise = torch.randn(b_size, nz, 1, 1, device=self.device)\n # Generate fake image batch with G\n fake = self.netG(noise)\n label.fill_(self.fake_label) # Convert the previously label vector to all False this time\n # Classify all fake batch with D, have to detach so the optimization don't impact netG !\n output = self.netD(fake.detach()).view(-1)\n # Calculate D's loss on the all-fake batch\n errD_fake = self.criterion(output, label)\n # Calculate the gradients for this batch\n errD_fake.backward()\n D_G_z1 = output.mean().item() # 1-Accuracy of the classifier for generated Examples\n # Add the gradients from the all-real and 
all-fake batches\n errD = errD_real + errD_fake\n # We can now optimize netD\n self.optimizerD.step()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n self.netG.zero_grad()\n label.fill_(self.real_label) # Here we use fake images\n\n output = self.netD(fake).view(-1) # We use the same fake because generator didn't changed yet\n # Calculate G's loss based on this output\n errG = self.criterion(output, label)\n # Calculate gradients for G , We only update\n errG.backward() # loos for G\n D_G_z2 = output.mean().item() # G sucess faking\n # Update G\n self.optimizerG.step()\n\n # Output training stats\n if i % 50 == 0:\n print('[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f\\tD(x): %.4f\\tD(G(z)): %.4f / %.4f'\n % (epoch, num_epochs, i, len(self.dataloader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n\n # Save Losses for plotting later\n G_losses.append(errG.item())\n D_losses.append(errD.item())\n\n # Check how the generator is doing by saving G's output on fixed_noise\n if (iters % 500 == 0) or ((epoch == num_epochs - 1) and (i == len(self.dataloader) - 1)):\n with torch.no_grad():\n fake = self.netG(self.fixed_noise).detach().cpu()\n viz.images((fake * 0.5 + 0.5).clamp(min=0, max=1),win='generated images')\n\n iters += 1\n\nif __name__=='__main__':\n faceGan = FaceGan()\n faceGan.train()","sub_path":"Gans/MicroGans/MicroGan_1/sample/MicroGan.py","file_name":"MicroGan.py","file_ext":"py","file_size_in_byte":5511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"307098778","text":"import socket\n\n# Define the IP address and the Port Number\nIP = \"172.21.72.133\"\n# IP = \"192.168.1.71\"\nPORT = 8080\n\nlisteningAddress = (IP, PORT)\n\n# Create a datagram based server socket that uses IPv4 addressing scheme\ndatagramSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ndatagramSocket.bind(listeningAddress)\n\nwhile(True):\n localization, sourceAddress = datagramSocket.recvfrom(128)\n print(localization.decode())","sub_path":"SOCKET/receivemsg.py","file_name":"receivemsg.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"531510570","text":"from tqdm import tqdm\nfrom torch import nn\nimport torch.nn\nfrom torch.functional import F\nimport os\n\nos.chdir('d:\\Python Projects\\EVA')\ncwd = os.getcwd()\n\nmodel_dir = os.path.join(cwd, 'Assignment-6/saved_models/model.pth')\n\ndef model_training(model, device, train_dataloader, optimizer, train_acc, train_losses, l1_loss=False):\n \n model.train()\n pbar = tqdm(train_dataloader)\n correct = 0\n processed = 0\n\n for batch_idx, (data, target) in enumerate(pbar):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n y_pred = model(data)\n loss = F.nll_loss(y_pred, target)\n\n # IF L1 Loss\n if l1_loss:\n lambda_l1 = 0.0001\n l1 = 0\n for p in model.parameters():\n l1 = l1 + p.abs().sum()\n loss = loss + lambda_l1*l1\n \n train_losses.append(loss)\n loss.backward()\n optimizer.step()\n\n pred = y_pred.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n processed += len(data)\n\n pbar.set_description(desc=f'Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')\n train_acc.append(100*correct/processed)\n torch.save(model.state_dict(), 
model_dir)","sub_path":"Assignment-6/model/model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"57706944","text":"class Solution(object):\n \n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n\n Hmm, so how am I going to solve this???\n\n I think you can use BFS. Start from a location, and keep searching in all directions, until you have marked the entire island. Once the island has been marked, you can remove it. I guess this is what you would consider as \"sinking\" the island. Once an island has been sunk, you can increment a counter to keep track of how many islands there are\n \"\"\"\n \n def numIslands(self, grid):\n ans = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n ans += 1\n self.sink(grid, j, i)\n return ans\n \n def sink(self, grid, x, y):\n if grid[y][x] == '1':\n grid[y][x] = '0'\n if x+1 < len(grid[0]) and grid[y][x+1] == '1':\n self.sink(grid, x+1, y)\n if x-1 >= 0 and grid[y][x-1] == '1':\n self.sink(grid, x-1, y)\n if y+1 < len(grid) and grid[y+1][x] == '1':\n self.sink(grid, x, y+1)\n if y-1 >= 0 and grid[y-1][x] == '1':\n self.sink(grid, x, y-1)\n","sub_path":"leetcode/number_of_islands.py","file_name":"number_of_islands.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"431347602","text":"\"\"\"\nGiven a time in the format of hh:mm (12-hour format) 0 < hh < 12, 0 <= mm < 60. The task is to convert it into words\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow. Each test case contains two space separated integers 'h' and 'm' denoting hours and minutes respectively.\n\nOutput:\nOutput the input time into words.\n\nConstraints:\n1<=T<=10^5\n1<=h<=12\n1<=m<=60\n\nExample:\nInput:\n6\n6 0\n6 10\n6 15\n6 30\n6 45\n6 47\n\nOutput:\nsix o' clock\nten minutes past six\nquarter past six\nhalf past six\nquarter to seven\nthirteen minutes to seven\n\"\"\"\n\n\ndef convert_to_word(hr, min):\n num_str = [\"zero\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\", \"ten\",\n \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"quarter\", \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\",\n \"twenty\", \"twenty one\", \"twenty two\", \"twenty three\", \"twenty four\", \"twenty five\", \"twenty six\",\n \"twenty seven\", \"twenty eight\", \"twenty nine\", \"past\"]\n\n if min == 0:\n print(num_str[hr], \"o' clock\")\n elif min == 30:\n print(\"half past\", num_str[hr])\n elif min == 45:\n print(\"quarter to\", num_str[hr + 1])\n elif min == 15:\n print(\"quarter past\", num_str[hr])\n elif min == 1:\n print(num_str[min] + \"minutes past\" + num_str[hr])\n elif min == 59:\n print(num_str[60 - min] + \" minute to \" + num_str[hr + 1])\n elif min > 30:\n print(num_str[60 - min] + \" minutes to \" + num_str[hr + 1])\n elif min < 30:\n print(num_str[min] + \" minutes past \" + num_str[hr])\n\n\nif __name__ == '__main__':\n t = int(input())\n for i in range(t):\n hr, min = [int(i) for i in input().split()][0:2]\n convert_to_word(hr, min)\n","sub_path":"practice/school/time_to_words.py","file_name":"time_to_words.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"320451697","text":"#! 
/usr/bin/python3\n# -*- coding:utf-8 -*-\n#\n#\tredis_read.py\n#\n#\t\t\t\t\tJul/31/2017\n#\n# --------------------------------------------------------------\nimport sys\nimport redis\nimport json\n#\nsys.path.append ('../../../common/python_common')\nfrom text_manipulate import dict_display_proc\n\n# --------------------------------------------------------------\nsys.stderr.write(\"*** 開始 ***\\n\")\n\nrr = redis.Redis(host='localhost', port=6379, db=0)\n\nkeys = rr.keys('t*')\n\nfor key in sorted(keys):\n\tstr_json = rr.get(key).decode ()\n\tunit_aa = json.loads(str_json)\n\tstr_out = key.decode()+\"\\t\"+ unit_aa['name']\n\tstr_out += \"\\t\"+ str(unit_aa['population'])\n\tstr_out += \"\\t\"+ unit_aa['date_mod']\n\tprint(str_out)\n#\nsys.stderr.write(\"*** 終了 ***\\n\")\n#\n# --------------------------------------------------------------\n","sub_path":"redis/python/read/redis_read.py","file_name":"redis_read.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"322748393","text":"import unittest\nfrom Week3.ObjectOrientedProgramming.StockReport import StockData, StockValues\n\nclass Stock_Report(unittest.TestCase):\n def test_stock_report(self):\n obj = StockData # creating the object of method class\n result = obj.stock_data(self)\n excepted = True\n if ValueError:\n self.assertFalse(result,excepted)\n else:\n obj2 = StockValues\n result1 = obj2.stock_values(self)\n excepted = True\n self.assertTrue(result1, excepted)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"Testing/test_oops/test_stock_report.py","file_name":"test_stock_report.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"252871831","text":"\nimport shutil\nimport os\nimport stat\nimport time\nimport sys\n\ndef start(video_path,save_path):\n \"\"\"\n 현재 더미파일이 원본비디오폴더에 생기도록 되어있습니다.\n :param video_path: .../video_name.mp4\n :param save_path: .../faceset\n :return: 없음\n \"\"\"\n dummy_path = video_path.split('.mp4')[0]+\"faceA_dummy.mp4\" #비디오저장목적으로 추정\n #delete\n def remove_readonly(func, path, excinfo):\n os.chmod(path, stat.S_IWRITE)\n func(path)\n def remove_dir_tree(remove_dir):\n try:\n shutil.rmtree(remove_dir, ignore_errors=False, onerror=remove_readonly)\n except(PermissionError) as e: ## if failed, report it back to the user ##\n print(\"[Delete Error] %s - %s.\" % (e.filename,e.strerror))\n try:\n remove_dir_tree(save_path)\n remove_dir_tree(save_path+'_eyes')\n os.remove(dummy_path)\n except(FileNotFoundError) :\n pass\n print(\"remove file\")\n\n import mtcnn_control_ho as face_detecter\n face_detection= face_detecter.MTCNN_video_face()\n face_detection.setMedia_source_path(video_path)\n face_detection.setFaces_path(save_path)\n face_detection.setOutput_file_path(dummy_path)\n face_detection.start()\n print(\"얼굴추출완료\")\nif (len(sys.argv)<3):\n print('파라미터오류')\nelse :\n start(video_path=sys.argv[1],save_path=sys.argv[2])","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621019142","text":"from players.entity import Player\r\nfrom players.helpers import index_from_userid, playerinfo_from_userid, index_from_playerinfo, userid_from_index\r\nfrom menus import SimpleMenu\r\nfrom menus import SimpleOption\r\nfrom menus import PagedOption\r\n\r\nfrom menus import Text\r\nimport 
wcs\r\nfrom menus import PagedMenu\r\n\r\n\r\ndef raceinfo_menu_build(menu, index):\r\n\tmenu.clear()\r\n\traces = wcs.wcs.racedb.getAll()\r\n\tuserid = userid_from_index(index)\r\n\tplayer_entity = Player(index)\r\n\tallraces = races.keys()\r\n\tfor number, race in enumerate(allraces):\r\n\t\tif race in wcs.wcs.wcsplayers[userid].all_races:\r\n\t\t\tlevel = wcs.wcs.wcsplayers[userid].all_races[race]['level']\r\n\t\telse:\r\n\t\t\tlevel = 0\r\n\t\traceinfo = wcs.wcs.racedb.getRace(race)\r\n\t\tnol = raceinfo['numberoflevels']\r\n\t\tnos = int(raceinfo['numberofskills'])\r\n\t\tif ('|') in nol:\r\n\t\t\tnol = nol.split('|')\r\n\t\tif len(nol) == 1:\r\n\t\t\tmax_level = int(nol) * nos\r\n\t\telse:\r\n\t\t\tmax_level = 0\r\n\t\t\tfor x in nol:\r\n\t\t\t\tmax_level += int(x)\r\n\t\tlevel_buffer = level\r\n\t\tmax_level = int(max_level)\r\n\t\tif level_buffer > max_level:\r\n\t\t\tlevel_buffer = max_level\r\n\t\tif level:\r\n\t\t\toption = PagedOption('%s - [%s/%s]' % (race,level,max_level),race)\r\n\t\telse:\r\n\t\t\toption = PagedOption('%s' % str(race), race)\r\n\t\tmenu.append(option)\r\n\t\t\r\n\t\t\r\ndef raceinfo_menu_select(menu, index, choice):\r\n\trace = choice.value\r\n\traceinfo = wcs.wcs.racedb.getRace(race)\r\n\trequired = raceinfo['required']\r\n\tmaximum = raceinfo['maximum']\r\n\tallowonly = raceinfo['allowonly']\r\n\tdesc = raceinfo['desc']\r\n\tskillnames = raceinfo['skillnames'].split('|')\r\n\tskilldesc = raceinfo['skilldescr'].split('|')\r\n\tnumberofskills = int(raceinfo['numberofskills'])-1\r\n\t\r\n\traceinfo_race_menu = PagedMenu(title='Raceinfo - %s' % race,parent_menu=menu)\r\n\traceinfo_race_menu.append(Text('Required level: %s' % required))\r\n\traceinfo_race_menu.append(Text('Maximum level: %s' % maximum))\r\n\tif allowonly:\r\n\t\traceinfo_race_menu.append(Text(''))\r\n\tif desc:\r\n\t\traceinfo_race_menu.append(Text('Description: %s' % desc))\r\n\traceinfo_race_menu.append(Text('Skills:'))\r\n\tx = 0\r\n\twhile x <= numberofskills:\r\n\t\traceinfo_race_menu.append(PagedOption('%s' % skillnames[x], value=None, highlight=True, selectable=False))\r\n\t\t#raceinfo_race_menu.append(Text('o %s' % skillnames[x]))\r\n\t\tv = str(skilldesc[x]).split('+')\r\n\t\traceinfo_race_menu.append(Text('%s' % v[0]))\r\n\t\tfor y in v[1:]:\r\n\t\t\traceinfo_race_menu.append(Text('%s' % y))\r\n\t\tx +=1\r\n\traceinfo_race_menu.send(index)\r\n\t\r\nraceinfo_menu = PagedMenu(title='Raceinfo Menu',build_callback=raceinfo_menu_build, select_callback=raceinfo_menu_select)\r\n\r\ndef doCommand(userid):\r\n\tindex = index_from_userid(userid)\r\n\traces = wcs.wcs.racedb.getAll()\r\n\tallraces = races.keys()\r\n\tif len(allraces):\r\n\t\traceinfo_menu.send(index)\r\n","sub_path":"addons/source-python/plugins/wcs/raceinfo.py","file_name":"raceinfo.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"517380704","text":"from django.shortcuts import render\n\n\n# Create your views here.\n\n\n\nfrom .models import Book, Author, BookInstance, Genre\nfrom django.template import Context\n\ndef index(request):\n\n \"\"\"\n\n View function for home page of site.\n\n \"\"\"\n\n # Generate counts of some of the main objects\n\n num_books=Book.objects.all().count()\n\n num_instances=BookInstance.objects.all().count()\n\n # Available copies of books\n\n num_instances_available=BookInstance.objects.filter(status__exact='a').count()\n\n num_authors=Author.objects.count() \n\n num_visits = 
request.session.get('num_visits',0)\n request.session['num_visits'] = num_visits+1\n \n context={'num_books':num_books,'num_instances':num_instances,'num_instances_available':num_instances_available,\n 'num_authors':num_authors}\n\n return render(request,'index.html',context)\n\nfrom django.views import generic\n\nclass BookListView(generic.ListView):\n\tmodel=Book\n\tpaginate_by=2\n\nclass BookDetailView(generic.DetailView):\n \tmodel = Book","sub_path":"locallibrary/catalog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"432951930","text":"import numpy as np\n\ndef read_zitrin(npix,file):\n\n z_array = []\n\n with open(file,'r') as f :\n data = f.readlines()\n \n for i in range(len(data)) :\n z_array.append([float(x) for x in data[i].strip(' ').split()][:-1])\n\n f.close()\n\n z_array = np.resize(np.array(z_array),(npix[0],npix[0]))\n\n return z_array\n","sub_path":"Lensing/read_zitrin.py","file_name":"read_zitrin.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"566905006","text":"import os\ndef renameFCARetail(directory):\n files = sorted(os.listdir(directory))\n for file in files:\n filename = os.fsdecode(file)\n if filename.endswith(\".csv\") and filename.startswith(\"Retail\"):\n name, _ = filename.split(\".\")\n _, dd, mm, yyyy = name.split(\"_\")\n new_file_name = yyyy + \"_\" + mm + \"_\" + dd + \".csv\"\n os.rename(directory+'/'+filename, directory+'/'+new_file_name)\n continue\n else:\n continue\n\nrenameFCARetail('.')\n","sub_path":"Data/Original/RetailFCA/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"416881066","text":"import csv\nimport itertools\nimport random\n\n# Read csv file and save into four different dicts indexed by serial number\n# Create an Actions list \n# Specify DS size desired, max being nP2 where n = number of DS points\n# Create different possible nPm permutations of features, each with one of the actions\n# Save to a new DS\n\n# object1 on object2 relations\n# revolute joint on 1 - grasp - yes\n# pointed on 1 - type anything except wood or glass on 2 - pierce - yes\n\nactions = ['pierce', 'grasp']\neffects = ['pierced', 'grasped']\ncsv_file = 'obj_combo_DS.csv'\n\ncsv_file_output = 'obj_combo_labeled.csv'\ncsv_file_clean = 'obj_BNT.csv'\ncsv_pos = 'one_cls_pos.csv'\ncsv_neg = 'one_cls_neg.csv'\ncsv_bal = 'csv_balanced_pierce_grasp_no_effect.csv'\n\npos_dict = {'material_1':[], 'contact_1':[], 'joint_1':[], 'material_2':[], 'action':[], 'effect':[]}\nneg_dict = {'material_1':[], 'contact_1':[], 'joint_1':[], 'material_2':[], 'action':[], 'effect':[]}\n\none_action = 'pierce'\n\nfeatures = {}\nN_datapoints = 50000\n\nwith open(csv_file) as csvfile:\n\treader = csv.reader(csvfile)\n\tfor idx, row in enumerate(reader):\n\t\tfeatures[idx] = row[0:4]\n\nperm_list = range(0,len(features.keys()))\nperm_list = itertools.permutations(perm_list, 2)\n\nperm_list = [item for item in perm_list]\nrandom.shuffle(perm_list)\n\nif N_datapoints >= len(perm_list):\n\tdata_points = perm_list\nelse:\n\tdata_points = perm_list[0:N_datapoints+1]\n\noutput_dict = {'object1':[], 'object2':[], 'action':[], 'effect':[]}\n\nclass_nums = {'Plastic':0, 'Wood':1, 'Glass/Ceramic':2, 'Metal':3, 'Foam':4, 'flat':0, 'line':1, 
'point':2, 'Fixed':0, 'Revolute':1,\n\t\t\t\t 'pierce':0, 'grasp':1, 'pierced':0, 'grasped':1, 'None':2}\n\none_class_nums = {'pierced':1, 'grasped':1, 'None':0}\ndict_classes = {'material_1':[], 'contact_1':[], 'joint_1':[], 'material_2':[], 'action':[], 'effect':[]}\n\nfor item in data_points:\n\taction_num = random.randint(0,len(actions)-1)\n\tobject1 = features[item[0]]\n\tobject2 = features[item[1]]\n\t#action = one_action\n\taction = actions[action_num]\n\teffect = 'None'\n\n\tif action == 'pierce':\n\t\tif (object1[1] == 'point' or object1[2] == 'Point') and (object1[3] == 'Metal' or object1[3] == 'Wood' or object1[3] == 'Plastic'):\n\t\t\tif object2[3] == 'Foam':\n\t\t\t\teffect = effects[0] \n\t\t#elif (object1[1] == 'point' or object1[2] == 'Point') and object1[3] == 'Plastic':\n\t\t#\tif object2[3] == 'Foam':\n\t\t#\t\teffect = effects[0]\n\telif action == 'grasp':\n\t\tif object1[2] == 'revolute' or object1[2] == 'Revolute':\n\t\t\teffect = effects[1]\n\n\tif effect == 'None':\n\t\tcontinue\n\n\toutput_dict['object1'].append(features[item[0]])\n\toutput_dict['object2'].append(features[item[1]])\n\toutput_dict['action'].append(action)\n\toutput_dict['effect'].append(effect)\n\n\tmaterial_1_class = class_nums[(features[item[0]][3]).strip()]\n\tcontact_1_class = class_nums[(features[item[0]][1]).strip()]\n\tjoint_1_class = class_nums[(features[item[0]][2]).strip()]\n\tmaterial_2_class = class_nums[(features[item[1]][3]).strip()]\n\taction_class = class_nums[action]\n\t\n\t#effect_class = one_class_nums[effect]\n\teffect_class = class_nums[effect]\n \n\tdict_classes['material_1'].append(material_1_class)\n\tdict_classes['contact_1'].append(contact_1_class)\n\tdict_classes['joint_1'].append(joint_1_class)\n\tdict_classes['material_2'].append(material_2_class)\n\tdict_classes['action'].append(action_class)\n\tdict_classes['effect'].append(effect_class)\n\n\tif action_class == 1:\n\t\tneg_dict['material_1'].append(material_1_class)\n\t\tneg_dict['contact_1'].append(contact_1_class)\n\t\tneg_dict['joint_1'].append(joint_1_class)\n\t\tneg_dict['material_2'].append(material_2_class)\n\t\tneg_dict['action'].append(action_class)\n\t\tneg_dict['effect'].append(effect_class)\n\telse:\n\t\tpos_dict['material_1'].append(material_1_class)\n\t\tpos_dict['contact_1'].append(contact_1_class)\n\t\tpos_dict['joint_1'].append(joint_1_class)\n\t\tpos_dict['material_2'].append(material_2_class)\n\t\tpos_dict['action'].append(action_class)\n\t\tpos_dict['effect'].append(effect_class)\n\n'''with open(csv_file_output, 'wb') as csv_out:\n\twriter = csv.writer(csv_out)\n\twriter.writerow(output_dict.keys())\n\twriter.writerows(zip(*output_dict.values()))'''\n\n# Write it in a BNT easy form\n# {material 1, contact 1, Joint 1, material 2, action, effect}\n\n'''with open(csv_file_clean, 'wb') as csv_out:\n\twriter = csv.writer(csv_out)\n\twriter.writerow(dict_classes.keys())\n\twriter.writerows(zip(*dict_classes.values()))'''\n\nrand_elts = random.sample(range(0,len(neg_dict['material_1'])+1), len(pos_dict['material_1']))\n\nneg_dict_final = {'material_1':[], 'contact_1':[], 'joint_1':[], 'material_2':[], 'action':[], 'effect':[]}\n\nfor elt in rand_elts:\n\tfor keys in neg_dict.keys():\n\t\tneg_dict_final[keys].append(neg_dict[keys][elt])\n\n'''with open(csv_neg, 'wb') as csv_out:\n\twriter = csv.writer(csv_out)\n\twriter.writerow(neg_dict_final.keys())\n\twriter.writerows(zip(*neg_dict_final.values()))\n\nwith open(csv_pos, 'wb') as csv_out:\n\twriter = 
csv.writer(csv_out)\n\twriter.writerow(pos_dict.keys())\n\twriter.writerows(zip(*pos_dict.values()))'''\n\nwith open(csv_bal, 'wb') as csv_out:\n\twriter = csv.writer(csv_out)\n\twriter.writerow(pos_dict.keys())\n\twriter.writerows(zip(*pos_dict.values()))\n\twriter.writerows(zip(*neg_dict_final.values()))","sub_path":"macgyvering_peripherals/BNT_learning/DS_gen.py","file_name":"DS_gen.py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"594729398","text":"import os\n\nfrom .base import *\n\nSECRET_KEY = 'local'\n\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\nCELERY_BROKER_URL = 'pyamqp://guest@localhost'\n\nDATABASES['default'] = dj_database_url.config(\n default='sqlite:////{}'.format(os.path.join(BASE_DIR, 'db.sqlite3'))\n)\n","sub_path":"steve/config/settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"516060459","text":"tv = [\"GoT\", \"Narcos\", \"Vice\"]\r\ni = 0\r\nfor show in tv:\r\n new = tv[i]\r\n new = new.upper()\r\n tv[i] = new\r\n i += 1\r\nprint (tv)\r\n\r\n\r\nqs = [\"What is your name? \",\r\n \"What is your favorite color? \",\r\n \"What is your quest? \"]\r\nn = 0\r\nwhile True:\r\n print(\"Type q to quit\")\r\n a = input(qs[n])\r\n if a == \"q\":\r\n break\r\n n = (n + 1) % 3\r\n\r\nfor i in range(1, 6):\r\n if i == 3:\r\n continue\r\n print (i)\r\n","sub_path":"Notes_Chapter7.py","file_name":"Notes_Chapter7.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"282525876","text":"# Stores a student's name and a list of her courses and grades in a dict. Prints out her data.\n\n# Author: Cormac Hennigan\n\nstudent = {\"name\": \"Mary\", \"modules\":[{\"courseName\": \"Programming\", \"grade\": 45}, {\"courseName\": \"History\",\n \"grade\": 99}]}\n\nprint (\"Student: {}\".format(student[\"name\"]))\n\nfor module in student [\"modules\"]:\n print(\"\\t {} \\t {}\".format(module[\"courseName\"], module[\"grade\"]))","sub_path":"Topic05-datastructures/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"371914840","text":"# 그냥 sliding window가 답이었네 \ndef solution(s):\n n=len(s)\n for expectation in range(n,0,-1):\n for i in range(0, n-expectation+1):\n test=s[i:i+expectation]\n if test==test[::-1]:\n return expectation\n return 1\n \n\n## 쓸데없이 dp로 접근, 더군다나 논리적 오류도 포함된 것으로 판단됨\ndef solution(s):\n import sys\n sys.setrecursionlimit(10*6)\n n = len(s)\n dp=[0 for _ in range(n)]\n dpe=[0 for _ in range(n)]\n def scanning_odd(x,d):\n if x-d>=0 and x+d=0 and x+1+d 3 and difference <= 5:\n user = int(input(\"Warm. Guess one more time! \"))\n elif difference > 5 and difference <= 8:\n user = int(input(\"Cold. Guess one more time! \"))\n elif difference > 8 and difference <= 13:\n user = int(input(\"Very cold. Guess one more time! \"))\n elif difference > 13 and difference <= 20:\n user = int(input(\"Extremely cold. Guess one more time! \"))\n elif difference > 20:\n user = int(input(\"Icy freezing miserably cold. Guess again! \"))\n else:\n print(\"Congratulations. 
You figured it out in\", i, \"tries.\")\n\n # loop to check times of guessing\n if i == 1:\n print(\"That was lucky!\")\n elif i >= 2 and i <= 4:\n print(\"That was amazing!\")\n elif i >= 5 and i <= 6:\n print(\"That was okay.\")\n elif i == 7:\n print(\"Meh.\")\n elif i >= 8 and i <= 9:\n print(\"This is not your game.\")\n elif i >= 10:\n print(\"You are the worst guesser I've ever seen.\")\n\n\nmain()\n","sub_path":"lab03/guess_advanced.py","file_name":"guess_advanced.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"128388447","text":"import pickle\nimport csv\nimport operator\nimport random\nimport numpy as np\n\nglobal arr\n\ndraftpos = 10\ntotalpos = []\ntemp = draftpos\nfor i in range(15):\n totalpos.append(draftpos + i*28)\n totalpos.append(draftpos + i*28 + 9)\ntotalpos = totalpos[:15]\nprint(totalpos)\n\nnew_array = pickle.load(open(\"nameranking.p\", \"rb\"))\nfullarray = pickle.load(open(\"allinfo.p\",\"rb\"))\n\narr = []\n\n\"\"\"\nfor i in enumerate(fullarray):\n print(i)\nprint(len(fullarray))\n\"\"\"\n\n#name,valuescore,adp\nfor index,i in enumerate(fullarray):\n if index==0:\n continue\n arr.append((i[0],float(i[7]),int(i[9]),i[3]))\n\narr = sorted(arr, key=lambda x:x[2])\n\n\n\nfor i in arr:\n print(i)\nprint(len(new_array))\n\n\npickplayerarr = []\nfor i in range(12):\n lower = totalpos[i]-2\n upper = totalpos[i]+5\n player_list = []\n for j in range(lower,upper+1,1):\n player_list.append(arr[j-1][0])\n pickplayerarr.append(player_list)\n\n\nfor index,i in enumerate(pickplayerarr):\n print(\"Choices for Pick \" + str(index+1) + \" Are: \" + str(i))\n\ndef findposition(round,string):\n middle = totalpos[round-1]\n lower = middle-7\n upper = middle+7\n for j in range(lower,upper+1,1):\n print(arr[j][0])\n if arr[j][0] == \"Kenyan Drake\":\n bob = 5\n if arr[j][0]==string:\n return arr[j][3][:2].upper()\n return \"AB\"\n\n\ndef quarterback_in_round(roundnum, round):\n for i in round:\n if i==\"Patrick Mahomes\":\n bob = 5\n middle = totalpos[roundnum-1]\n lower = middle-10\n upper = middle+10\n for j in range(lower,upper+1,1):\n if arr[j][0]==i:\n if arr[j][3][:2]==\"QB\":\n return True\n return False\n\ndef create_weights(lower,upper,length):\n #lower is how low from adp\n #upper is how high from adp\n assert lower + upper + 1 == length\n\n equal_prob = 1.0/length\n low_prob = equal_prob/2\n upper_prob = (1.0-equal_prob*4-low_prob*(lower-1))/(length-lower+1-4)\n weights = []\n for i in range(lower-1):\n weights.append(low_prob)\n for i in range(4):\n weights.append(equal_prob)\n for i in range(upper-2):\n weights.append(upper_prob)\n\n print(np.sum(weights))\n assert len(weights)==length\n assert np.sum(weights) == 1.0\n\n return weights\n\n\ndef create_dict():\n dict = {}\n dict[\"QB\"] = \"\"\n dict[\"RB1\"] = \"\"\n dict[\"RB2\"] = \"\"\n dict[\"WR1\"] = \"\"\n dict[\"WR2\"] = \"\"\n dict[\"TE\"] = \"\"\n dict[\"B1\"] = \"\"\n dict[\"B2\"] = \"\"\n dict[\"B3\"] = \"\"\n dict[\"B4\"] = \"\"\n dict[\"B5\"] = \"\"\n dict[\"B6\"] = \"\"\n return dict\n\ndef print_dict(dict):\n for key in dict.keys():\n print(str(key)+\": \" + str(dict[key]))\n\n#picking out of the choices\nqbchoices = []\nfor i in range(12):\n if quarterback_in_round(i+1,pickplayerarr[i]):\n qbchoices.append(i+1)\nqbpos = random.choice(qbchoices)\nprint(qbpos)\nround_number = 6\nprint(quarterback_in_round(round_number,pickplayerarr[round_number-1]))\n\ndraftdict = create_dict()\n\n#drafting\nalreadytightend 
= False\nprobability_density = create_weights(2,5,8)\nfor i in range(12):\n round = i+1\n #QB\n if qbpos == round:\n for j in pickplayerarr[i]:\n if findposition(round,j)==\"QB\":\n draftdict[\"QB\"] = j + \" \" + str(round)\n break\n else:\n #pick random guy\n pick = random.choices(pickplayerarr[i],weights=probability_density,k=1)[0]\n position = findposition(round,pick)\n while(position==\"QB\"):\n pick = random.choices(pickplayerarr[i],weights=probability_density,k=1)[0]\n position = findposition(round, pick)\n #Tight End\n if alreadytightend:\n while(position==\"TE\" or position==\"QB\"):\n pick = random.choices(pickplayerarr[i],weights=probability_density,k=1)[0]\n position = findposition(round, pick)\n else:\n if position==\"TE\":\n if not alreadytightend:\n draftdict[position] = pick + \" \" + str(round)\n alreadytightend = True\n continue\n if not draftdict[position+\"1\"]==\"\":\n if not draftdict[position+\"2\"]==\"\":\n counter = 1\n while not draftdict[\"B\"+str(counter)] == \"\":\n counter+=1\n draftdict[\"B\"+str(counter)] = pick + \" \" + str(round)\n else:\n draftdict[position + \"2\"] = pick + \" \" + str(round)\n else:\n draftdict[position + \"1\"] = pick + \" \" + str(round)\nprint(\"done\")\n\nprint_dict(draftdict)\n\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"532425716","text":"# coding: utf_8\n\n\nimport os\nimport re\n\n\nclass parseTestFile():\n name = None\n prere = None\n expected = None\n procedure = None\n result = None\n # file1_url = None\n auto = None\n def parseText(self, text=None):\n if text:\n lines = text[0].strip('\\n').split('\\n')\n\n for i, line in enumerate(lines):\n res = line + '
<br>'\n                res = re.sub(r'\[code\]', '<pre>', res)\n                res = re.sub(r'\[/code\]', '</pre>', res)\n                res = re.sub(r'<br><br>', '<br>
', res)\n lines[i] = res\n\n return ''.join(lines)\n else:\n return None\n\n def parseFileUrl(self, text=None):\n return text[0].strip('\\n')\n\n\n def __init__(self, file_path):\n try:\n f = open(file_path, 'r')\n data = f.read()\n f.close()\n\n text = re.findall(r'\\[NAME\\](.*?)\\[/NAME\\]', data, re.DOTALL)\n self.name = self.parseText(text)\n text = re.findall(r'\\[PRERE\\](.*?)\\[/PRERE\\]', data, re.DOTALL)\n self.prere = self.parseText(text)\n text = re.findall(r'\\[EXPECTED\\](.*?)\\[/EXPECTED\\]', data, re.DOTALL)\n self.expected = self.parseText(text)\n text = re.findall(r'\\[PROCEDURE\\](.*?)\\[/PROCEDURE\\]', data, re.DOTALL)\n self.procedure = self.parseText(text)\n text = re.findall(r'\\[RESULT\\](.*?)\\[/RESULT\\]', data, re.DOTALL)\n self.result = self.parseText(text)\n text = re.findall(r'\\[AUTO\\](.*?)\\[/AUTO\\]', data, re.DOTALL)\n self.auto = self.parseText(text)\n\n # text = re.findall(r'\\[FILE1\\](.*?)\\[/FILE1\\]', data, re.DOTALL)\n # self.file1_url = self.parseFileUrl(text)\n except Exception as e:\n raise Exception(e)\n\n\n\n\n\n# file = parseTestFile('/home/yarikov/web/testeros/apps/tests/test009.txt')\n# print('PRERE')\n# print(file.prere)\n# print('EXPECTED')\n# print(file.expected)\n# print('PROCEDURE')\n# print(file.procedure)\n# print('RESULT')\n# print(file.result)\n# print('file')\n# print(file.file1_url)\n\n\n","sub_path":"magic/parsetest.py","file_name":"parsetest.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"245577203","text":"from socket import *\n\nADDR = ('127.0.0.1', 6666)\nsockfd = socket(AF_INET, SOCK_DGRAM)\nwhile True:\n data = input(\"word>>\")\n if not data:\n break\n sockfd.sendto(data.encode(), ADDR)\n msg, addr = sockfd.recvfrom(1024)\n print(\"解释\", msg.decode())\n","sub_path":"month02/day04/udp.client.py","file_name":"udp.client.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"604929443","text":"from rest_framework import serializers\nfrom main.models import Product\nfrom .models import Cart, CartProduct\n\n\nclass CartProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = CartProduct\n fields = ('product', 'quantity')\n\n\n\nclass CartProductRepresentationSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(source='product.id')\n title = serializers.CharField(source='product.title')\n class Meta:\n model = CartProduct\n fields = ('id', 'title', 'price', 'quantity')\n\n\nclass AddProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ('id', )\n\n\nclass CartSerializer(serializers.ModelSerializer):\n items = CartProductSerializer(many=True, write_only=True)\n count = serializers.IntegerField(required=False, default=1)\n class Meta:\n model = Cart\n fields = ('id', 'count', 'items')\n\n def get_total_cost(self, obj):\n return obj.get_total_cost()\n\n\n def create(self, validated_data):\n request = self.context.get('request')\n print(request)\n items = validated_data.pop('items')\n print(items)\n cart = Cart.objects.create(**validated_data)\n if request.user.is_authenticated:\n cart.user = request.user\n cart.save()\n\n for item in items:\n product = item['product']\n CartProduct.objects.create(cart=cart, product=product, quantity=item['quantity'])\n product.save()\n return cart\n\n\n def update(self, instance, validated_data):\n print('hel')\n instance.count = 
validated_data.get('quantity', instance.quantity)\n instance.save()\n return instance\n\n\n def to_representation(self, instance):\n representation = super().to_representation(instance)\n representation['user'] = instance.user.email\n representation['product'] = CartProductRepresentationSerializer(instance.cart.all(), many=True, context=self.context).data\n return representation","sub_path":"cart/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"300044011","text":"from controller_interface import *\nfrom inertial_controller import *\nimport time\nfrom ctypes import *\nimport os\nimport os_detect\nfrom sys import platform as _platform\nfrom workspace import *\n\n\n# FPS = 30.0\n# timeperframe = 1.0/FPS\n# print(timeperframe, FPS)\n\nCONTROLLER_MAX_TRANSLATION = 350.0\n\nclass SpacenavController(InertialController):\n\tdef __init__(self, thread):\n\t\tInertialController.__init__(self,thread)\n\t\tprint(os.path.dirname(__file__))\n\t\t# self.mouselib = CDLL(\"/Spacenavig-Library/spacenavig_python.so\")\n\t\tself.mode = TRANSLATIONAL_MODE\n\t\tif os_detect.isUnix:\n\t\t\tself.mouselib = CDLL(os.path.abspath(os.path.dirname(__file__)+\"/../Spacenavig-Library/Linux/spacenavig_python.so\"))\n\t\telse:\n\t\t\tself.mouselib = CDLL(os.path.abspath(os.path.dirname(__file__)+\"/../Spacenavig-Library/Windows/x64/Release/spacenavig_python.dll\"))\n\n\t\tself.fric_acc = 40 \n\t\tself.maxAcc = 5.0\n\t\tself.maxVel = 7.0\n\n\t\tself.readEvent = self.mouselib.readEvent\n\t\tself.initializeMouse = self.mouselib.init\n\t\tself.getMousePosition = self.mouselib.getMousePosition\n\t\tself.values = (c_float * 3)(1, 2, 3)\n\t\tself.values_ptr = pointer(self.values)\n\n\t\tself.initializeMouse()\n\t\t# print(\"INIT\", thread)\n\t\tself.is_connected = True\n\t\tself.x = HOME[0]\n\t\tself.y = HOME[1]\n\t\tself.z = HOME[2]\n\tdef setAcc(self,x,y,z):\n\t\tif(self.mode == INERTIAL_MODE):\n\t\t\tself.ax = x\n\t\t\tself.ay = y\n\t\t\tself.az = z\n\t\telif(self.mode == TRANSLATIONAL_MODE):\n\t\t\tself.vx = x\n\t\t\tself.vy = y\n\t\t\tself.vz = z\n\n\tdef tick(self):\n\t\tself.readEvent(c_double(time.time()))\n\t\tself.getMousePosition(self.values_ptr)\n\t\tl = list(self.values)\n\t\t# print(l)\n\t\tif(self.mode == INERTIAL_MODE):\n\t\t\tc = self.maxAcc/CONTROLLER_MAX_TRANSLATION\n\t\telif(self.mode == TRANSLATIONAL_MODE):\n\t\t\tc = self.maxVel/CONTROLLER_MAX_TRANSLATION\n\n\t\tz = -l[2]*c\n\t\tthreshold = 3\n\t\tif(z > -threshold and z < 0):\n\t\t\tz = 0\n\t\telif(z < 0):\n\t\t\tz += threshold\n\t\tif(z < threshold and z > 0):\n\t\t\tz = 0\n\t\telif(z > 0):\n\t\t\tz -= threshold\n\t\tself.setAcc(l[0]*c,l[1]*c,z*2)\n\n\t\tself.step(self.timeperframe)\n\n\t\t# bound2(z, Z_MIN, Z_MAX)\n\t\t# bound2(z, Z_MIN, Z_MAX)\n\t\t# bound2(z, Z_MIN, Z_MAX)\n\n\t\tdesiredPos = (self.x, self.y, self.z)\n\n\t\tboundedPos = boundDestination(self.thread.currentPos, desiredPos)\n#### print \"BOUNDED: \" + str(boundedPos)\n\t\trestrainedPos = restrainMove(self.thread.currentPos, boundedPos)\n\t\t\t# Make move distance no more than 1\" for mechanism safety.\n#### print \"RESTRAINED: \" + str(restrainedPos\n#### print \"\\n\"\n\t\tposOut = restrainedPos\n\n\t\tself.x = posOut[0]\n\t\tself.y = posOut[1]\n\t\tself.z = posOut[2]\n\n\t\t# SEND 'posOut' TO THE ROBOT, if serial connected.\n\t\tif self.thread.serConnected:\n\t\t\tself.thread.outputPosition(posOut)\n\n\t\tself.thread.currentPos = posOut # Save 
position just sent to robot.\n\t\t# self.thread.outputPosition(posOut)\n\n\t\ttime.sleep(self.timeperframe)\n\tdef start(self):\n\t\tself.running = True\n\tdef stop(self):\n\t\tself.running = False\n\t\n\n\n\n\n\n\n\n\n\t","sub_path":"Python/spacenav_controller.py","file_name":"spacenav_controller.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"133761076","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\n#���Ͻ��λ������\ndef addnumDNA(featdir, chainfile, outfile):\n fw_out = open(outfile, 'w')\n #print (featdir)\n with open(chainfile, 'r') as fr_chain:\n for eachchain in fr_chain:\n chainname = eachchain.strip()\n if not chainname:\n continue\n path_feat = featdir + '/' + chainname + '.data'\n with open(path_feat, 'r') as fo_feat:\n fr_feat = fo_feat.readlines()\n size = len(fr_feat)\n # print size\n sum = 0\n for i in range(size):\n if fr_feat[i].strip():\n sum += float(fr_feat[i])\n # print sum\n fw_out.write(str(float(sum)) + '\\n')\n fw_out.close()\n\n'''\nif __name__ == \"__main__\":\n featdir = '/ifs/gdata1/yangwenyi/Protein_DNANEW/Double/ClassI/encode/encode_numdnabind'\n chainfile = '/ifs/gdata1/yangwenyi/Protein_DNANEW/Double/ClassI/chainDouble_D1.txt'\n outfile = '/ifs/gdata1/yangwenyi/Protein_DNANEW/Double/ClassI/encode/mergedfea/added_encode_numdnabind.data'\n addnumRNA(featdir, chainfile, outfile)\n'''","sub_path":"PreDBA/code/addnumofDNAbind.py","file_name":"addnumofDNAbind.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"318135373","text":"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: EPL-2.0\n#\n\nfrom common.base_model_init import BaseModelInitializer\nfrom common.base_model_init import set_env_var\n\nimport os\nfrom argparse import ArgumentParser\n\n\nclass ModelInitializer(BaseModelInitializer):\n \"\"\"initialize mode and run benchmark for FaceNet model\"\"\"\n\n def __init__(self, args, custom_args=[], platform_util=None):\n super(ModelInitializer, self).__init__(args, custom_args, platform_util)\n self.cmd = self.get_numactl_command(self.args.socket_id) + \\\n self.python_exe + \" \"\n\n # Set KMP env vars, if they haven't already been set\n self.set_kmp_vars()\n\n pairs_file = os.path.join(self.args.model_source_dir,\n \"data/pairs.txt\")\n arg_parser = ArgumentParser(description='Parse custom args')\n arg_parser.add_argument(\n \"--lfw_pairs\", type=str,\n help=\"The file containing the pairs to use for validation.\",\n dest=\"lfw_pairs\", default=pairs_file)\n self.args = arg_parser.parse_args(\n self.custom_args, namespace=self.args)\n\n # use default batch size if -1\n if self.args.batch_size == -1 or self.args.accuracy_only:\n self.args.batch_size = 100\n\n # set num_inter_threads and num_intra_threads\n if self.args.batch_size 
> 32:\n self.set_num_inter_intra_threads(num_inter_threads=2)\n else:\n self.set_num_inter_intra_threads(num_inter_threads=1)\n\n set_env_var(\"OMP_NUM_THREADS\", self.args.num_intra_threads)\n\n run_script = os.path.join(self.args.model_source_dir,\n \"src/validate_on_lfw.py\")\n\n warmup_steps = 40\n max_steps = 1000\n if self.args.batch_size == 1:\n warmup_steps = 200\n\n cmd_args = ' ' + self.args.data_location + \\\n ' ' + self.args.checkpoint + ' --distance_metric 1' + \\\n ' --use_flipped_images' + ' --subtract_mean' + \\\n ' --use_fixed_image_standardization' + \\\n ' --num_inter_threads=' + \\\n str(self.args.num_inter_threads) + \\\n ' --num_intra_threads=' + \\\n str(self.args.num_intra_threads) + \\\n ' --lfw_batch_size=' + str(self.args.batch_size) + \\\n ' --lfw_pairs=' + self.args.lfw_pairs + \\\n ' --warmup_steps=' + str(warmup_steps) + \\\n ' --max_steps=' + str(max_steps)\n\n self.cmd = self.cmd + run_script + cmd_args\n\n def run(self):\n \"\"\"run command to enable model benchmark or accuracy measurement\"\"\"\n\n original_dir = os.getcwd()\n os.chdir(self.args.model_source_dir)\n if self.cmd:\n self.run_command(self.cmd)\n os.chdir(original_dir)\n","sub_path":"benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py","file_name":"model_init.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"64884372","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# vals program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# vals program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with vals program. If not, see .\n#\n##############################################################################\n\nfrom datetime import datetime\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\n# _____________________________________________PROCEDIMENTOS______________________________________\n\n\nclass sncp_despesa_procedimentos(osv.Model):\n _name = 'sncp.despesa.procedimentos'\n _description = u\"Procedimentos\"\n _rec_name = 'codigo_120'\n\n def unlink(self, cr, uid, ids, context=None):\n for nid in ids:\n obj = self.browse(cr, uid, nid)\n\n cr.execute(\"\"\"\n SELECT id\n FROM sncp_despesa_compromisso_dados_adic\n WHERE procedimento_id = %d\n \"\"\" % obj.id)\n\n res_dados = cr.fetchall()\n\n if len(res_dados) != 0:\n raise osv.except_osv(_(u'Aviso'), _(u'Verifique se o procedimento ' +\n unicode(obj.codigo_120)\n + u' têm associação em:\\n'\n u'1. 
Compromissos.'))\n\n return super(sncp_despesa_procedimentos, self).unlink(cr, uid, ids, context=context)\n\n _columns = {\n 'name': fields.text(u'Descrição', size=255),\n 'codigo_120': fields.integer(u'Código', size=10),\n }\n _order = 'codigo_120'\n\n _sql_constraints = [\n ('codigo_procedimentos_unique', 'unique (codigo_120)', u'Este código já existe'),\n ]\n\nsncp_despesa_procedimentos()\n# _____________________________________________FUNDAMENTOS______________________________________\n\n\nclass sncp_despesa_fundamentos(osv.Model):\n _name = 'sncp.despesa.fundamentos'\n _description = u\"Fundamentos\"\n _rec_name = 'codigo_120'\n\n def unlink(self, cr, uid, ids, context=None):\n for nid in ids:\n obj = self.browse(cr, uid, nid)\n\n cr.execute(\"\"\"\n SELECT id\n FROM sncp_despesa_compromisso_dados_adic\n WHERE fundamento_id = %d\n \"\"\" % obj.id)\n\n res_dados = cr.fetchall()\n\n if len(res_dados) != 0:\n raise osv.except_osv(_(u'Aviso'), _(u'Verifique se o fundamento ' +\n unicode(obj.codigo_120)\n + u' têm associação em:\\n'\n u'1. Compromissos.'))\n\n return super(sncp_despesa_fundamentos, self).unlink(cr, uid, ids, context=context)\n\n _columns = {\n 'name': fields.text(u'Norma legal', size=255),\n 'codigo_120': fields.char(u'Fundamento', size=9),\n }\n _order = 'codigo_120'\n\n _sql_constraints = [\n ('codigo_fundamentos_unique', 'unique (codigo_120)', u'Este código já existe'),\n ]\n\nsncp_despesa_fundamentos()\n# _____________________________________________NATUREZAS______________________________________\n\n\nclass sncp_despesa_naturezas(osv.Model):\n _name = 'sncp.despesa.naturezas'\n _description = u\"Naturezas\"\n _rec_name = 'codigo_120'\n\n def unlink(self, cr, uid, ids, context=None):\n for nid in ids:\n obj = self.browse(cr, uid, nid)\n\n cr.execute(\"\"\"\n SELECT id\n FROM sncp_despesa_compromisso_dados_adic\n WHERE natureza_id = %d\n \"\"\" % obj.id)\n\n res_dados = cr.fetchall()\n\n if len(res_dados) != 0:\n raise osv.except_osv(_(u'Aviso'), _(u'Verifique se a natureza ' +\n unicode(obj.codigo_120)\n + u' têm associação em:\\n'\n u'1. 
Compromissos.'))\n\n return super(sncp_despesa_naturezas, self).unlink(cr, uid, ids, context=context)\n\n _columns = {\n 'name': fields.char(u'Descrição', size=255),\n 'codigo_120': fields.char(u'Natureza', size=4),\n 'empreitada': fields.boolean(u'Empreitada'),\n }\n _order = 'codigo_120'\n\n _sql_constraints = [\n ('codigo_natureza_unique', 'unique (codigo_120)', u'Este código já existe'),\n ]\n\nsncp_despesa_naturezas()\n\n# _____________________________________________APROVADORES______________________________________\n\n\nclass sncp_despesa_aprovadores(osv.Model):\n _name = 'sncp.despesa.aprovadores'\n _description = u\"Aprovadores\"\n\n def name_get(self, cr, uid, ids, context=None):\n if not ids:\n return []\n if isinstance(ids, (int, long)):\n ids = [ids]\n reads = self.read(cr, uid, ids, ['name'], context=context)\n res = []\n for record in reads:\n result = u'Aprovações'\n res.append((record['id'], result))\n return res\n\n def on_change_requisicoes(self, cr, uid, ids, requisicao):\n if requisicao is True:\n limite_req = 999999999999.99\n else:\n limite_req = 0.0\n\n if len(ids) != 0:\n self.write(cr, uid, ids, {'requisicoes': requisicao, 'limite_req': limite_req})\n\n return {'value': {'requisicoes': requisicao, 'limite_req': limite_req}}\n\n def on_change_compras(self, cr, uid, ids, compras):\n if compras is True:\n limite_comp = 999999999999.99\n else:\n limite_comp = 0.0\n\n if len(ids) != 0:\n self.write(cr, uid, ids, {'compras': compras, 'limite_comp': limite_comp})\n\n return {'value': {'compras': compras, 'limite_comp': limite_comp}}\n\n def on_change_faturas(self, cr, uid, ids, faturas):\n if faturas is True:\n limite_fat = 999999999999.99\n else:\n limite_fat = 0.0\n\n if len(ids) != 0:\n self.write(cr, uid, ids, {'faturas': faturas, 'limite_fat': limite_fat})\n\n return {'value': {'faturas': faturas, 'limite_fat': limite_fat}}\n\n def on_change_pagamentos(self, cr, uid, ids, pagamentos):\n if pagamentos is True:\n limite_pagam = 999999999999.99\n else:\n limite_pagam = 0.0\n\n if len(ids) != 0:\n self.write(cr, uid, ids, {'pagamentos': pagamentos, 'limite_pagam': limite_pagam})\n\n return {'value': {'pagamentos': pagamentos, 'limite_pagam': limite_pagam}}\n\n def on_change_aprovador(self, cr, uid, ids, aprovador_id):\n db_hr_employee = self.pool.get('hr.employee')\n empregado = db_hr_employee.browse(cr, uid, aprovador_id)\n\n if empregado.resource_id.user_id.id is False:\n raise osv.except_osv(_(u'Aviso'), _(u'Associe um utilizador ao empregado '\n + unicode(empregado.name_related)+u'.'))\n\n if len(ids) != 0:\n self.write(cr, uid, ids, {'user_id': empregado.resource_id.user_id.id})\n\n return {'value': {'user_id': empregado.resource_id.user_id.id}}\n\n def valido(self, cr, uid, ids, context, vals):\n lista_aprovadores_id = self.search(cr, uid, [('user_id', '=', uid),\n ('departamento_id', '=', vals['departamento_id'])])\n\n db_hr_department = self.pool.get('hr.department')\n departamento = db_hr_department.browse(cr, uid, vals['departamento_id'])\n texto = None\n limite = None\n mensagem = None\n if len(lista_aprovadores_id) == 1:\n obj = self.browse(cr, uid, lista_aprovadores_id[0])\n\n data_inicial = datetime.strptime(obj.name, \"%Y-%m-%d %H:%M:%S\")\n data_final = datetime.strptime(obj.fim, \"%Y-%m-%d %H:%M:%S\")\n\n if vals['documento'] == 1:\n texto = u'Requisição'\n limite = obj.limite_req\n elif vals['documento'] == 2:\n texto = u'Ordem de Compra'\n limite = obj.limite_comp\n elif vals['documento'] == 3:\n texto = u'Fatura'\n limite = obj.limite_fat\n elif 
vals['documento'] == 4:\n texto = u'Ordem de Pagamento'\n limite = obj.limite_pagam\n\n if texto == u'Requisição' and obj.requisicoes is False:\n mensagem = u'Não tem permissão para autorizar ' + texto + \\\n u' do departamento ' + departamento.name\n\n elif texto == u'Ordem de Compra' and obj.compras is False:\n mensagem = u'Não tem permissão para autorizar ' + texto + \\\n u' do departamento ' + departamento.name\n\n elif texto == u'Fatura' and obj.faturas is False:\n mensagem = u'Não tem permissão para autorizar ' + texto + \\\n u' do departamento ' + departamento.name\n\n elif texto == u'Ordem de Pagamento' and obj.pagamentos is False:\n mensagem = u'Não tem permissão para autorizar ' + texto + \\\n u' do departamento ' + departamento.name\n\n if vals['datahora'] > data_final:\n mensagem = u'As suas permissões para o departamento ' + departamento.name + u' expiraram em '\\\n + unicode(data_final.date())\n\n elif vals['datahora'] < data_inicial:\n mensagem = u'Só terá permissão para autorizar ' + texto + u' do departamento ' + \\\n departamento.name + u' a partir de ' + unicode(data_inicial.date())\n\n if vals['montante'] > limite:\n mensagem = u'Só lhe é permitido autorizar ' + texto + u' até ao montante de ' + \\\n unicode(limite)\n\n if mensagem is not None:\n return [False, mensagem]\n else:\n return [True, mensagem]\n\n _columns = {\n 'aprovador_id': fields.many2one('hr.employee', u'Nome'),\n 'departamento_id': fields.many2one('hr.department', u'Departamento'),\n 'user_id': fields.many2one('res.users', u'Utilizador'),\n 'name': fields.datetime(u'Desde'),\n 'fim': fields.datetime(u'Até'),\n 'requisicoes': fields.boolean(u'Requisições'),\n 'compras': fields.boolean(u'Ordens de compra'),\n 'faturas': fields.boolean(u'Faturas'),\n 'pagamentos': fields.boolean(u'Ordens de Pagamento'),\n 'limite_req': fields.float(u'Limite para as Requisições', digits=(12, 2)),\n 'limite_comp': fields.float(u'Limite Ordens de Compra', digits=(12, 2)),\n 'limite_fat': fields.float(u'Limite para as Faturas', digits=(12, 2)),\n 'limite_pagam': fields.float(u'Limite Ordens Pagamento', digits=(12, 2)),\n 'estado': fields.integer(u''),\n }\n\n _order = 'aprovador_id,departamento_id'\n\n _defaults = {\n 'name': unicode(datetime(datetime.now().year, datetime.now().month, datetime.now().day,\n datetime.now().hour, datetime.now().minute, datetime.now().second)),\n 'fim': unicode(datetime(datetime.now().year+20, datetime.now().month, datetime.now().day,\n datetime.now().hour, datetime.now().minute, datetime.now().second)),\n 'limite_req': 0.0,\n 'limite_comp': 0.0,\n 'limite_fat': 0.0,\n 'limite_pagam': 0.0,\n }\n\n def create(self, cr, uid, vals, context=None):\n if 'aprovador_id' in vals and 'user_id' not in vals:\n db_hr_employee = self.pool.get('hr.employee')\n empregado = db_hr_employee.browse(cr, uid, vals['aprovador_id'])\n vals['user_id'] = empregado.resource_id.user_id.id\n return super(sncp_despesa_aprovadores, self).create(cr, uid, vals, context=context)\n\n def _datas_restrict(self, cr, uid, ids):\n record = self.browse(cr, uid, ids[0])\n if record.name >= record.fim:\n raise osv.except_osv(_(u'Aviso'), _(u' A data inicial deve ser inferior à data final.'))\n return True\n\n def _montante_positivo(self, cr, uid, ids):\n record = self.browse(cr, uid, ids[0])\n if record.limite_req <= 0.0 and record.requisicoes is True:\n raise osv.except_osv(_(u'Aviso'), _(u'O limite para as requisições têm de ser positivo.'))\n\n elif record.limite_comp <= 0.0 and record.compras is True:\n raise 
osv.except_osv(_(u'Aviso'), _(u'O limite para as ordens de compra têm de ser positivo.'))\n\n if record.limite_fat <= 0.0 and record.faturas is True:\n raise osv.except_osv(_(u'AViso'), _(u'O limite para as faturas têm de ser positivo.'))\n\n if record.limite_pagam <= 0.0 and record.pagamentos is True:\n raise osv.except_osv(_(u'Aviso'), _(u'O limite para as ordens de pagamento têm de ser positivo.'))\n\n return True\n\n _constraints = [\n (_datas_restrict, u'', ['name', 'fim']),\n (_montante_positivo, u'', ['limite_req', 'limite_comp', 'limite_fat', 'limite_pagam']),\n ]\n\n _sql_constraints = [\n ('aprovador_departamento_unique', 'unique (aprovador_id,departamento_id)',\n u'Duplicação de aprovador e departamento!'), ]\n\nsncp_despesa_aprovadores()","sub_path":"addons_miguel/despesa/dados_gerais.py","file_name":"dados_gerais.py","file_ext":"py","file_size_in_byte":13919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"356769964","text":"# coding: utf-8\r\n# author: HotDogDevBr.\r\n# E-mail: hotdogdevbr@gmail.com.\r\n\"\"\"\r\n Modifique o primeiro programa, listagem 2.7, de forma a calcular a\r\nsoma de três variáveis.\r\nlistagem 2.7\r\na = 2\r\nb = 3\r\nprint(a + b)\r\n\"\"\"\r\na = 2\r\nb = 3\r\nc = 5\r\nprint(a + b + c)\r\n","sub_path":"Exercicios Livro Python/capitulo 2/exercicio 2.5.py","file_name":"exercicio 2.5.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"100199745","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport imagekit.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0002_faq_priority'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Slider',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=250, null=True, verbose_name=b'Title', blank=True)),\n ('captions', models.CharField(max_length=250, null=True, verbose_name=b'Captions', blank=True)),\n ('image', imagekit.models.fields.ProcessedImageField(upload_to=b'images/slider', null=True, verbose_name=b'Image File', blank=True)),\n ('url', models.CharField(max_length=255, null=True, blank=True)),\n ('priority', models.IntegerField(default=3)),\n ],\n options={\n 'verbose_name': 'Slider',\n 'verbose_name_plural': 'Sliders',\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"apps/home/migrations/0003_slider.py","file_name":"0003_slider.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"96158835","text":"# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport itertools\nimport logging\nfrom typing import cast\n\nfrom pants.backend.python.dependency_inference import import_parser, module_mapper\nfrom pants.backend.python.dependency_inference.import_parser import (\n ParsedPythonImports,\n ParsePythonImportsRequest,\n)\nfrom pants.backend.python.dependency_inference.module_mapper import PythonModule, PythonModuleOwners\nfrom pants.backend.python.dependency_inference.python_stdlib.combined import combined_stdlib\nfrom pants.backend.python.target_types import PythonSources, PythonTestsSources\nfrom pants.backend.python.util_rules import ancestor_files, pex\nfrom 
pants.backend.python.util_rules.ancestor_files import AncestorFiles, AncestorFilesRequest\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.core.util_rules import stripped_source_files\nfrom pants.engine.addresses import Address\nfrom pants.engine.internals.graph import Owners, OwnersRequest\nfrom pants.engine.rules import Get, MultiGet, SubsystemRule, rule\nfrom pants.engine.target import (\n Dependencies,\n DependenciesRequest,\n ExplicitlyProvidedDependencies,\n HydratedSources,\n HydrateSourcesRequest,\n InferDependenciesRequest,\n InferredDependencies,\n WrappedTarget,\n)\nfrom pants.engine.unions import UnionRule\nfrom pants.option.global_options import OwnersNotFoundBehavior\nfrom pants.option.subsystem import Subsystem\nfrom pants.python.python_setup import PythonSetup\n\nlogger = logging.getLogger(__name__)\n\n\nclass PythonInferSubsystem(Subsystem):\n options_scope = \"python-infer\"\n help = \"Options controlling which dependencies will be inferred for Python targets.\"\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n register(\n \"--imports\",\n default=True,\n type=bool,\n help=(\n \"Infer a target's imported dependencies by parsing import statements from sources.\"\n ),\n )\n register(\n \"--string-imports\",\n default=False,\n type=bool,\n help=(\n \"Infer a target's dependencies based on strings that look like dynamic \"\n \"dependencies, such as Django settings files expressing dependencies as strings. \"\n \"To ignore any false positives, put `!{bad_address}` in the `dependencies` field \"\n \"of your target.\"\n ),\n )\n register(\n \"--inits\",\n default=False,\n type=bool,\n help=(\n \"Infer a target's dependencies on any __init__.py files existing for the packages \"\n \"it is located in (recursively upward in the directory structure).\\n\\nEven if this \"\n \"is disabled, Pants will still include any ancestor __init__.py files, only they \"\n \"will not be 'proper' dependencies, e.g. they will not show up in \"\n \"`./pants dependencies` and their own dependencies will not be used.\\n\\nIf you \"\n \"have empty `__init__.py` files, it's safe to leave this option off; otherwise, \"\n \"you should enable this option.\"\n ),\n )\n register(\n \"--conftests\",\n default=True,\n type=bool,\n help=(\n \"Infer a test target's dependencies on any conftest.py files in the current \"\n \"directory and ancestor directories.\"\n ),\n )\n register(\n \"--entry-points\",\n default=True,\n type=bool,\n help=(\n \"Infer dependencies on binary targets' entry points, e.g. 
`pex_binary`'s \"\n \"`entry_point` field and `python_awslambda`'s `handler` field.\"\n ),\n )\n\n @property\n def imports(self) -> bool:\n return cast(bool, self.options.imports)\n\n @property\n def string_imports(self) -> bool:\n return cast(bool, self.options.string_imports)\n\n @property\n def inits(self) -> bool:\n return cast(bool, self.options.inits)\n\n @property\n def conftests(self) -> bool:\n return cast(bool, self.options.conftests)\n\n @property\n def entry_points(self) -> bool:\n return cast(bool, self.options.entry_points)\n\n\nclass InferPythonImportDependencies(InferDependenciesRequest):\n infer_from = PythonSources\n\n\n@rule(desc=\"Inferring Python dependencies by analyzing imports\")\nasync def infer_python_dependencies_via_imports(\n request: InferPythonImportDependencies,\n python_infer_subsystem: PythonInferSubsystem,\n python_setup: PythonSetup,\n) -> InferredDependencies:\n if not python_infer_subsystem.imports:\n return InferredDependencies([], sibling_dependencies_inferrable=False)\n\n wrapped_tgt = await Get(WrappedTarget, Address, request.sources_field.address)\n explicitly_provided_deps, detected_imports = await MultiGet(\n Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])),\n Get(\n ParsedPythonImports,\n ParsePythonImportsRequest(\n request.sources_field,\n InterpreterConstraints.create_from_targets([wrapped_tgt.target], python_setup),\n string_imports=python_infer_subsystem.string_imports,\n ),\n ),\n )\n relevant_imports = detected_imports - combined_stdlib\n\n owners_per_import = await MultiGet(\n Get(PythonModuleOwners, PythonModule(imported_module))\n for imported_module in relevant_imports\n )\n merged_result: set[Address] = set()\n for owners, imp in zip(owners_per_import, relevant_imports):\n merged_result.update(owners.unambiguous)\n address = wrapped_tgt.target.address\n explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(\n owners.ambiguous,\n address,\n import_reference=\"module\",\n context=f\"The target {address} imports `{imp}`\",\n )\n maybe_disambiguated = explicitly_provided_deps.disambiguated_via_ignores(owners.ambiguous)\n if maybe_disambiguated:\n merged_result.add(maybe_disambiguated)\n\n return InferredDependencies(sorted(merged_result), sibling_dependencies_inferrable=True)\n\n\nclass InferInitDependencies(InferDependenciesRequest):\n infer_from = PythonSources\n\n\n@rule(desc=\"Inferring dependencies on `__init__.py` files\")\nasync def infer_python_init_dependencies(\n request: InferInitDependencies, python_infer_subsystem: PythonInferSubsystem\n) -> InferredDependencies:\n if not python_infer_subsystem.inits:\n return InferredDependencies([], sibling_dependencies_inferrable=False)\n\n # Locate __init__.py files not already in the Snapshot.\n hydrated_sources = await Get(HydratedSources, HydrateSourcesRequest(request.sources_field))\n extra_init_files = await Get(\n AncestorFiles,\n AncestorFilesRequest(\"__init__.py\", hydrated_sources.snapshot),\n )\n\n # And add dependencies on their owners.\n # NB: Because the python_sources rules always locate __init__.py files, and will trigger an\n # error for files that have content but have not already been included via a dependency, we\n # don't need to error for unowned files here.\n owners = await MultiGet(\n Get(Owners, OwnersRequest((f,))) for f in extra_init_files.snapshot.files\n )\n return InferredDependencies(\n itertools.chain.from_iterable(owners), sibling_dependencies_inferrable=False\n )\n\n\nclass 
InferConftestDependencies(InferDependenciesRequest):\n infer_from = PythonTestsSources\n\n\n@rule(desc=\"Inferring dependencies on `conftest.py` files\")\nasync def infer_python_conftest_dependencies(\n request: InferConftestDependencies,\n python_infer_subsystem: PythonInferSubsystem,\n) -> InferredDependencies:\n if not python_infer_subsystem.conftests:\n return InferredDependencies([], sibling_dependencies_inferrable=False)\n\n # Locate conftest.py files not already in the Snapshot.\n hydrated_sources = await Get(HydratedSources, HydrateSourcesRequest(request.sources_field))\n extra_conftest_files = await Get(\n AncestorFiles,\n AncestorFilesRequest(\"conftest.py\", hydrated_sources.snapshot),\n )\n\n # And add dependencies on their owners.\n # NB: Because conftest.py files effectively always have content, we require an owning target.\n owners = await MultiGet(\n Get(Owners, OwnersRequest((f,), OwnersNotFoundBehavior.error))\n for f in extra_conftest_files.snapshot.files\n )\n return InferredDependencies(\n itertools.chain.from_iterable(owners), sibling_dependencies_inferrable=False\n )\n\n\n# This is a separate function to facilitate tests registering import inference.\ndef import_rules():\n return [\n infer_python_dependencies_via_imports,\n *pex.rules(),\n *import_parser.rules(),\n *module_mapper.rules(),\n *stripped_source_files.rules(),\n SubsystemRule(PythonInferSubsystem),\n SubsystemRule(PythonSetup),\n UnionRule(InferDependenciesRequest, InferPythonImportDependencies),\n ]\n\n\ndef rules():\n return [\n *import_rules(),\n infer_python_init_dependencies,\n infer_python_conftest_dependencies,\n *ancestor_files.rules(),\n UnionRule(InferDependenciesRequest, InferInitDependencies),\n UnionRule(InferDependenciesRequest, InferConftestDependencies),\n ]\n","sub_path":"src/python/pants/backend/python/dependency_inference/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":9614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"75617361","text":"N = int(input()) # total no. 
of tickets issued for lottery\r\nD = int(input()) # number of people who buy a ticket\r\nX = int(input()) # people that will be given prizes\r\n\r\ndef fun(n, d, x):\r\n\tdigit_sum = 0\r\n\twhile n > 0:\r\n\t\tdigit_sum = digit_sum + n % 10\r\n\t\tn = n // 10\r\n\treturn (d // x) % digit_sum\r\n\r\nprint(fun(N, D, X))\r\n","sub_path":"JavaScript turtorial/jack.py","file_name":"jack.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"271850631","text":"\"\"\"Metadata for adapters.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nDEFAULT_NODE = \"master\"\n\nCSV_FILENAME = \"badwolf.csv\"\nCSV_FIELDS = [\"mac_address\", \"field1\"]\nCSV_ROW = [\"01:37:53:9E:82:7C\", \"e\"]\nCSV_FILECONTENTS = [\",\".join(CSV_FIELDS), \",\".join(CSV_ROW)]\nCSV_FILECONTENT_STR = \"\\r\\n\".join(CSV_FILECONTENTS) + \"\\r\\n\"\nCSV_FILECONTENT_BYTES = CSV_FILECONTENT_STR.encode()\n\nFAKE_CNX_OK = {\n    \"adapter_name\": \"fluff1\",\n    \"adapter_name_raw\": \"fluff1_adapter\",\n    \"id\": \"foobar1\",\n    \"node_name\": \"xbxb\",\n    \"node_id\": \"xbxb\",\n    \"uuid\": \"abc123\",\n    \"status\": True,\n}\nFAKE_CNX_BAD = {\n    \"adapter_name\": \"fluff2\",\n    \"adapter_name_raw\": \"fluff2_adapter\",\n    \"node_name\": \"xbxb\",\n    \"node_id\": \"xbxb\",\n    \"id\": \"foobar2\",\n    \"uuid\": \"zxy987\",\n    \"status\": False,\n}\nFAKE_CNXS = [FAKE_CNX_OK, FAKE_CNX_BAD]\nFAKE_ADAPTER_CNXS_OK = {\n    \"cnx\": [FAKE_CNX_OK],\n    \"name\": \"fluff1\",\n    \"name_raw\": \"fluff1_adapter\",\n    \"node_name\": DEFAULT_NODE,\n    \"cnx_count\": 1,\n    \"status\": True,\n}\nFAKE_ADAPTER_CNXS_BAD = {\n    \"cnx\": FAKE_CNXS,\n    \"name\": \"fluff2\",\n    \"name_raw\": \"fluff2_adapter\",\n    \"node_name\": DEFAULT_NODE,\n    \"cnx_count\": 2,\n    \"status\": False,\n}\nFAKE_ADAPTER_NOCLIENTS = {\n    \"cnx\": [],\n    \"name\": \"fluff3\",\n    \"name_raw\": \"fluff3_adapter\",\n    \"node_name\": DEFAULT_NODE,\n    \"cnx_count\": 0,\n    \"status\": None,\n}\nFAKE_ADAPTERS = [FAKE_ADAPTER_CNXS_BAD, FAKE_ADAPTER_CNXS_OK, FAKE_ADAPTER_NOCLIENTS]\nFAKE_FILE = \"badwolf_fake.txt\"\nFAKE_FILE_CONTENTS = \"id,hostname\\nbadwolf9131,badwolf\\n\"\nAD_CONFIG_SCHEMA_LIST = [\n    [\"dc_name\", \"badwolf_default\"],\n    [\"user\", CSV_FILENAME],\n    [\"password\", CSV_FILENAME],\n    [\"do_not_fetch_users\", False],\n    [\"fetch_disabled_devices\", True],\n    [\"fetch_disabled_users\", True],\n    [\"dns_server_address\", \"badwolf\"],\n    [\"alternative_dns_suffix\", \"badwolf\"],\n    [\"use_ssl\", \"Unencrypted\"],\n    [\"ca_file\", FAKE_FILE],\n    [\"is_ad_gc\", \"n\"],\n    [\"ldap_ou_whitelist\", \"badwolf1,badwolf2\"],\n]\nAD_CONFIG_SCHEMA = dict(AD_CONFIG_SCHEMA_LIST)\n","sub_path":"axonius_api_client/tests/meta/adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"306946301","text":"import torch\r\nimport logging\r\nimport argparse\r\nimport time\r\n\r\nfrom utils.functions import print_and_log, save_pic, mkdir_if_not_exists\r\n\r\n\r\nclass MyTrainer():\r\n    \"\"\"Argument parser class\"\"\"\r\n    def __init__(self, NAME=\"Demo\"):\r\n        self.NAME = NAME\r\n        self.init_parser()  # initialize the parser\r\n        self.log_args()  # print the training arguments\r\n\r\n    def init_parser(self):\r\n        \"\"\"The parser can be customized here\"\"\"\r\n        parser = argparse.ArgumentParser()\r\n        parser.add_argument('--num-worker', type=int, default=8)\r\n        parser.add_argument('--epochs', type=int, default=100)\r\n        parser.add_argument('--batch-size', type=int, default=128)\r\n        
parser.add_argument('--debug', action='store_true')\r\n        parser.add_argument('--gpu', type=str, default=\"0\")\r\n        parser.add_argument('--checkpoint', type=str, default=\"\")\r\n        self.parser = parser\r\n\r\n    def log_args(self):\r\n\r\n        self.args = self.parser.parse_args()\r\n\r\n        # detect the available device and configure the server GPU\r\n        if torch.cuda.is_available() and self.args.gpu != \"X\":\r\n            self.device = torch.device(\"cuda:\" + self.args.gpu)\r\n        else:\r\n            self.device = torch.device(\"cpu\")\r\n\r\n        # create the folder for this training run; models, checkpoints and loss info are all saved under it\r\n        training_start_time = str(time.time()).split('.')[0]\r\n        self.model_dir = './log/' + self.NAME + '/' + training_start_time\r\n\r\n        mkdir_if_not_exists(self.model_dir, parents=True)\r\n\r\n        if self.args.checkpoint:\r\n            self.model_dir = self.args.checkpoint\r\n\r\n        # configure the log path and logging format\r\n        if self.args.debug:\r\n            level = logging.DEBUG\r\n        else:\r\n            level = logging.INFO\r\n        logging.basicConfig(\r\n            filename=self.model_dir + '/training.log',\r\n            format='[%(levelname)s] %(asctime)s %(filename)s[line:%(lineno)d]: %(message)s',\r\n            level=level)\r\n\r\n        msg = \"---\\n\"\r\n        msg += \"Name: {}\\nDevice: {}\\nDebug: {}\\n---\\n\".format(self.NAME, self.device, self.args.debug)\r\n        msg += \"Epochs: {}\\nBatch Size: {}\\nNum Worker: {}\\n---\\n\".format(\r\n            self.args.epochs, self.args.batch_size, self.args.num_worker)\r\n        print_and_log(msg)\r\n        \r\n\r\n    def start_with_checkpoint(self):\r\n        pass\r\n\r\n\r\n    def save_checkpoint(self):\r\n        pass\r\n","sub_path":"trainer/my_trainer.py","file_name":"my_trainer.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"37690975","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom gtts import gTTS\nimport os\nimport json\n\nwith open(\"BD.txt\") as f:\n\ta = json.load(f)\ni = 0\nfor k in a.items():\n\ttts = gTTS(text=k[0], lang='pt')\n\ttts.save(\"mp3/question/\" + str(i) + \".mp3\")\n\ttts = gTTS(text=k[1], lang='pt')\n\ttts.save(\"mp3/answer/\" + str(i) + \".mp3\")\n\ti += 1","sub_path":"import requests.py","file_name":"import requests.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"357718509","text":"from openpyxl import workbook\r\nfrom openpyxl import worksheet\r\nfrom openpyxl import Workbook\r\nfrom openpyxl import load_workbook\r\nimport os.path as op \r\nimport os\r\nimport csv\r\n\r\nschool_year = input(r'School Year:')\r\nsection = input(r'Section:')\r\nactivitytype = input(r'Activity Type:')\r\nactivityno = int(input(r'Activity Number:'))\r\nstudwmisact = input(r'Student with Missing Activity:')\r\n\r\ndirectory = os.getcwd()\r\ndirectory = directory + r'\\\\Sheets\\\\'\r\nfiletype = r\".xlsx\"\r\nsydirectory = directory + school_year + r'\\\\' + section + r'\\\\'\r\n\r\nwith open(sydirectory + 'Number of Students and Activities.csv', 'rt') as f:\r\n    data = csv.DictReader(f)\r\n    for row in data:\r\n        noStuds = row['No. 
of Students']\r\n        noStuds = int(noStuds)\r\n\r\nfiledestination = sydirectory + section + filetype\r\n\r\nfile_exists = op.isfile(filedestination)\r\n\r\nwb = Workbook()\r\nwb = load_workbook(filedestination)\r\nsheet = wb[section]\r\n\r\nif activitytype == 'Performance':\r\n    for x in range(noStuds):\r\n        studentcell = sheet.cell(row = x + 3, column = 1).value\r\n        highestpossiblegrade = sheet.cell(row = noStuds+2, column = activityno+1).value\r\n        print(\"The highest possible grade of this activity is:\", highestpossiblegrade)\r\n        if studentcell == studwmisact:\r\n            activitycell = sheet.cell(row = x + 3, column = activityno + 1)\r\n            studentgrade = int(input(r'Student Grade:'))\r\n            activitycell.value = studentgrade\r\n            wb.save(filename = filedestination)\r\n            break\r\n\r\nif activitytype == 'Written':\r\n    for x in range(noStuds):\r\n        studentcell = sheet.cell(row = x + 3, column = 1).value\r\n        highestpossiblegrade = sheet.cell(row = noStuds+2, column = activityno+11).value\r\n        print(\"The highest possible grade of this activity is:\", highestpossiblegrade)\r\n        if studentcell == studwmisact:\r\n            activitycell = sheet.cell(row = x + 3, column = activityno + 11)\r\n            studentgrade = int(input(r'Student Grade:'))\r\n            activitycell.value = studentgrade\r\n            wb.save(filename = filedestination)\r\n            break\r\n","sub_path":"StuGraDP/scripts/owmissingactivty.py","file_name":"owmissingactivty.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"476068753","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# read mnist data from tensorflow example\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot = True)\nx_train = mnist.train.images\ny_train = mnist.train.labels\nx_test = mnist.test.images\ny_test = mnist.test.labels\n\n# look into the feature of the data\n# [data_num, feature_num]\nprint('--- x train shape ---')\nprint(x_train.shape)\n# [data_num, output_num]\nprint('--- y train shape ---')\nprint(y_train.shape)\nprint('--- x test shape ---')\nprint(x_test.shape)\nprint('--- y test shape ---')\nprint(y_test.shape)\n\n\nprint('---- input ----')\nprint(x_train[0, :])\nprint('---- output ----')\nprint(np.argmax(y_train[1, :]))\n\n# config\nlr = 0.01 # learning rate\ntrain_steps = 1000\nbatch_size = 100\nlogs_path = 'tensorboard/'\nn_features = x_train.shape[1] # 784\nn_labels = y_train.shape[1] # 10\n\nwith tf.Session(config=tf.ConfigProto()) as sess:\n    with tf.name_scope('inputs'):\n        x = tf.placeholder(tf.float32, [None, n_features], name = 'digit_input')\n\n    with tf.name_scope('labels'):\n        y = tf.placeholder(tf.float32, [None, n_labels], name = 'digit_label')\n\n    # build variables\n    with tf.name_scope('params'):\n        w = tf.Variable(tf.zeros([n_features, n_labels]), name = 'weight')\n        b = tf.Variable(tf.zeros([n_labels]), name = 'bias')\n\n    # build model\n    with tf.name_scope('model'):\n        prediction = tf.nn.softmax(tf.matmul(x, w) + b)\n\n    # define loss\n    with tf.name_scope('loss'):\n        loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(prediction), reduction_indices = 1))\n        tf.summary.scalar('Loss', loss)\n\n    # Gradient Descent\n    with tf.name_scope('gd'):\n        optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss)\n\n    with tf.name_scope('accuracy'):\n        correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n        tf.summary.scalar('Accuracy', acc)\n\n    # start run\n    
initializer = tf.global_variables_initializer()\n sess.run(initializer)\n\n # display data\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(logs_path, graph = sess.graph)\n\n for step in range(train_steps):\n x_batch, y_batch = mnist.train.next_batch(batch_size)\n sess.run(optimizer, feed_dict = {x: x_batch, y: y_batch})\n\n if step % 50 == 0:\n l, summary = sess.run([loss, merged], feed_dict = {x: x_batch, y: y_batch})\n ac = sess.run(acc, feed_dict={x: x_test, y: y_test})\n writer.add_summary(summary, step)\n\n print(\"Accuracy: %.2f, Loss: %.2f\" % (ac, l))\n","sub_path":"week2/mnist-hw1.py","file_name":"mnist-hw1.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"264208848","text":"from django.db import models\nfrom django.core.validators import MaxValueValidator\nimport datetime\n\n\n# Create your models here.\nclass Author(models.Model):\n first_name = models.CharField(\n 'name', max_length=32, blank=True)\n second_name = models.CharField(\n 'second name', max_length=32, blank=True)\n last_name = models.CharField(\n 'last name', max_length=32, blank=True)\n\n def __str__(self):\n return \"{}. {}. {}\".format(self.first_name[0], self.second_name[0], self.last_name)\n\n\nclass Genre(models.Model):\n name = models.TextField('name')\n\n def __str__(self):\n return self.name\n\n\nclass CycleOfStories(models.Model):\n title = models.CharField(\n 'title', max_length=126, blank=True)\n author = models.ForeignKey(\n Author, models.PROTECT, verbose_name='author', blank=True)\n\n def __str__(self):\n return \"{}, {}\".format(self.title, self.author)\n\n\nclass Book(models.Model):\n current_year = datetime.datetime.now().year\n\n title = models.CharField(\n 'title', max_length=126, blank=True)\n author = models.ForeignKey(\n Author, models.PROTECT, verbose_name='author', blank=True)\n cycle = models.ForeignKey(\n CycleOfStories, models.PROTECT, verbose_name='cycle of stories', blank=True)\n genre = models.ManyToManyField(\n Genre, verbose_name='genre'\n )\n year_of_publication = models.PositiveIntegerField(\n 'year of publication', default=current_year, validators=[MaxValueValidator(current_year)], blank=True)\n description = models.TextField(\n 'description', blank=True)\n\n book_path = 'test'\n\n image = models.ImageField(\n 'image', upload_to=book_path, blank=True)\n file_txt = models.FileField(\n '*.txt', upload_to=book_path, blank=True)\n file_fb2 = models.FileField(\n '*.fb2', upload_to=book_path, blank=True)\n\n def __str__(self):\n return \"{}, {}\".format(self.title, self.author)\n","sub_path":"books_server/books_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"615761384","text":"# -*- coding: utf8 -*-\n\"\"\"Utility and helper methods for script.\n\nutil\n~~~~\n\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, \\\n with_statement, unicode_literals\n\nimport collections\nimport sys\nimport re\n\nfrom ._compat import string_types, text_type, unichr\n\n\ndef ucn_to_unicode(ucn):\n \"\"\"Convert a Unicode Universal Character Number (e.g. 
\"U+4E00\" or \"4E00\") to Python unicode (u'\\\\u4e00')\"\"\"\n if isinstance(ucn, string_types):\n ucn = ucn.strip(\"U+\")\n if len(ucn) > int(4):\n char = b'\\U' + format(int(ucn, 16), '08x').encode('latin1')\n char = char.decode('unicode_escape')\n else:\n char = unichr(int(ucn, 16))\n else:\n char = unichr(ucn)\n\n assert isinstance(char, text_type)\n\n return char\n\n\ndef ucnstring_to_python(ucn_string):\n \"\"\"Return string with Unicode UCN (e.g. \"U+4E00\") to native Python Unicode\n (u'\\\\u4e00').\n \"\"\"\n res = re.findall(\"U\\+[0-9a-fA-F]*\", ucn_string)\n for r in res:\n ucn_string = ucn_string.replace(text_type(r), text_type(ucn_to_unicode(r)))\n\n ucn_string = ucn_string.encode('utf-8')\n\n assert isinstance(ucn_string, bytes)\n return ucn_string\n\n\ndef ucnstring_to_unicode(ucn_string):\n \"\"\"Return ucnstring as Unicode.\"\"\"\n ucn_string = ucnstring_to_python(ucn_string).decode('utf-8')\n\n assert isinstance(ucn_string, text_type)\n return ucn_string\n\n\ndef _dl_progress(count, block_size, total_size, out=sys.stdout):\n \"\"\"\n MIT License: https://github.com/okfn/dpm-old/blob/master/dpm/util.py\n\n Modification for testing: http://stackoverflow.com/a/4220278\n\n \"\"\"\n def format_size(bytes):\n if bytes > 1000 * 1000:\n return '%.1fMb' % (bytes / 1000.0 / 1000)\n elif bytes > 10 * 1000:\n return '%iKb' % (bytes / 1000)\n elif bytes > 1000:\n return '%.1fKb' % (bytes / 1000.0)\n else:\n return '%ib' % bytes\n\n if not count:\n print('Total size: %s' % format_size(total_size))\n last_percent = int((count - 1) * block_size * 100 / total_size)\n # may have downloaded less if count*block_size > total_size\n maxdownloaded = count * block_size\n percent = min(int(maxdownloaded * 100 / total_size), 100)\n if percent > last_percent:\n # TODO: is this acceptable? 
Do we want to do something nicer?\n out.write(\n '%3d%% [%s>%s]\\r' % (\n percent,\n int(percent / 2) * '=',\n int(50 - percent / 2) * ' '\n )\n )\n out.flush()\n if maxdownloaded >= total_size:\n print('\\n')\n\n\n# Code from https://github.com/pypa/warehouse\n# Copyright 2013 Donald Stufft\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass AttributeDict(dict):\n\n def __getattr__(self, name):\n if not name in self:\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(\n self.__class__,\n name,\n ))\n\n return self[name]\n\n\ndef merge_dict(base, additional):\n if base is None:\n return additional\n\n if additional is None:\n return base\n\n if not (isinstance(base, collections.Mapping)\n and isinstance(additional, collections.Mapping)):\n return additional\n\n merged = base\n for key, value in additional.items():\n if isinstance(value, collections.Mapping):\n merged[key] = merge_dict(merged.get(key), value)\n else:\n merged[key] = value\n\n return merged\n\n\ndef convert_to_attr_dict(dictionary):\n output = {}\n for key, value in dictionary.items():\n if isinstance(value, collections.Mapping):\n output[key] = convert_to_attr_dict(value)\n else:\n output[key] = value\n return AttributeDict(output)\n","sub_path":"scripts/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"69735523","text":"#!/usr/bin/env python3\n\n\"\"\" Copyright © 2020 Borys Olifirov\n\nTest experiment with NP-EGTA + Fluo-4 in HEK cells.\n24-27,07.2020\n\n\"\"\"\n\nimport sys\nimport os\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom mpl_toolkits.mplot3d import Axes3D\n\nsys.path.append('modules')\nimport oifpars as op\nimport edge\n\n\n\nplt.style.use('dark_background')\nplt.rcParams['figure.facecolor'] = '#272b30'\nplt.rcParams['image.cmap'] = 'inferno'\n\nFORMAT = \"%(asctime)s| %(levelname)s [%(filename)s: - %(funcName)20s] %(message)s\"\nlogging.basicConfig(level=logging.INFO,\n format=FORMAT)\n\n\n\ndata_path = os.path.join(sys.path[0], 'fluo_data')\n\nall_cells = op.WDPars(data_path)\none_cell = 2\n\n# # Fluo-4 bleachin experiment\n# df = pd.DataFrame(columns=['cell', 'exp', 'cycl', 'time', 'int'])\n# for cell_num in range(0, len(all_cells)):\n# cell = all_cells[cell_num]\n# series_int = cell.relInt()\n# for single_num in range(len(series_int)):\n# single_int = series_int[single_num]\n# df = df.append(pd.Series([int(cell_num+1), cell.exposure, cell.cycles, int(single_num+1), single_int],\n# index=df.columns),\n# ignore_index=True)\n\n\n# PA in loading solution experiment (1 - no PA, 2 - with PA)\ndf = pd.DataFrame(columns=['cell', 'PA', 'time', 'int'])\nfor cell_num in range(0, len(all_cells)):\n cell = all_cells[cell_num]\n logging.info('Image {} in progress'.format(cell.img_name))\n\n series_int, mask, 
gauss = cell.relInt(high_lim=0.8, init_low=0.05, mask_diff=40, sigma=3, noise_size=40)\n\n    # try:  # register exceptions from lowHyst function\n    # \tseries_int, mask, gauss = cell.relInt(high_lim=0.8, init_low=0.05, mask_diff=40, sigma=3, noise_size=40)\n    # except RuntimeError:\n    # \tlogging.fatal('For image {} relative intensity DON`T calculated, RE!\\n'.format(cell.img_name))\n    # \tcontinue\n    # except ValueError:\n    # \tlogging.fatal('For image {} relative intensity DON`T calculated, VE!\\n'.format(cell.img_name))\n    # \tcontinue\n\n    loading_type = cell.load\n    for single_num in range(len(series_int)):\n        single_int = series_int[single_num]\n        df = df.append(pd.Series([int(cell_num+1), loading_type, int(single_num+1), single_int],\n                       index=df.columns),\n                       ignore_index=True)\n\ndf.to_csv('results.csv', index=False)\n\n\nax0 = plt.subplot(131)\nslc0 = ax0.imshow(all_cells[one_cell].max_frame)\nslc0.set_clim(vmin=0, vmax=np.max(all_cells[one_cell].max_frame)) \ndiv0 = make_axes_locatable(ax0)\ncax0 = div0.append_axes('right', size='3%', pad=0.1)\nplt.colorbar(slc0, cax=cax0)\nax0.set_title(all_cells[one_cell].img_name)\n\nax1 = plt.subplot(133)\nax1.imshow(all_cells[one_cell].cell_mask)\nax1.set_title('mask')\n\nax2 = plt.subplot(132)\nslc2 = ax2.imshow(all_cells[one_cell].max_gauss)\n# slc2.set_clim(vmin=0, vmax=np.max(all_cells[one_cell].max_frame)) \ndiv2 = make_axes_locatable(ax2)\ncax2 = div2.append_axes('right', size='3%', pad=0.1)\nplt.colorbar(slc2, cax=cax2)\nax2.set_title('gauss')\n\n\nplt.tight_layout()\nplt.show()\n\n\n","sub_path":"fluo_test.py","file_name":"fluo_test.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"510142590","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# constant definitions\nORG_WINDOW_NAME = \"org\"\nGRAY_WINDOW_NAME = \"gray\"\nCANNY_WINDOW_NAME = \"canny\"\n\nORG_FILE_NAME = \"data/isojin.jpg\"\nGRAY_FILE_NAME = \"gray.jpg\"\nCANNY_FILE_NAME = \"canny.jpg\"\n\n# load the original image\norg_img = cv2.imread(ORG_FILE_NAME, cv2.IMREAD_UNCHANGED)\n#org_img = cv2.cvtColor(org_img, cv2.COLOR_RGB2BGR)\n\n# load the background image\nback_img = cv2.imread(\"data/background2.jpg\", cv2.IMREAD_UNCHANGED)\n\n# match the image sizes\nheight,width =[800,1100]\nprint(height,width)\n\norg_img = cv2.resize(org_img, dsize=(width,height))\nback_img = cv2.resize(back_img, dsize=(width,height))\nheight,width = org_img.shape[:2]\nprint(height,width)\n\n \n# convert to grayscale\ngray_img = cv2.cvtColor(org_img, cv2.COLOR_BGR2GRAY)\n\n# smooth the single-channel image with repeated bilateral filtering\nfor i in range(10):\n    gray_img = cv2.bilateralFilter(gray_img, 15, 20, 20)\n\n# edge extraction\ncanny_img = cv2.Canny(gray_img, 50, 110)\n\n# show in windows\n\nfig = plt.figure(figsize=(16,9))\nplt.subplot(121)\nplt.title(ORG_WINDOW_NAME)\nplt.imshow(org_img)\nplt.subplot(122)\nplt.title(GRAY_WINDOW_NAME)\nplt.imshow(gray_img)\n\nfig = plt.figure(figsize=(16,9))\nplt.title(CANNY_WINDOW_NAME)\nplt.imshow(canny_img)\n\nplt.show()\n\n\nkernel = np.ones((200,200),np.uint8)\nmask = cv2.morphologyEx(canny_img, cv2.MORPH_CLOSE, kernel)\nimv_mask=cv2.bitwise_not(mask)\n\nfig = plt.figure(figsize=(16,9))\nplt.subplot(121)\nplt.title(\"maskedimg\")\nplt.imshow(mask)\nplt.subplot(122)\nplt.title(\"inv_maskedimg\")\nplt.imshow(imv_mask)\nplt.show()\n\n\ntrimed_img = cv2.bitwise_and(org_img, org_img, mask=mask)\nfig = plt.figure(figsize=(16,9))\n\n\nplt.title(\"trimed_img\")\nplt.imshow(trimed_img)\nplt.show()\n\n# build the background for compositing\nback_img = 
cv2.bitwise_and(back_img, back_img,mask=imv_mask)\n\n\nfig = plt.figure(figsize=(16,9))\nplt.subplot(121)\nplt.title(\"back_img\")\nplt.imshow(back_img)\nplt.show()\n\n\n#trimed_img = cv2.resize(trimed_img, dsize=(width,height))\nback_img_output = cv2.cvtColor(back_img, cv2.COLOR_RGB2BGR)\noverlayed_img = np.minimum( trimed_img+back_img_output, 255).astype(np.uint8)\n\nfig = plt.figure(figsize=(16,9))\nplt.title(\"fusion_img\")\nplt.imshow(overlayed_img)\nplt.show()\n","sub_path":"Synthesis.py","file_name":"Synthesis.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"652811959","text":"from panda_env import PandaEnv\nfrom spinup import ppo_pytorch as ppo\nimport torch.nn as nn\nimport gym\nimport gym_panda\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport cv2\n\nimport spinup.algos.pytorch.ppo.core as core\n\n\nclass ProcessFrame84(gym.ObservationWrapper):\n    def __init__(self, env=None):\n        super(ProcessFrame84, self).__init__(env)\n        self.env = env\n        self.observation_space = gym.spaces.Box(\n            low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)\n\n    def observation(self, obs):\n        return ProcessFrame84.process(self.env.render(mode='rgb_array'))\n\n    @staticmethod\n    def process(frame):\n        if frame.size == 720 * 960 * 3:\n            img = np.reshape(frame, [720, 960, 3]).astype(\n                np.float32)\n        else:\n            assert False, \"Unknown resolution.\"\n        # ITU-R BT.601 luma weights for RGB-to-gray conversion\n        img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + \\\n            img[:, :, 2] * 0.114\n\n        resized_screen = cv2.resize(\n            img, (112, 84), interpolation=cv2.INTER_AREA)\n        y_t = resized_screen[:, 14:98]\n        y_t = np.reshape(y_t, [84, 84, 1])\n        return y_t.astype(np.uint8)\n\nclass ImageToPyTorch(gym.ObservationWrapper):\n    def __init__(self, env):\n        super(ImageToPyTorch, self).__init__(env)\n        old_shape = self.observation_space.shape\n        new_shape = (old_shape[-1], old_shape[0], old_shape[1])\n        self.observation_space = gym.spaces.Box(\n            low=0.0, high=1.0, shape=new_shape, dtype=np.float32)\n\n    def observation(self, observation):\n        return np.moveaxis(observation, 2, 0)\n\nclass MoveTowardZ(gym.ActionWrapper):\n    def __init__(self, env):\n        super(MoveTowardZ, self).__init__(env)\n\n    def action(self, action):\n        action[2] = -.3\n        return action\n\n\nenv = gym.make('panda-v0')\nenv = ProcessFrame84(env)\nenv = ImageToPyTorch(env)\nenv = MoveTowardZ(env)\n\n# image = env.reset()\n# plt.figure()\n# plt.imshow(image.squeeze(),cmap='gray')\n# plt.title('Example extracted screen')\n# plt.show()\n\n\n\nenv_fn = lambda : env\nac_kwargs = dict(hidden_sizes=[18,64,64], activation=nn.ReLU)\n\nlogger_kwargs = dict(output_dir='spinup', exp_name='panda_ppo')\n\nppo(env_fn=env_fn,actor_critic=core.CNNActorCritic, ac_kwargs=ac_kwargs, steps_per_epoch=5000, epochs=250, logger_kwargs=logger_kwargs)","sub_path":"robots/panda/pand_ppo.py","file_name":"pand_ppo.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"621184190","text":"#!/usr/bin/env python\nimport numpy as np\nimport pickle\nimport rospy\nimport argparse\n\nfrom sensor_stick.pcl_helper import *\nfrom sensor_stick.training_helper import spawn_model\nfrom sensor_stick.training_helper import delete_model\nfrom sensor_stick.training_helper import initial_setup\nfrom sensor_stick.training_helper import capture_sample\nfrom sensor_stick.features import compute_color_histograms\nfrom sensor_stick.features import compute_normal_histograms\nfrom sensor_stick.srv import 
GetNormals\nfrom geometry_msgs.msg import Pose\nfrom sensor_msgs.msg import PointCloud2\n\n\ndef get_normals(cloud):\n get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)\n return get_normals_prox(cloud).cluster\n\n\nif __name__ == '__main__':\n rospy.init_node('capture_node')\n\n # Argument Parsing\n parser = argparse.ArgumentParser()\n parser.add_argument('-l', '--list', action='store', dest='num',\n help='Pick a valid list number', type=int, required=True)\n parser.add_argument('-t', '--times', action='store', dest='num_times', default = 50,\n help='Number of times to capture features for each model', type=int)\n parser.add_argument('-no_hsv', action='store_false', dest='flag', default = True,\n help='Compute color hist using HSV')\n args = parser.parse_args()\n pick_list = args.num\n ntimes = args.num_times\n hsv_flag = args.flag\n\n # Capture features based on user input\n if pick_list == 1:\n print(\"Capturing features for pick_list_1\")\n models = [\\\n 'biscuits',\n 'soap',\n 'soap2']\n elif pick_list == 2:\n print(\"Capturing features for pick_list_2\")\n models = [\\\n 'biscuits',\n 'soap',\n 'book',\n 'soap2',\n 'glue']\n elif pick_list == 3:\n print(\"Capturing features for pick_list_3\")\n models = [\\\n 'sticky_notes',\n 'book',\n 'snacks',\n 'biscuits',\n 'eraser',\n 'soap2',\n 'soap',\n 'glue']\n else:\n print(\"Capturing features for general list\")\n models = [\\\n 'beer',\n 'bowl',\n 'create',\n 'disk_part',\n 'hammer',\n 'plastic_cup',\n 'soda_can']\n\n\n # Disable gravity and delete the ground plane\n initial_setup()\n labeled_features = []\n\n for model_name in models:\n spawn_model(model_name)\n\n for i in range(ntimes):\n # make five attempts to get a valid a point cloud then give up\n sample_was_good = False\n try_count = 0\n while not sample_was_good and try_count < 5:\n sample_cloud = capture_sample()\n sample_cloud_arr = ros_to_pcl(sample_cloud).to_array()\n\n # Check for invalid clouds.\n if sample_cloud_arr.shape[0] == 0:\n print('Invalid cloud detected')\n try_count += 1\n else:\n sample_was_good = True\n\n # Extract histogram features\n chists = compute_color_histograms(sample_cloud, using_hsv=hsv_flag)\n normals = get_normals(sample_cloud)\n nhists = compute_normal_histograms(normals)\n feature = np.concatenate((chists, nhists))\n labeled_features.append([feature, model_name])\n\n delete_model()\n\n file_name = 'training_set_' + str(pick_list) + '.sav'\n pickle.dump(labeled_features, open(file_name, 'wb'))\n\n","sub_path":"Exercise-3/sensor_stick/scripts/capture_features.py","file_name":"capture_features.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"97037041","text":"# coding: utf-8\nfrom __future__ import print_function, unicode_literals\nimport argparse\nimport sys\nimport pkg_resources\nimport logging\n\nfrom lxml import etree\n\nimport packtools\nfrom packtools import catalogs\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass XMLError(Exception):\n \"\"\" Represents errors that would block HTMLGenerator instance from\n being created.\n \"\"\"\n\n\ndef get_htmlgenerator(\n xmlpath, no_network, no_checks, css, print_css, js, permlink, url_article_page, url_download_ris\n):\n try:\n parsed_xml = packtools.XML(xmlpath, no_network=no_network)\n except IOError as e:\n raise XMLError('Error reading %s. Make sure it is a valid file-path or URL.' % xmlpath)\n except etree.XMLSyntaxError as e:\n raise XMLError('Error reading %s. 
Syntax error: %s' % (xmlpath, e))\n\n    try:\n        valid_only = not no_checks\n        generator = packtools.HTMLGenerator.parse(\n            parsed_xml, valid_only=valid_only, css=css,\n            print_css=print_css, js=js,\n            permlink=permlink,\n            url_article_page=url_article_page,\n            url_download_ris=url_download_ris)\n    except ValueError as e:\n        raise XMLError('Error reading %s. %s.' % (xmlpath, e))\n\n    return generator\n\n\n@packtools.utils.config_xml_catalog\ndef main():\n\n    packtools_version = pkg_resources.get_distribution('packtools').version\n\n    parser = argparse.ArgumentParser(description='HTML generator cli utility')\n    parser.add_argument('--nonetwork', action='store_true',\n                        help='prevents the retrieval of the DTD through the network')\n    parser.add_argument('--nochecks', action='store_true',\n                        help='prevents the validation against SciELO PS spec')\n    parser.add_argument('--css', default=catalogs.HTML_GEN_DEFAULT_CSS_PATH,\n                        help='URL or full path of the CSS file to use with generated htmls')\n    parser.add_argument('--print_css', default=catalogs.HTML_GEN_DEFAULT_PRINT_CSS_PATH,\n                        help='URL or full path of the CSS (media: print) file to use with generated htmls')\n    parser.add_argument('--js', default=catalogs.HTML_GEN_DEFAULT_JS_PATH,\n                        help='URL or full path of the JS file to use with generated htmls')\n    parser.add_argument('--permlink', default='',\n                        help='Permanent URL to access the article')\n    parser.add_argument('--url_article_page', default='',\n                        help='OPAC URL to access the article')\n    parser.add_argument('--url_download_ris', default='',\n                        help='URL to download RIS file (how to cite this article)')\n    parser.add_argument('XML', nargs='+',\n                        help='filesystem path or URL to the XML')\n    parser.add_argument('--version', action='version', version=packtools_version)\n    parser.add_argument('--loglevel', default='WARNING')\n    args = parser.parse_args()\n\n    logging.basicConfig(level=getattr(logging, args.loglevel.upper()))\n\n    print('Please wait, this may take a while...', file=sys.stderr)\n\n    for xml in packtools.utils.flatten(args.XML):\n        LOGGER.info('starting generation of %s' % (xml,))\n\n        try:\n            html_generator = get_htmlgenerator(\n                xml, args.nonetwork, args.nochecks,\n                args.css, args.print_css, args.js,\n                args.permlink, args.url_article_page, args.url_download_ris)\n            LOGGER.debug('HTMLGenerator repr: %s' % repr(html_generator))\n        except XMLError as e:\n            LOGGER.debug(e)\n            LOGGER.warning('Error generating %s. Skipping. Run with DEBUG for more info.', xml)\n            continue\n\n        try:\n            for lang, trans_result in html_generator:\n                fname, fext = xml.rsplit('.', 1)\n                out_fname = '.'.join([fname, lang, 'html'])\n\n                with open(out_fname, 'wb') as fp:\n                    fp.write(etree.tostring(trans_result, pretty_print=True,\n                                            encoding='utf-8', method='html',\n                                            doctype=u\"<!DOCTYPE html>\"))\n\n                print('Generated HTML file:', out_fname)\n        except TypeError as e:\n            LOGGER.debug(e)\n            LOGGER.warning('Error generating %s. Skipping. 
Run with DEBUG for more info.', xml)\n continue\n\n LOGGER.info('finished generating %s' % (xml,))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"packtools/htmlgenerator.py","file_name":"htmlgenerator.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"131673211","text":"from mlbaselines.prime import PrimeMonitor\nfrom mlbaselines.worker import Worker\n\n\ndef remove(name):\n try:\n shutil.rmtree(name)\n except FileNotFoundError:\n pass\n\n\nif __name__ == '__main__':\n import shutil\n from multiprocessing import Process\n\n remove('/tmp/cockroach/queue')\n remove('/tmp/mongo')\n\n # Create the initial broker to initialize the system\n mq = 'mongodb://192.168.0.10:8123'\n init = PrimeMonitor(mq)\n\n # start a pool of workers\n def make_worker(id):\n w = Worker(mq, worker_id=id)\n w.run()\n return w\n\n workers = []\n for i in range(3):\n w = Process(target=make_worker, args=(i,))\n w.start()\n\n # init the pool of workers\n # Restore previous session if any\n init.restore_session()\n\n # queue two brokers for redundancy\n # in our case we are using a single node so we ignore that\n # master.queue_broker()\n # master.queue_broker()\n\n # queue HPO creation\n init.queue_hpo()\n\n # wait for HPO to finish\n init.wait()\n\n # shutdown system\n init.shutdown()\n\n","sub_path":"tests/e2e/test_system.py","file_name":"test_system.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"441340506","text":"#!/usr/bin/env python3\n\nimport os\n\nfrom app import db\nfrom app.models import Setting\n\n\n# PDNS Admin settings\nlegal_envvars_setting = (\n 'MAINTENANCE',\n 'FULLSCREEN_LAYOUT',\n 'RECORD_HELPER',\n 'LOGIN_LDAP_FIRST',\n 'DEFAULT_RECORD_TABLE_SIZE',\n 'DEFAULT_DOMAIN_TABLE_SIZE',\n 'AUTO_PTR',\n 'RECORD_QUICK_EDIT',\n 'PRETTY_IPV6_PTR',\n 'DNSSEC_ADMINS_ONLY',\n 'ALLOW_USER_CREATE_DOMAIN',\n 'BG_DOMAIN_UPDATES',\n 'SITE_NAME',\n 'SESSION_TIMEOUT',\n 'PDNS_API_URL',\n 'PDNS_API_KEY',\n 'PDNS_VERSION',\n 'LOCAL_DB_ENABLED',\n 'SIGNUP_ENABLED',\n 'LDAP_ENABLED',\n 'LDAP_TYPE',\n 'LDAP_URI',\n 'LDAP_BASE_DN',\n 'LDAP_ADMIN_USERNAME',\n 'LDAP_ADMIN_PASSWORD',\n 'LDAP_FILTER_BASIC',\n 'LDAP_FILTER_USERNAME',\n 'LDAP_SG_ENABLED',\n 'LDAP_ADMIN_GROUP',\n 'LDAP_OPERATOR_GROUP',\n 'LDAP_USER_GROUP',\n 'LDAP_DOMAIN',\n 'GITHUB_OAUTH_ENABLED',\n 'GITHUB_OAUTH_KEY',\n 'GITHUB_OAUTH_SECRET',\n 'GITHUB_OAUTH_SCOPE',\n 'GITHUB_OAUTH_API_URL',\n 'GITHUB_OAUTH_TOKEN_URL',\n 'GITHUB_OAUTH_AUTHORIZE_URL',\n 'GOOGLE_OAUTH_ENABLED',\n 'GOOGLE_OAUTH_CLIENT_ID',\n 'GOOGLE_OAUTH_CLIENT_SECRET',\n 'GOOGLE_TOKEN_URL',\n 'GOOGLE_OAUTH_SCOPE',\n 'GOOGLE_AUTHORIZE_URL',\n 'GOOGLE_BASE_URL',\n 'OIDC_OAUTH_ENABLED',\n 'OIDC_OAUTH_KEY',\n 'OIDC_OAUTH_SECRET',\n 'OIDC_OAUTH_SCOPE',\n 'OIDC_OAUTH_API_URL',\n 'OIDC_OAUTH_TOKEN_URL',\n 'OIDC_OAUTH_AUTHORIZE_URL',\n 'FORWARD_RECORDS_ALLOW_EDIT',\n 'REVERSE_RECORDS_ALLOW_EDIT',\n 'TTL_OPTIONS'\n)\n\n\n# add every setting from environment variables\nimport os\nimport sys\nfor v in legal_envvars_setting:\n if v in os.environ:\n name = v.lower()\n value = os.environ[v]\n setting = Setting(name=name, value=value)\n db.session.add(setting)\n\n\ndb.session.commit()\n","sub_path":"powerdns-admin-v0.1/init_setting.py","file_name":"init_setting.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"139316889","text":"#impot des librairies\r\nimport csv\r\nimport pandas as pd\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nimport re\r\n\r\n#Fonction de suppression des éléments non utiles \r\ndef remove_content(text):\r\n\r\n text = text.lower()\r\n text = text.replace('\\n', ' ').replace('\\r', ' ')\r\n text = ' '.join(text.split())\r\n text = re.sub(r\"https\\S+\", \" \", text) #remove urls\r\n text = text.replace('t.co', ' ')\r\n text = text.replace('&', ' ')\r\n text = re.sub(r\"http\\S+\", \" \", text) #remove urls\r\n text=re.sub(r'\\S+\\.com\\S+',' ',text) #remove urls\r\n text=re.sub(r'\\@\\w+',' ',text) #remove mentions\r\n text =re.sub(r'\\#\\w+',' ',text) #remove hashtags\t\r\n\r\n return text\r\n\r\n#Fonction de nettoyage des tweets \r\ndef process_text(text): #clean text\r\n\tclean = ''\r\n\ttext=remove_content(text)\r\n\r\n #Découpage des phrases en mot (token)\r\n\ttext = re.sub('[^A-Za-z]', ' ', text.lower()) #remove non-alphabets\r\n\ttokenized_text = nltk.word_tokenize(text) #tokenize\r\n\t#Déclaration des stops words dans une variable\r\n\tstop_words = stopwords.words('english')\r\n\t#Par cette méthode, on garde supprime les mots présents dans la liste des stop words \r\n\t#et on élimine ainsi tous les petits mots parasites (the, is, an, and...)\r\n\tclean_text = [\r\n\tword for word in tokenized_text\r\n\tif word not in stop_words ] \r\n\tif len(clean_text) > 0:\r\n\t clean = ' '.join(clean_text)\r\n\telse:\r\n\t clean = '_'\r\n\r\n\treturn clean\r\n\r\n\r\n\r\n\r\n#lecture du csv \r\n# A MODIFIER\r\ndf = pd.read_csv('fichiers/all_tweets_realDonaldTrump.csv')\r\n#creation d'un champ clean contenant le text stemminisé\r\ndf['cleaned_tweets']=df['tweet'].apply(lambda x: process_text(x))\r\n#sauvegarde du csv\r\n#A MODIFIER\r\ndf.to_csv('fichiers/all_tweets_realDonaldTrump_cleaned.csv', index=False, encoding='utf-8-sig',quotechar='\"',\r\n quoting=csv.QUOTE_NONNUMERIC)\r\n","sub_path":"nettoyage_tweets.py","file_name":"nettoyage_tweets.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"356792885","text":"from bs4 import BeautifulSoup\n\nITEM_HTML = \"\"\"\"\n \n \n \n \n HTML PARSER\n \n \n

This is a new file.

\n

Numbers are created by numeric literals.

\n

Another paragraph without a class.

\n
    \n
  • Chicken
  • \n
  • Mutton
  • \n
  • Fish
  • \n
  • Eggs
  • \n
  • Rice
  • \n
\n    </body>\n</html>\"\"\"\n\nsimple_soup = BeautifulSoup(ITEM_HTML, 'html.parser')\n\n\ndef find_title():\n    h1_tag = simple_soup.find('h1')\n    print(h1_tag.string)\n\n\ndef find_list_items():\n    list_items = simple_soup.find_all('li')\n    list_content = [e.string for e in list_items]\n    print(list_content)\n\n\ndef find_subtitle():\n    paragraph = simple_soup.find('p', {\"class\": \"subtitle\"})\n    print(paragraph.string)\n\n\ndef find_other_paragraphs():\n    paragraphs = simple_soup.find_all('p')\n    other_paragraph = [p.string for p in paragraphs if 'subtitle' not in p.attrs.get('class', [])]\n    print(other_paragraph[0])\n\n\nfind_title()\nfind_list_items()\nfind_subtitle()\nfind_other_paragraphs()\n","sub_path":"Web_Scraping/simple_html.py","file_name":"simple_html.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"128614038","text":"import sys\nimport string\n\n__author__ = 'Trevor Martin'\n\nprint(\"Welcome to my concorder!\\nIF YOU WANT TO QUIT, type in \"\"\\\"q\"\"\\\" where it says enter file\")\n\ndef setup_concordance(lines, filename):\n    proper_form = [line.strip().split() for line in lines]\n    proper_form = list(filter(None, proper_form))\n    # now each index of proper_form is a (line number - 1)\n    punCd = lambda line: [elt.strip(string.punctuation).lower() for elt in line]\n    clean_text = [punCd(line) for line in proper_form]\n    clean_textv2 = [list(filter(None, line)) for line in clean_text]\n    # now clean_textv2 has no empty strings\n    uniq_words = set([sub_elt for elt in clean_textv2 for sub_elt in elt])\n    word_concord = dict(zip(list(uniq_words), [[] for _ in range(len(uniq_words))]))\n    for index, line in enumerate(clean_textv2): \n        for word in line:\n            word_concord[word].append(index+1) # recall the indices are one off\n    for key in sorted(list(word_concord.keys())):\n        line_nums = \" \".join([str(elt) for elt in word_concord[key]])\n        print(f\"{key} : {line_nums[:]}\") \n    num_uniq_words = len(uniq_words)\n    num_lines = len(clean_textv2)\n    print(f\"There are {num_uniq_words} unique words in {num_lines} lines\")\n\n\ndef main():\n    while True:\n        try:\n            file = input(\"Enter file: \")\n            if str(file) == 'q': sys.exit()\n            with open(file) as f:\n                lines = f.readlines()\n            filename = str(file)\n            setup_concordance(lines, filename) \n        except FileNotFoundError:\n            print(\"There was something erroneous with the file, try again please.\")\n\nif __name__ == '__main__':\n    main()\n","sub_path":"concordance.py","file_name":"concordance.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"251308494","text":"\"\"\"Solutionbooks are the bread-and-butter of AI@UCF. These Notebooks are used\nin a variety of tutorials and can be rendered as:\n* Workbooks, to enable interactive tutorial/self-study sessions.\n* Markdown Posts, to enable publication on the club website.\n\nCurrently, the following can be invoked:\n* :meth:`.standardize`\n* :meth:`.to_post`\n* :meth:`.to_workbook`\n\"\"\"\nimport pdb\nimport json\n\nimport nbformat as nbf\nfrom jinja2 import Template\nfrom nbconvert.exporters import MarkdownExporter, NotebookExporter\nfrom nbconvert.preprocessors import TagRemovePreprocessor\nfrom nbgrader.preprocessors import ClearOutput, ClearSolutions\n\nfrom .. import j2env, read_from_disk\nfrom ..meeting import search\nfrom . 
import website\n\n\ndef make_solutionbook(ctx, query, **kwargs):\n \"\"\"Ensures that all Solutionbooks have accurate headings, pathing, and metadata.\n \"\"\"\n m = search(ctx, query)\n\n _solnbook = ctx.settings.suffixes.solutionbook\n\n standard = NotebookExporter()\n standard.register_preprocessor(\n TagRemovePreprocessor(remove_cell_tags=[\"template\"]), enabled=True\n )\n\n setattr(m, \"group\", ctx.group)\n path = ctx.path / str(m) / f\"{m.filename}{_solnbook}\"\n # If the notebook doesn't exist, or it's empty\n if not path.exists() or path.stat().st_size == 0:\n nb = nbf.v4.new_notebook()\n nbf.write(nb, open(path, \"w\"))\n\n nb, _ = standard.from_filename(str(path))\n nb = nbf.reads(nb, as_version=4)\n\n # Inject Heading\n html_header = j2env.get_template(\"notebooks/header.html.j2\")\n banner_url = Template(ctx.settings.hugo.banner_url).render(\n group=ctx.group, meeting=m\n )\n header = html_header.render(banner_url=banner_url, meeting=m)\n header_metadata = {\"title\": m.title, \"tags\": [\"nb-title\", \"template\"]}\n nb.cells.insert(0, nbf.v4.new_markdown_cell(header, metadata=header_metadata))\n\n # Inject data-loading cell\n from ..apis import kaggle\n\n py_dataset_path = j2env.get_template(\"notebooks/data-pathing.py.j2\")\n dataset = py_dataset_path.render(slug=kaggle.slug_competition(ctx, m))\n dataset_metadata = {\"language\": \"python\", \"tags\": [\"template\"]}\n nb.cells.insert(1, nbf.v4.new_code_cell(dataset, metadata=dataset_metadata))\n\n # Inject Notebook Metadata\n nb_metadata = j2env.get_template(\"notebooks/nb-metadata.json.j2\")\n metadata = nb_metadata.render(meeting=m)\n nb.metadata.update(json.loads(metadata))\n\n nbf.write(nb, open(path, \"w\"))\n\n\ndef make_workbook(ctx, query, **kwargs):\n \"\"\"Generates a Workbook from a Solutionbook.\n\n Workbooks are stripped down Solutionbooks that, namely:\n - Have no output cells.\n - Replace `### BEGIN SOLUTION ... 
### END SOLUTION` blocks with\n      `raise NotImplementedError()` snippets for viewers to practice on.\n    \"\"\"\n    m = search(ctx, query)\n\n    workbook = NotebookExporter()\n\n    workbook.register_preprocessor(ClearOutput(), enabled=True)\n    workbook.register_preprocessor(ClearSolutions(enforce_metadata=False), enabled=True)\n    # TODO migrate back to `enforce_metadata=True`\n\n    # workbook.register_preprocessor(ValidateNBGrader(), enabled=True)\n    # this is only useful if we can migrate back to `enforce_metadata=True`\n\n    _workbook = ctx.settings.suffixes.workbook\n    _solnbook = ctx.settings.suffixes.solutionbook\n\n    try:\n        path = ctx.path / str(m) / m.filename\n\n        nb, _ = workbook.from_filename(str(path.with_suffix(_solnbook)))\n        nb = nbf.reads(nb, as_version=4)\n\n        nbf.write(nb, open(path.with_suffix(_workbook), \"w\"))\n    except Exception:\n        raise Exception(f\"Workbook export failed on `{m}`.\")\n\n\ndef make_post(ctx, query, **kwargs):\n    \"\"\"Preprocess a Solutionbook and prepare it to post on https://ucfai.org/.\n    \"\"\"\n    m = search(ctx, query)\n\n    _solnbook = ctx.settings.suffixes.solutionbook\n\n    as_post = MarkdownExporter()\n    as_post.extra_loaders = [j2env.loader]\n    as_post.template_file = f\"notebooks/to-post.md.j2\"\n    as_post.no_prompt = True\n    as_post.register_preprocessor(\n        TagRemovePreprocessor(remove_cell_tags=[\"nb-title\"], enabled=True)\n    )\n\n    name = ctx.path / f\"{m}/{m.filename}{_solnbook}\"\n    # Default to `git`-based \"Last modified...\"\n    # lastmod = pd.Timestamp(name.stat().st_mtime, unit=\"s\")\n    # setattr(m, \"lastmod\", lastmod)\n\n    nb, _ = as_post.from_filename(str(name))\n\n    weight = kwargs.get(\"weight\", -1)\n    try:\n        website.touch_meeting(ctx, m, body=nb, weight=weight)\n    except:\n        raise\n        pdb.set_trace()\n","sub_path":"src/components/notebook.py","file_name":"notebook.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"104967802","text":"from RJProject import apiApp, apiSocket\nfrom RJProject.db.tables import Tweets, Authors\nfrom RJProject.db.utils import get_author_by_id, get_author, get_sales_by_id, get_author_by_name, get_timezone_by_id, update_author\nfrom RJProject.db import get_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom flask import jsonify, render_template, request\nfrom datetime import timedelta, datetime\nfrom threading import Thread\nimport json\nimport time\nfrom flask_sockets import Sockets\n\n\ndef query_recent_tweets():\n    engine = get_engine(\n        host=apiApp.config['dbHost'],\n        user=apiApp.config['dbUser'],\n        password=apiApp.config['dbPassword'],\n        database=apiApp.config['dbName'])\n    session = sessionmaker(bind=engine)\n    s = session()\n    recent = datetime.utcnow() - timedelta(seconds=2)\n    recentTweets = s.query(Tweets).filter(\n        Tweets.time >= recent).order_by(\n        Tweets.time).all()\n    rsp = []\n    for twt in recentTweets:\n        rsp.append(build_tweet_object(twt=twt))\n    s.close()\n    return rsp\n\n\ndef build_tweet_object(twt=None, author=None):\n    engine = get_engine(\n        host=apiApp.config['dbHost'],\n        user=apiApp.config['dbUser'],\n        password=apiApp.config['dbPassword'],\n        database=apiApp.config['dbName'])\n    if author is None:\n        author = get_author_by_id(engine=engine, user_id=twt.user_id)\n    obj = {\n        \"id\": twt.tweet_id,\n        \"text\": twt.tweet,\n        \"time\": twt.time.strftime(\"%H:%M:%S %m-%d-%Y\"),\n        \"author\": author.name,\n        \"avatar\": author.img,\n        \"contacted\": author.contacted,\n        \"hashtags\": [{\n            \"name\": h.name,\n            \"id\": h.hashtag_id\n        } for h 
in twt.hashtags]\n    }\n    if author.assigned_to is not None:\n        owner = get_sales_by_id(engine=engine, sales_id=author.assigned_to)\n        obj.update({\"owner\": owner.email})\n    else:\n        obj.update({\"owner\": None})\n    return obj\n\n\ndef get_author_recent_tweets(engine=None, author=None):\n    session = sessionmaker(bind=engine)\n    s = session()\n    recentTweets = s.query(Tweets).filter(\n        Tweets.user_id == author.user_id).limit(25).all()\n    tweets = [\n        build_tweet_object(\n            twt=twt,\n            author=author) for twt in recentTweets]\n    s.close()\n    return tweets\n\n\n@apiSocket.route('/recentTweets')\ndef get_recent_tweets(ws):\n    count = 0\n    while True:\n        rsp = query_recent_tweets()\n        if len(rsp) >= 1:\n            ws.send(json.dumps(rsp))\n        time.sleep(3)\n\n\ndef query_author(name):\n    engine = get_engine(\n        host=apiApp.config['dbHost'],\n        user=apiApp.config['dbUser'],\n        password=apiApp.config['dbPassword'],\n        database=apiApp.config['dbName'])\n    author = get_author_by_name(engine=engine, name=name)\n    tzone = get_timezone_by_id(engine=engine, time_zone_id=author.time_zone)\n    rsp = {\n        \"id\": author.user_id,\n        \"name\": author.name,\n        \"img\": author.img,\n        \"location\": author.location,\n        \"time_zone\": tzone.name,\n        \"time_offset\": tzone.offset,\n        \"description\": author.description,\n        \"contacted\": author.contacted,\n        \"ignore\": author.ignore,\n        \"salesforce_id\": author.salesforce_id\n    }\n    if author.assigned_to is not None:\n        owner = get_sales_by_id(engine=engine, sales_id=author.assigned_to)\n        rsp.update({\"owner\": {\n            \"name\": owner.name,\n            \"email\": owner.email\n        }\n        })\n    else:\n        rsp.update({\"owner\": None})\n    recentTweets = get_author_recent_tweets(engine=engine, author=author)\n    rsp.update({\"recentTweets\": recentTweets})\n    return rsp\n\n\n@apiApp.route('/getAuthor/<name>')\ndef api_author(name):\n    rsp = query_author(name)\n    return jsonify(rsp)\n\n\n@apiApp.route('/takeAuthor/<name>', methods=['POST'])\ndef take_author(name):\n    try:\n        engine = get_engine(\n            host=apiApp.config['dbHost'],\n            user=apiApp.config['dbUser'],\n            password=apiApp.config['dbPassword'],\n            database=apiApp.config['dbName'])\n        author = get_author_by_name(engine=engine, name=name)\n        author.assigned_to = request.form[\"sales_id\"]\n        update_author(engine=engine, author=author)\n        return jsonify({\"message\": \"success\"})\n    except Exception as e:\n        return jsonify({\"error\": \"failed to assign author\"})\n\n\n@apiApp.route(\"/get/Authors\")\ndef get_all_authors():\n    engine = get_engine(\n        host=apiApp.config['dbHost'],\n        user=apiApp.config['dbUser'],\n        password=apiApp.config['dbPassword'],\n        database=apiApp.config['dbName'])\n    sessFactory = sessionmaker(bind=engine)\n    session = scoped_session(sessFactory)\n    s = session()\n    authors = s.query(Authors).order_by(Authors.name).all()\n    tbody = []\n    for author in authors:\n        tzone = get_timezone_by_id(\n            engine=engine,\n            time_zone_id=author.time_zone)\n        tbody.append([{\"onclick\": \"go_to_author\",\n                       \"name\": author.name,\n                       \"link\": \"/author/%s\" % author.name},\n                      author.location,\n                      author.description,\n                      author.contacted,\n                      tzone.name,\n                      len(author.tweets)])\n    rsp = {\n        \"pageCount\": 5,\n        \"pageItems\": 10,\n        \"pagination\": False,\n        \"style\": {\n            \"class\": \"table table-hover\",\n            \"columnWidth\": None,\n            \"width\": None},\n        \"tbody\": tbody,\n        \"thead\": [\n            \"Name\",\n            \"Location\",\n            \"Description\",\n            \"Contacted\",\n            \"Time Zone\",\n            \"Tweet Count\"]}\n    s.close()\n    return 
jsonify(rsp)\n","sub_path":"RJProject/api/tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"561036937","text":"# detects the cube and draw square around it (dark background)\n# import the necessary packages\nfrom CubeSide import CubeSide\nimport numpy as np\nimport cv2\n\n# load the image\nwebcam = cv2.VideoCapture(\"http://10.0.0.202:8081\")\nsaveImage = True\ncube_side = CubeSide()\n\n# returns mask from grayscaled imageFrame\ndef get_graymask(imageFrame):\n    img_gray = cv2.cvtColor(imageFrame, cv2.COLOR_BGR2GRAY)\n    ret, thresh = cv2.threshold(img_gray, 65, 255, cv2.THRESH_BINARY)\n    return thresh\n\n\n# returns the position, size and the area of the cube (x,y,w,h,area)\ndef find_cube(thresh):\n    x = 0\n    y = 0\n    w = 0\n    h = 0\n    contours = cv2.findContours(\n        thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n    )[-2]\n\n    max_area = 0\n    # Draw Rectangle if detected a color square\n    for pic, contour in enumerate(contours):\n        contours, hierarchy = cv2.findContours(\n            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n        )\n\n        area = cv2.contourArea(contour)\n        if area > max_area:\n            max_area = area\n            _, _, w, h = cv2.boundingRect(contour)\n\n            ar = w / float(h)  # ar = aspect ratio\n\n            # if the same color is detected in an almost square (80% - 120%)\n            if ar >= 0.95 and ar <= 1.05:\n                x, y, w, h = cv2.boundingRect(contour)\n\n    return x, y, w, h, max_area\n\n\n# draws square around the cube to the image frame\ndef draw_cube_contours(imageFrame):\n    global saveImage\n    thresh = get_graymask(imageFrame)\n    x, y, w, h, area = find_cube(thresh)\n\n    if area > 1000:\n        # to save image only once and do not override it every time\n        if saveImage == True:\n            cv2.imwrite(\"cube.jpg\", imageFrame)\n            cube_side.extract_cube_colors(\"cube.jpg\", x, y, w, h)\n            cube_side.print_arr()\n            saveImage = False\n\n        # print(\"Rectpos: x: {0} | y: {1} |w: {2} | h: {3}\".format(x, y, w, h))\n        imageFrame = cv2.rectangle(imageFrame, (x, y), (x + w, y + h), (255, 255, 0), 2)\n\n    return imageFrame\n\n\ndef main():\n    pos = 0  # side counter cycled with the \"n\" key\n    while 1:\n\n        ret, imageFrame = webcam.read()\n        graymask = get_graymask(imageFrame)\n        imageFrame = draw_cube_contours(imageFrame)\n        # show the video\n        cv2.imshow(\"images\", imageFrame)\n        cv2.imshow(\"image gray mask\", graymask)\n\n        # cv2.waitKey(0)\n        if cv2.waitKey(10) & 0xFF == ord(\"n\"):\n            pos += 1\n            if pos > 6:\n                pos = 0\n\n        if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n            cv2.destroyAllWindows()\n            break\n\n    webcam.release()\n    cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Projects/_7_Cube_Solver/PatternRecognitionV3/cubecontourdetection_workingV1.py","file_name":"cubecontourdetection_workingV1.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"624824527","text":"\n\n#class header\nclass _SCOOTER():\n\tdef __init__(self,): \n\t\tself.name = \"SCOOTER\"\n\t\tself.definitions = [u\"a child's vehicle with two or three small wheels joined to the bottom of a narrow board and a long vertical handle attached to the front wheel. 
It is ridden by standing with one foot on the board and pushing against the ground with the other foot.\", u'a motor scooter ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_scooter.py","file_name":"_scooter.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"347430858","text":"import time\n\ndef follow(file):\n '''Linux tail like function'''\n file.seek(0, 2)\n while True:\n line = file.readline()\n if not line:\n time.sleep(0.1)\n continue\n yield line\n\nlogfile = open(\"/tmp/log\")\n\nfor line in follow(logfile):\n print(line)\n","sub_path":"generators/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"325497683","text":"#Time Complexity : O(N) where N is number of element\n#Space Complexity : O(B) where B is maximum breadth\n\nclass Solution(object):\n def updateMatrix(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n if len(matrix)==0:\n return\n Queue =[]\n dirs =[(0,-1),(-1,0),(1,0),(0,1)]\n distance = 1\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j]==0:\n Queue.append([i,j])\n else:\n matrix[i][j] = -1\n \n while len(Queue) >0:\n size = len(Queue)\n for j in range(size):\n curr = Queue.pop(0)\n for i in dirs:\n r = curr[0] + i[0]\n c = curr[1] + i[1]\n if r >=0 and c>=0 and r (.+) 122:\n\t\t\tresult += c\n\t\telse:\n\t\t\tresult += shift_n_letters(c, n)\n\treturn result\n\nprint(rotate ('sarah', 13))\n#>>> 'fnenu'\nprint(rotate('fnenu',13))\n#>>> 'sarah'\nprint(rotate('dave',5))\n#>>>'ifaj'\nprint(rotate('ifaj',-5))\n#>>>'dave'\nprint(rotate((\"zw pfli tfuv nfibj tfiivtkcp pfl jyflcu \"\n \"sv rscv kf ivru kyzj\"),-17))\n#>>> ???\nprint(rotate(\"a\",1))","sub_path":"Rotate.py","file_name":"Rotate.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"212037992","text":"# -*- coding: utf-8 -*-\n\nfrom sklearn.datasets import fetch_20newsgroups\nimport numpy\nimport sklearn.feature_extraction.text\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score\n\n\ndef classifier(clf, X, y):\n global X_train\n global y_train\n global X_test\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n clf.fit(X_train, y_train)\n y_predicted = clf.predict(X_test)\n precision = numpy.mean(y_predicted == y_test)\n print(precision) \n \ntwenty_train = fetch_20newsgroups(subset='train', shuffle=True)\ntwenty_test = fetch_20newsgroups(subset='test', shuffle=True)\ncount_vector = sklearn.feature_extraction.text.CountVectorizer()\nx = count_vector.fit_transform(twenty_train.data)\n\n\ntfidf_transformer = sklearn.feature_extraction.text.TfidfTransformer(use_idf=True,sublinear_tf=False).fit(x)\nTFIDF = tfidf_transformer.transform(x)\n#predicted_knn = text_clf_svm.predict(twenty_test.data)\nknn=KNeighborsClassifier()\nknn.fit(X_train, y_train)\nY_predict=knn.predict(X_test)\n\nn_neighbors = 5\nweights = 'uniform'\n\nfor k in range(1,25):\n print(k)\n clf = KNeighborsClassifier(n_neighbors, 
weights=weights)\n classifier(clf, TFIDF, twenty_train.target)\nprint(\"Accuracy:\",accuracy_score(twenty_test.target,Y_predict))\nprint(metrics.classification_report(twenty_test.target,predicted,target_names=twenty_test.target_names))\n\n","sub_path":"knn2.py","file_name":"knn2.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"380274725","text":"import torch\nimport torch.nn.functional as F\n\nfrom model.baselearner import BaseLearner\nfrom utils.load import get_dataloader\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef finetuning2(filename, batch_size1, batch_size2, hidden_size,\n meta_hidden_size, training_size, T, p):\n dataloader, input_size, output_size = get_dataloader(filename, batch_size1,\n training_size, False)\n\n model = BaseLearner(input_size, hidden_size, output_size)\n model.load_state_dict(torch.load(filename[5:]+'.pt'))\n model.fc1.weight.requires_grad = False\n model.fc1.bias.requires_grad = False\n model.double()\n model.to(device)\n\n total = 0\n correct = 0\n for batch_x, batch_y in dataloader:\n batch_x = batch_x.to(device)\n batch_x = batch_x.double()\n batch_x = batch_x.view(-1, batch_size2, input_size)\n batch_y = batch_y.to(device)\n batch_y = batch_y.long()\n batch_y = batch_y.view(-1, batch_size2)\n\n for j in range(batch_x.size(0)):\n ybar = model(batch_x[j])\n ybar = ybar.max(1)[1]\n ybar = ybar.view(-1)\n total += batch_size2\n correct += (ybar == batch_y[j]).sum().item()\n for t in range(T):\n model.zero_grad()\n ybar = model(batch_x[j])\n loss = F.nll_loss(ybar, batch_y[j])\n loss.backward()\n for para in model.parameters():\n if para.requires_grad:\n para.data.sub_(para.grad.data * 0.01)\n\n return correct, total\n","sub_path":"evaluate/finetuning2.py","file_name":"finetuning2.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"248433465","text":"import sys\nimport sqlite3\nfrom database import Database\n\ndef plot_difference(database_name, method1, method2, basis1, basis2, cp1, cp2, energy):\n \"\"\"\n Takes in two different sets of method/basis/cp and prints the average difference in energy for the same molecules between them\n \"\"\"\n \n # add .db to the database name if it doesn't already end in .db\n if database_name[-3:] != \".db\":\n print(\"Database name \\\"{}\\\" does not end in database suffix \\\".db\\\". 
Automatically adding \\\".db\\\" to end of database name.\".format(database_name))\n        database_name += \".db\"\n\n    database = Database(database_name)\n    \n    # get array of pairs of energies computed in the two different ways\n    energy_pairs = database.get_comparison_energies(method1, method2, basis1, basis2, cp1, cp2, energy)\n\n    differences = [] \n    for energy_pair in energy_pairs:\n        # make sure both energies are numbers and not \"N/A\" MIGHT BE UNNEEDED?\n        if not energy_pair[0] == \"N/A\" and not energy_pair[1] == \"N/A\":\n            differences.append(energy_pair[0] - energy_pair[1])\n    try:\n        print(\"Average Difference in Energy: {:3.3e}\".format(sum(differences) / len(differences)))\n    except ZeroDivisionError as error:\n        raise ValueError(\"No matching energies for both categories for those methods and bases.\") from error\n\nif __name__ == \"__main__\":\n    if sys.argv[1] == \"difference\":\n        plot_difference(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8], sys.argv[9])\n","sub_path":"qm_mb_energy_calculator/src/database_plotter.py","file_name":"database_plotter.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"340974528","text":"# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nfrom Queue import PriorityQueue\nclass Solution(object):\n    def mergeKLists(self, lists):\n        \"\"\"\n        :type lists: List[ListNode]\n        :rtype: ListNode\n        \"\"\"\n        pq = PriorityQueue()\n        head=point=ListNode(0)\n        \n        for l in lists:\n            if l:\n                pq.put((l.val,l))\n        \n        while not pq.empty():\n            val,node=pq.get()\n            point.next=ListNode(val)\n            point=point.next\n            node=node.next\n            if node:\n                pq.put((node.val,node))\n        \n        return head.next\n","sub_path":"Heap/merge k sorted list.py","file_name":"merge k sorted list.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"278490738","text":"import random\n\n# lottery with user amount of numbers\n\n\ndef lottery_numbers(user_input):\n    lottery = []\n    while len(lottery) < user_input:\n        numbers = random.randint(1, 49)\n        if numbers not in lottery:\n            lottery.append(numbers)\n    return lottery\n\n\nprint(\"Welcome to the lottery numbers generator.\")\nuser_input = int(input(\"Please enter how many random numbers would you like to have:\"))\nprint(\"You entered: {}.\".format(user_input))\nplay = lottery_numbers(user_input)\nprint(play)\nprint(\"Thanks for playing.\")\nprint(\"END\")\n\n\n# def lottery_numbers():\n#     return random.sample(range(1, 50), 6)\n\n\n# lottery with 6 numbers:\n# def lottery_numbers():\n#     lottery = []\n#     while len(lottery) < 6:\n#         numbers = random.randint(1, 49)\n#         if numbers not in lottery:\n#             lottery.append(numbers)\n#         else:\n#             pass\n#     return lottery\n#\n#\n# play = lottery_numbers()\n# print play\n","sub_path":"Python/lottery.py","file_name":"lottery.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"344114838","text":"\n\nfrom xai.brain.wordbase.nouns._rooster import _ROOSTER\n\n#class header\nclass _ROOSTERS(_ROOSTER, ):\n\tdef __init__(self,): \n\t\t_ROOSTER.__init__(self)\n\t\tself.name = \"ROOSTERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"rooster\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_roosters.py","file_name":"_roosters.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"151692416","text":"# %% Admin\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta\nfrom dateutil import tz\nimport matplotlib.pyplot as plt\n\nfrom Helper.OdinExtract_Comments import *\nfrom Helper.Source import connect_to_db\n\n# %% Extraction and Processing\n# Extraction\nConn_Odin = connect_to_db()\nSubmission_Raw = ExtractComments_viaSubmission(Conn_Object=Conn_Odin, SubmissionID_str= \"kbpinr\")\nSubmission_Raw[\"SubCreate_US\"]= Submission_Raw[\"CreatedDate\"].dt.tz_localize(\"UTC\").dt.tz_convert('America/New_York')\nSubmission_Raw[\"ComCreate_US\"]= Submission_Raw[\"created_utc\"].dt.tz_localize(\"UTC\").dt.tz_convert('America/New_York')\n\nTemp_SubmissionCreation= Submission_Raw.iloc[0][\"SubCreate_US\"]\nTemp_TotalComments= len(Submission_Raw)\nTemp_Closed= Submission_Raw.iloc[Submission_Raw.index[-1]][\"IsClosed\"]\n\nSubmission1= Submission_Raw.drop([\"SubCreate_US\", \"IsClosed\",\"CreatedDate\",\"created_utc\"],1).copy()\nSubmission1.dtypes\n\n# Processing\nSubmission2= Submission1.copy()\nSubmission2[\"ComCreate_US_Ceil\"]= Submission2[\"ComCreate_US\"].dt.ceil(\"15min\")\nSubmission2= Submission2.sort_values(\"ComCreate_US_Ceil\")\nSubmission2.dtypes\n\n# %% Plot 1\nPlot1_Prep= Submission2[[\"ID_Submission\",\"ComCreate_US_Ceil\"]].groupby(\"ComCreate_US_Ceil\").agg({\"ID_Submission\": \"count\"}).reset_index()\nPlot1_Prep.columns= [\"Datetime\",\"Comments\"]\n\nTemp_Filling= pd.DataFrame({\"Datetime\": pd.date_range(min(Plot1_Prep[\"Datetime\"]), max(Plot1_Prep[\"Datetime\"]), freq='15min'),\n \"NumComments_Delete\": 0})\nPlot1_Prep2= pd.merge(left=Plot1_Prep, right=Temp_Filling, how=\"outer\", on=\"Datetime\").sort_values(\"Datetime\")\nPlot1_Prep2[\"Comments\"]= Plot1_Prep2[\"Comments\"].fillna(0)\nPlot1_Prep2= Plot1_Prep2.drop(\"NumComments_Delete\", 1)\n\nPlot1_Prep2[\"Comments_Cumsum\"]= (Plot1_Prep2[\"Comments\"].cumsum())/Temp_TotalComments*Plot1_Prep2[\"Comments\"].max()\n\nimport matplotlib.dates as mdates\nimport pytz\nplt.plot(Plot1_Prep2[\"Datetime\"], Plot1_Prep2[\"Comments\"])\nplt.fill_between(Plot1_Prep2[\"Datetime\"], Plot1_Prep2[\"Comments_Cumsum\"], color=\"skyblue\", alpha=0.4)\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d %H:%M', tz=pytz.timezone('America/New_York')))\nplt.gcf().autofmt_xdate()\nplt.xlabel(\"NY Datetime\"); plt.ylabel(\"#Comments\");\nplt.show()\n\n\n","sub_path":"Tests/Processing_CommentTrees_Todo/Comment_Exploration.py","file_name":"Comment_Exploration.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"11074882","text":"import numpy as np\nimport logging\n\nimport vtk\nfrom vedo import *\nfrom vedo.addons import *\n\nimport iblviewer.utils as utils\n\nclass VolumeView():\n\n #slicing_plane_ids = {'x+':0, 'x-':1}\n\n def __init__(self, plot, model, atlas_model):\n \"\"\"\n Constructor\n :param plot: Plot instance\n :param model: VolumeModel instance\n :param atlas_model: AtlasModel instance\n \"\"\"\n self.plot = plot\n self.model = model\n self.atlas_model = atlas_model\n\n self.actor = None\n self.alpha_factor = 0.001 #* self.model.volume.resolution\n\n self.clipping_planes = None\n self.clipping_axes = []\n self.scalar_bar = None\n self.dummy_actor 
= Cross3D([0, 0, 0], s=0.0, c='black')\n self.dummy_actor.pickable(0).lighting('off')\n self.plot.add(self.dummy_actor, render=False)\n\n # Init phase\n self.build_actor()\n self.set_color_map()\n \n #msg = 'Volume abs center', self.volume_center, 'position', np.array(self.volume_actor.pos())\n #logging.info(msg)\n\n def build_actor(self):\n \"\"\"\n Set the volume actor for visualization in VTK\n \"\"\"\n spacing = np.array([self.model.resolution]*3)\n self.actor = Volume(self.model.volume, spacing=spacing, mapper='smart')\n self.actor.name = self.model.name\n self.actor.shade(False)\n self.actor.mode(0)\n self.actor.pickable(False)\n # Apparently, setting a custom spacing results in a misaligned volume\n # by exactly half a voxel. This is fixed here.\n self.actor.pos(spacing / 2)\n\n # TODO: Compute this in the model and check here when creating the actor that size matches?\n self.model.dimensions = np.array(self.actor.dimensions()).astype(np.float64) * self.model.resolution\n # center() is also wrong on the volume when spacing is used as it's not exactly dimensions() / 2\n self.model.center = np.array(self.actor.pos()) + np.array(self.actor.center())\n\n self.bounding_planes = []\n self.init_bounding_planes()\n self.init_clipping_planes()\n self.plot.add(self.actor, render=False)\n\n\n #self.actor.alphaUnit(1)\n #self.actor.jittering(True)\n #self.actor._mapper.AutoAdjustSampleDistancesOn()\n #self.actor._mapper.SetBlendModeToAverageIntensitye()\n #self.actor._mapper.SetSampleDistance(100)\n \n def init_bounding_planes(self):\n \"\"\"\n Bounding planes initialization\n \"\"\"\n axes = [0, 1, 2]\n for axis in axes:\n plane_origin = self.model.center + self.model.dimensions[axis]\n self.bounding_planes.append(plane_origin)\n\n def set_alpha_map(self, alpha_map, alpha_factor=None):\n \"\"\"\n Set alpha map to the volume view\n :param alpha_map: 2D list of scalar values and alpha values\n \"\"\"\n if alpha_map is None:\n alpha_map = self.atlas_model.transfer_function.alpha_map\n if alpha_factor is None:\n alpha_factor = self.alpha_factor\n volume_alpha_map = np.ones_like(alpha_map).astype(np.float)\n volume_alpha_map[:] = alpha_map[:]\n volume_alpha_map[:, 1] *= alpha_factor\n self.actor.alpha(volume_alpha_map)\n\n def set_color_map(self, color_map=None, alpha_map=None):\n \"\"\"\n Update the color map\n :param color_map: 4D list of scalar values and rgb colors\n :param alpha_map: 2D list of scalar values and alpha values\n \"\"\"\n tf = self.atlas_model.transfer_function\n if color_map is None:\n color_map = tf.color_map\n if alpha_map is None:\n alpha_map = tf.alpha_map\n\n if color_map is not None:\n self.actor.cmap(color_map)#['black', 'white'])\n if alpha_map is not None:# and self.segmentation_mode():\n self.set_alpha_map(alpha_map)\n\n #self.actor.addScalarBar(pos=(0.85,0.05), useAlpha=False)\n #self.plot.add([self.actor.scalarbar]) #, self.dummy_actor.scalarbar])\n self.plot.remove(self.scalar_bar)\n self.scalar_bar = utils.add_scalar_bar(tf.scalar_lut, pos=(0.8,0.05))\n self.plot.add([self.scalar_bar])\n\n def enable_shading(self):\n volumeProperty = self.actor.GetProperty()\n #volumeProperty.SetColor(volumeColor)\n #volumeProperty.SetScalarOpacity(volumeScalarOpacity)\n volumeProperty.SetInterpolationTypeToLinear()\n volumeProperty.ShadeOn()\n volumeProperty.SetAmbient(0.6)\n volumeProperty.SetDiffuse(0.8)\n volumeProperty.SetSpecular(0.9)\n volumeProperty.SetScalarOpacityUnitDistance(1)\n self.actor.SetProperty(volumeProperty)\n\n def init_clipping_planes(self, axes=[0, 1, 2], 
custom=None):\n \"\"\"\n Initialize X, Y and Z clipping planes with two planes per axis \n for positive and negative slicing and making slabs\n :param axes: List of axes\n :param custom: Custom axis normal\n \"\"\"\n self.clipping_planes = vtk.vtkPlaneCollection() #vtk.vtkPlanes()\n for axis in axes:\n p_plane = vtk.vtkPlane()\n n_plane = vtk.vtkPlane()\n '''\n normal = np.zeros(3).astype(float)\n normal[axis] = 1.0\n p_plane.SetNormal(normal)\n n_plane.SetNormal(-normal)\n '''\n self.clipping_planes.AddItem(p_plane)\n self.clipping_planes.AddItem(n_plane)\n self.clipping_axes = axes\n self.actor.mapper().SetClippingPlanes(self.clipping_planes)\n self.reset_clipping_planes(axes)\n\n def get_clipping_planes(self, except_axis=None):\n \"\"\"\n Get the current clipping planes except the ones on the given axis\n :param except_axis: Axis id to ignore. If None, all clipping planes will be returned\n :return: vtkPlaneCollection\n \"\"\"\n planes = vtk.vtkPlaneCollection()\n for axis in self.clipping_axes:\n if isinstance(except_axis, int) and except_axis == axis:\n continue\n double_axis_ref = axis * 2\n p_plane = self.clipping_planes.GetItem(double_axis_ref)\n n_plane = self.clipping_planes.GetItem(double_axis_ref + 1)\n planes.AddItem(p_plane)\n planes.AddItem(n_plane)\n return planes\n\n def reset_clipping_planes(self, axes=[0, 1, 2]):\n \"\"\"\n Reset clipping planes\n :param axes: Axes to be reset\n \"\"\"\n for axis in axes:\n double_axis_ref = axis * 2\n p_plane = self.clipping_planes.GetItem(double_axis_ref)\n n_plane = self.clipping_planes.GetItem(double_axis_ref + 1)\n normal = np.zeros(3)\n normal[axis] = 1.0\n position = self.bounding_planes[axis]\n p_plane.SetNormal(normal)\n p_plane.SetOrigin(-position)\n n_plane.SetNormal(-normal)\n n_plane.SetOrigin(position)\n\n def clip_on_axis(self, position=None, axis=None, normal=None):\n \"\"\"\n Apply clipping on a single axis\n :param position: Position\n :param axis: Clipping axis, defauls to 0 (X)\n :param thickness: Whether a thickness (so two clipping planes) are applied\n \"\"\"\n factor = 1\n axis_offset = 0\n # This should already be sorted in the model but in case it isn't, we double check here\n if normal is not None and normal[axis] < 0:\n # This means that the given axis has two \n # clipping planes and we take the negative one\n axis_offset += 1\n position = self.model.dimensions - position\n axis_storage_id = axis * 2 + axis_offset\n plane = self.clipping_planes.GetItem(axis_storage_id)\n plane.SetOrigin(position)\n plane.SetNormal(normal)\n\n def build_surface_mesh(self, region):\n \"\"\"\n Build a surface mesh with marching cubes algorithm\n \"\"\"\n # TODO: export labeled regions as surfaces\n isosurface = self.actor.threshold(region, region).isosurface(region)\n isosurface.computeNormals().smoothLaplacian().alpha(0.2)\n isosurface.color(self.atlas_model.get_region_color(region))\n #self.current_region_surface\n #for region_id in regions:\n #isosurface, laplacian smooth and export\n \"\"\"\n import numpy as np\n import vtk\n from vtk.util.numpy_support import numpy_to_vtk\n\n from .volume import numpy_to_volume\n from .volume import volume_to_numpy\n\n def _marching_cubes(label, index=None, normal=True, gradient=True, force_close=True):\n \"\"\"\n\n # Refactored code from yuta-hi/volume-renderer/pyvr/data/surface.py\n '''\n if not isinstance(label, np.ndarray):\n label, spacing, origin = volume_to_numpy(label)\n else:\n spacing, origin = [1,1,1], [0,0,0]\n\n if force_close: # NOTE: make the closed surface\n _pad_width = 
10\n label = np.pad(label, pad_width=_pad_width, mode='constant', constant_values=0)\n else:\n _pad_width = 0\n\n origin -= _pad_width * np.array(spacing)\n label = numpy_to_volume(label, spacing, origin)\n\n surface = vtk.vtkDiscreteMarchingCubes()\n surface.SetInputData(label)\n\n if index is None:\n n_label = int(label.GetScalarRange()[1]) + 1\n surface.GenerateValues(n_label, 1, n_label)\n else:\n surface.GenerateValues(1, index, index)\n\n surface.ComputeNormalsOn()\n surface.ComputeGradientsOn()\n surface.Update()\n\n return surface.GetOutput()\n '''\n\n #def label_to_surface(self, volume, index=None, force_close=True):\n #return _marching_cubes(label=volume, index=index, force_close=force_close)\n\n\n # ------------------------------------------------------------------------- TEST CODE BELOW ONLY\n '''\n def clip_volume(self, obj, event):\n obj.GetPlanes(self.clipping_planes)\n \"\"\" plane = planes.GetPlane(0)\n pos = plane.GetOrigin()\n normal = plane.GetNormal()) \"\"\"\n vtk_n = self.clipping_planes.GetNormals()\n vtk_pts = self.clipping_planes.GetPoints()\n normals = [vtk_n.GetTuple(i) for i in range(vtk_n.GetNumberOfTuples())]\n points = [vtk_pts.GetPoint(i) for i in range(vtk_pts.GetNumberOfPoints())]\n self.volume_actor.mapper().SetClippingPlanes(self.clipping_planes)\n\n try:\n self.plot.remove(self.active_slicer.slice, render=False)\n except Exception:\n pass\n self.active_slicer.update(origin=points[0], normal=normals[0])\n\n self.plot.add([self.active_slicer.slice])\n\n def init_volume_cutter(self):\n \"\"\" \n if not self.plot.renderer:\n save_int = plt.interactive\n self.plot.show(interactive=0)\n self.plot.interactive = save_int \n \"\"\"\n volume = self.volume_actor\n\n widget = vtk.vtkPlaneWidget()\n widget.SetInteractor(self.plot.interactor)\n widget.SetPlaceFactor(1.0)\n widget.SetHandleSize(0.0025)\n self.plot.cutterWidget = widget\n #plt.renderer.AddVolume(vol)\n widget.SetInputData(volume.inputdata())\n \n # Only valid for boxWidget\n \"\"\" widget.OutlineCursorWiresOn()\n widget.GetSelectedOutlineProperty().SetColor(1, 0, 1)\n widget.GetOutlineProperty().SetColor(0.2, 0.2, 0.2)\n widget.GetOutlineProperty().SetOpacity(0.7) \"\"\"\n\n widget.SetRepresentationToOutline()\n \n self.clipping_planes.SetBounds(volume.GetBounds())\n widget.PlaceWidget(volume.GetBounds())\n \n #Only boxWidget\n #widget.InsideOutOn()\n \n #widget.GenerateClippedOuputOff()\n widget.AddObserver(\"InteractionEvent\", self.clip_volume)\n\n self.plot.interactor.Render()\n widget.On()\n\n self.plot.interactor.Start()\n widget.Off()\n self.plot.widgets.append(widget)\n '''","sub_path":"iblviewer/volume_view.py","file_name":"volume_view.py","file_ext":"py","file_size_in_byte":12045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"191592488","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'Ben Davis'\nSITENAME = u'TheBenDavis.net'\n\nTIMEZONE = 'America/New_York'\nDEFAULT_LANG = u'en'\n\n\n############################################################\n# Development-only settings\n\nSITEURL = ''\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\n\n\n############################################################\n# File management\n\n# static paths will be copied without parsing their 
contents\nSTATIC_PATHS = [\n 'media',\n 'blog_media',\n 'pubs',\n]\n\n# path-specific metadata\nEXTRA_PATH_METADATA = {\n 'media/CNAME': {'path': 'CNAME'},\n}\n\n\n############################################################\n# Paths\n\nPAGE_URL = '{slug}/'\nPAGE_SAVE_AS = '{slug}/index.html'\n\nARTICLE_URL = 'blog/{slug}/'\nARTICLE_SAVE_AS = 'blog/{slug}/index.html'\n\nDRAFT_URL = 'blog/drafts/{slug}/'\nDRAFT_SAVE_AS = 'blog/drafts/{slug}/index.html'\n\nTAGS_SAVE_AS = 'blog/tags.html'\nCATEGORIES_SAVE_AS = 'blog/categories.html'\n\nYEAR_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/index.html'\nMONTH_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/index.html'\nDAY_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/{date:%d}/index.html'\n\nBLOG_INDEX_URL = 'blog'\nBLOG_INDEX_SAVE_AS = 'blog/index.html'\n\nDIRECT_TEMPLATES = ('index', 'blog_index', 'tags', 'categories', 'archives')\nPAGINATED_DIRECT_TEMPLATES = ['blog_index']\n\nMENUITEMS = [\n ('Blog', '/blog/'),\n]\n\n\n############################################################\n# Configuration\n\nTHEME = '../pelican-themes/pelican-bootstrap3'\nBOOTSTRAP_THEME = 'flatly'\nPYGMENTS_STYLE = 'monokai'\n\nDISPLAY_CATEGORIES_ON_MENU = False\n\nDISPLAY_ARTICLE_INFO_ON_INDEX = False\n\nHIDE_SIDEBAR = True\nDISPLAY_RECENT_POSTS_ON_SIDEBAR = True\nDISPLAY_CATEGORIES_ON_SIDEBAR = False\nDISPLAY_TAGS_ON_SIDEBAR = False\n\nDEFAULT_PAGINATION = 10\nSUMMARY_MAX_LENGTH = 100\n\nFRONT_PAGE_POSTS = 1\n#FRONT_PAGE_CATEGORIES = ['research']\n\n\n############################################################\n# Content\n\n# use post filename for slug by default\nSLUGIFY_SOURCE = 'basename'\n\nCUSTOM_CSS = 'media/css/custom.css'\n\nFAVICON = 'media/favicon.ico'\n\n#GITHUB_URL = 'https://github.com/thebendavis'\n\n# Blogroll\nLINKS = (\n #('Pelican', 'http://getpelican.com/'),\n #('Python.org', 'http://python.org/'),\n #('Jinja2', 'http://jinja.pocoo.org/'),\n #('You can modify those links in your config file', '#'),\n)\n\n# Social widget\nSOCIAL = (\n #('Twitter', 'https://twitter.com/'),\n #('Another social link', '#'),\n)\n\nUSE_OPEN_GRAPH = False\n\nWELCOME_BLURB = \"\"\"\n
\nMy name is Ben Davis. I'm a computer security and mobile systems researcher.\nTo read more about me, my work, and my academic publications,\nclick here.\n
\n\n\"\"\"\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558081541","text":"#!/usr/bin/python\n\n# from flask import request #, make_response\n\nimport flask\nimport logging\nimport json\nimport math\nfrom flask import request, render_template\nfrom wordpress_orm import wp_session\nfrom wordpress_orm.entities.tag import Tag, TagRequest\nfrom ..wordpress_orm_extensions.scientific_paper import ScientificPaperRequest\n\nfrom ..utilities.pubmedIDpull import getMetaData\n\nfrom .. import app\nfrom .. import wordpress_api as api\n#from .. import wordpress_orm_logger as wp_logger\nfrom . import valueFromRequest\nfrom .navbar import navbar_template\nfrom .footer import populate_footer_template\n\n#logger = logging.getLogger(\"wordpress_orm\")\nwp_logger = logging.getLogger(\"wordpress_orm\")\napp_logger = logging.getLogger(\"sorghumbase\")\n\npublications_page = flask.Blueprint(\"publications_page\", __name__)\n\ndef getPapers(current_page, per_page, paper_tally, tag_filter, before, after, show_all, force_update, include):\n updatedPapers = []\n while show_all and per_page * (current_page-1) < paper_tally :\n updatedPapers += getPapers(current_page, per_page, paper_tally, tag_filter, before, after, False, force_update, include)\n current_page = current_page + 1\n if not show_all:\n paper_request = ScientificPaperRequest(api=api)\n if tag_filter:\n paper_request.tags = tag_filter\n if before:\n paper_request.before= before\n if after:\n paper_request.after= after\n if include:\n paper_request.include= include\n paper_request.per_page = per_page\n paper_request.page = current_page\n page_of_papers = paper_request.get()\n\n queryPubmed = []\n for p in page_of_papers :\n if len(p.s.pubmed_id) > 0 and (force_update or len(p.s.content) == 0) :\n queryPubmed.append(p)\n else :\n updatedPapers.append(p)\n\n if len(queryPubmed) > 0:\n info = getMetaData(queryPubmed)\n\n for paper in info:\n if not len(paper.s.paper_authors) == 0 :\n paper.s.content = paper.s.abstract + \"\\n\" + paper.s.paper_authors\n if paper.s.keywords != \"No keywords in Pubmed\":\n paper.s.content += \"\\n\" + paper.s.keywords\n paper_tags = []\n kwl = paper.s.keywords.split(',')\n kwd = [w.strip() for w in kwl]\n for keyword in kwd:\n new_tag = Tag(api=api)\n new_tag.s.name = keyword\n tag_id = str(new_tag.post)\n paper_tags.append(tag_id)\n paper.s.tags = ', '.join(paper_tags)\n paper.s.content += \"\\n\" + paper.s.pubmed_id\n if paper.s.doi:\n paper.s.content += \"\\n\" + paper.s.doi\n print(\"calling paper.update()\", paper.s.date)\n paper.update()\n print(\"updated paper\", paper.s.pubmed_id, paper.s.title)\n updatedPapers.append(paper)\n else :\n updatedPapers.append(paper)\n print(\"pubmed found no authors for\",paper.s.pubmed_id)\n return updatedPapers\n\nWAY_MORE_THAN_WE_WILL_EVER_HAVE = 100\n@publications_page.route('/publications')\ndef publications():\n ''' List of research papers '''\n templateDict = navbar_template('Research')\n show_all = valueFromRequest(key=\"show_all\", request=request, boolean=True) or False\n force_update = valueFromRequest(key=\"force_update\", request=request, boolean=True) or False\n tag_filter = request.args.getlist(\"tag\")\n before = valueFromRequest(key=\"before\", request=request)\n after = valueFromRequest(key=\"before\", request=request)\n current_page = valueFromRequest(key=\"page\", request=request, integer=True) or 1\n per_page 
= valueFromRequest(key=\"per_page\", request=request, integer=True) or 100\n keywords_limit = valueFromRequest(key=\"max_keywords\", request=request, integer=True) or 20\n include = valueFromRequest(key=\"include\", request=request, aslist=True)\n with api.Session():\n paper_count = ScientificPaperRequest(api=api)\n if tag_filter:\n paper_count.tags = tag_filter\n if before:\n paper_count.before = before\n if after:\n paper_count.after = after\n if include:\n paper_count.include = include\n paper_count.per_page = 1\n paper_count.page = 1\n paper_tally = paper_count.get(count=True)\n\n if force_update:\n updatedPapers = getPapers(current_page, per_page, paper_tally, tag_filter, before, after, show_all, force_update, include)\n else:\n updatedPapers = getPapers(1, 100, paper_tally, tag_filter, before, after, True, force_update, include)\n\n tag_freq = {}\n selected_tags = {}\n for tag in tag_filter:\n selected_tags[int(tag)] = 1\n for paper in updatedPapers:\n for tag in paper.s.tags:\n if tag not in tag_freq:\n tag_freq[tag] = 1\n else:\n tag_freq[tag] += 1\n first=0\n last=paper_tally\n if not show_all:\n first = per_page* (current_page-1)\n last = per_page * current_page\n if last > paper_tally:\n last = paper_tally\n templateDict['papers'] = updatedPapers[first:last]\n templateDict['page'] = current_page\n templateDict['n_pages'] = math.ceil(paper_tally / per_page)\n templateDict['n_papers'] = paper_tally\n templateDict['keywords_limit'] = keywords_limit\n if math.ceil(paper_tally / per_page) > 1:\n templateDict['filters_url'] = f\"/publications?per_page={per_page}&max_keywords={keywords_limit}\"\n templateDict['pagination_url'] = f\"/publications?per_page={per_page}&max_keywords={keywords_limit}\"\n templateDict['kw_url'] = f\"/publications?per_page={per_page}&page={current_page}\"\n if tag_filter:\n templateDict['pagination_url'] += f\"&tag={'&tag='.join(tag_filter)}\"\n templateDict['kw_url'] += f\"&tag={'&tag='.join(tag_filter)}\"\n if before:\n templateDict['pagination_url'] += f\"&before={before}\"\n templateDict['kw_url'] += f\"&before={before}\"\n if after:\n templateDict['pagination_url'] += f\"&after={after}\"\n templateDict['kw_url'] += f\"&after={after}\"\n tags_tally = 0\n min_2_tags = {key: value for (key, value) in sorted(tag_freq.items(), reverse=True, key=lambda t: t[1]) if value > 0 }\n tlist= list(min_2_tags.keys())\n tag_names = {}\n tags_per_page=100\n tag_page = 0\n for i in range(0,len(tlist), tags_per_page):\n tag_page += 1\n tag_getter = TagRequest(api=api)\n tag_getter.per_page = tags_per_page\n tag_getter.page = tag_page\n tag_getter.include = ','.join(map(str,tlist[i:i+tags_per_page]))\n tag_getter.populate_request_parameters()\n page_of_tags = tag_getter.get()\n for t in page_of_tags :\n tag_names[t.s.id] = t.s.name\n templateDict['tags'] = min_2_tags\n templateDict['tagname'] = tag_names\n templateDict['tagfreq'] = tag_freq\n templateDict['selected'] = selected_tags\n\n news_banner_media = api.media(slug=\"sorghum_panicle\")\n templateDict[\"banner_media\"] = news_banner_media\n\n populate_footer_template(template_dictionary=templateDict, wp_api=api, photos_to_credit=[news_banner_media])\n app_logger.debug(\" ============= controller finished ============= \")\n\n return render_template(\"research_filter.html\", 
**templateDict)\n","sub_path":"sorghum_webapp/sorghum_webapp/controllers/publications.py","file_name":"publications.py","file_ext":"py","file_size_in_byte":7852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"95957442","text":"#!/usr/bin/python\n# -*- coding: utf8\n'''Generator of momentum conservation laws combined from simple conservation laws for each node.\n'''\n\nimport comb\n\ndef Conservations(line_id_to_edge):\n \"\"\"Set of all the momentum conservation laws.\n\n Input: dictionary of line id to edge, e.g. {1: [3, 4], 2: [8, 9]}\n Output: set of conserving line ids.\n \"\"\"\n ret = set()\n conservations = GetNodesLines(line_id_to_edge)\n for n in range(1, len(conservations) + 1):\n for combination in comb.xCombinations(conservations, n):\n curr = set()\n for s in combination:\n curr.symmetric_difference_update(s)\n ret.add(frozenset(curr))\n ret.discard(frozenset([]))\n return ret\n\n\ndef GetNodesLines(line_id_to_edge):\n \"\"\"Returns list of sets of line ids connected to each node.\n \"\"\"\n node_to_line_ids = {}\n for line_id, edge in line_id_to_edge.iteritems():\n for node in edge:\n if node not in node_to_line_ids:\n node_to_line_ids[node] = set()\n node_to_line_ids[node].add(line_id)\n\n return node_to_line_ids.values()\n\n","sub_path":"phi4/graphs/conserv.py","file_name":"conserv.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"18271972","text":"# -*- coding: utf-8 -*-\nfrom verdict_parser.person import Person\nimport unittest\n\n\nclass PersonTest(unittest.TestCase):\n def test_name_getter_setter(self):\n rep = \"John Don\"\n p = Person(\"John Co. Ltd\", representative=rep)\n self.assertEqual(rep, p.get_representative())\n self.assertTrue(len(p.get_lawyers()) == 0)\n\n lawyers = [\"Alex\", \"Mary\"]\n q = Person(\"Bob\", representative=None, lawyers=lawyers)\n self.assertTrue(\"Alex\" in q.get_lawyers())\n self.assertTrue(\"\" == q.get_representative())\n\n def test_to_json_data(self):\n rep = \"John Don\"\n lawyers = [\"Alex Kesley\", \"Richard Thaler\"]\n p = Person(\"John Co. Ltd\", representative=rep, lawyers=lawyers)\n data = p.to_json_data()\n expected = {'lawyers': lawyers,\n 'name': 'John Co. 
Ltd',\n 'rep': 'John Don'}\n for key in expected:\n self.assertEqual(expected[key], data[key])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"verdict_parser/test/unittest_person.py","file_name":"unittest_person.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154844074","text":"\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport datetime\nimport mysql.connector as m\nfrom mysql.connector import Error\nmatplotlib.use('Agg')\nfrom pytz import timezone\nfrom flask import json\n\n\ndef googleDataCalc(twoDArray,colList):\n uu=[]\n for k in twoDArray:\n u=[]\n for key,value in k.items():\n try:\n temp=eval(value)\n except:\n if isinstance(value,datetime.datetime):\n value=value.date()\n u.append(value)\n else:\n u.append(temp)\n uu.append(u)\n uu.insert(0,colList)\n return json.dumps(uu,default=str)\n\n\n\ndef sqldataformating(sqldata, header=[]):\n npdata1 = np.array(sqldata)\n length = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0,0,0]\n for i in npdata1:\n for j in range(len(i)):\n if length[j] < len(str(i[j])):\n length[j] = len(str(i[j]))\n\n for j in range(len(header)):\n\n if length[j] < len(str(header[j])):\n length[j] = len(str(header[j]))\n\n renderdata = []\n headerdata = []\n for i in range(len(npdata1)):\n renderdata.append(\" \".join(\n list(map(lambda j, k: [str(j).ljust(k), str(j).rjust(k)][isinstance(j, int)], npdata1[i], length))))\n for i in range(len(header)):\n headerdata.append(\" \".join(list(map(lambda j, k: str(j).capitalize().center(k), header, length))))\n return [renderdata, headerdata]\n\n\ndef makebarchart(command, title=\"\", xlable=\"\", ylable=\"\", filename=\"anonymus\",chart_type=\"barh\"):\n con = m.connect(host=\"localhost\", user=\"username\", passwd=\"svasnnmsjdcfjdDFGH!5548)\", database=\"vikas\")\n cur = con.cursor()\n \n try:\n cur.execute(command)\n except Error as e:\n print(e)\n cur.close()\n con.close()\n else:\n data = cur.fetchall()\n npdata = np.array(data)\n\n plt.title(title,color=\"#ac77f9\",fontsize=18)\n plt.tick_params(colors=\"white\")\n plt.grid(color='black', linestyle='-.', linewidth=.2)\n plt.xlabel(xlable,color=\"white\",fontsize=14)\n plt.ylabel(ylable,color=\"white\",fontsize=14)\n plt.xticks(rotation=60,ha='right')\n \n axis2 = list(map(lambda i: str(i.item()), npdata[0:, 1:2]))[::-1]\n axis1 = list(map(lambda i: str(i.item()), npdata[0:, 0:1]))[::-1]\n if isinstance(axis1[0], datetime.date):\n axis1 = list(map(lambda i: str(i.year) + \":\" + str(i.month) + \":\" + str(i.day), axis1))\n if isinstance(axis2[0], datetime.date):\n axis2 = list(map(lambda i: str(i.year) + \":\" + str(i.month) + \":\" + str(i.day), axis2))\n\n try:\n eval(axis1[0])\n except:\n eval(axis2[0])\n axis2 = list(map(lambda i: int(i), axis2))\n\n if chart_type==\"barh\":\n plt.barh(axis1, axis2, align=\"center\", color=\"#ac77f9\")\n elif chart_type==\"bar\":\n plt.bar(axis1, axis2, align=\"center\", color=\"#ac77f9\")\n elif chart_type==\"plot\":\n plt.plot(axis1, axis2, color=\"b\", linewidth=3)\n\n\n\n else:\n axis1 = list(map(lambda i: int(i), axis1))\n plt.xlim(0, max(axis1) * 1.01)\n\n\n if chart_type == \"barh\":\n plt.barh(axis2, axis1, align=\"center\", color=\"#ac77f9\")\n\n elif chart_type == \"bar\":\n plt.bar(axis2, axis1, align=\"center\", color=\"#ac77f9\")\n\n elif chart_type==\"plot\":\n plt.plot(axis2, axis1, color=\"#ac77f9\", linewidth=3)\n\n\n 
plt.savefig(\"/mpd/site/public/static/{}.png\".format(filename), dpi=90, bbox_inches='tight', transparent=True)\n t = datetime.datetime.now()\n plt.close()\n\n return \"{}.png\".format(filename) + \"?\" + str(t)\n\n\ndef summarydata(state='', district='',country='',check=''):\n \n commands=[]\n if check==\"world\":\n commands.append(\"select sum(cases) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select max(date) from coronaworld1;\")\n else:\n commands=[]\n commands.append(\"select sum(cases) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select sum(deaths) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select sum(cases) from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n commands.append(\"select sum(deaths) from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n commands.append(\"select max(date) from coronaworld1;\")\n\n commands.append(\"select sum(new_deaths) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select sum(new_cases) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select new_cases from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n commands.append(\"select new_deaths from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n commands.append(\"select total_recovered from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n commands.append(\"select sum(total_recovered) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select total_cases_per_1m_population from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n ''' \n commands=[]\n commands.append(\"select sum(cases) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select sum(deaths) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select sum(cases) from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n commands.append(\"select sum(deaths) from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n commands.append(\"select max(date) from coronaworld1;\")\n\n commands.append(\"select sum(new_deaths) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select sum(new_cases) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select new_cases from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n commands.append(\"select new_deaths from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n commands.append(\"select total_recovered from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n commands.append(\"select sum(total_recovered) from coronaworld1 where date=(select max(date) from coronaworld1);\")\n commands.append(\"select total_cases_per_1m_population from coronaworld1 where date=(select max(date) from coronaworld1) and country_name='India';\")\n '''\n if district!='' and state!='':\n commands.append(f\"select confirmed,deceased from districtdaily where date=(select max(date) from 
districtdaily) and state ='{state}' and district='{district}';\")\n if district=='' and state!='':\n commands.append(f\"select totalconfirmed,deaths from coronastatedaily where date=(select max(date) from coronastatedaily) and loc ='{state}';\")\n if country!='' and state==\"\":\n commands.append(f\"select cases,deaths from coronaworld1 where date=(select max(date) from coronaworld1 ) and country_name='{country}';\")\n \n con = m.connect(host=\"localhost\", user=\"username\", passwd=\"svasnnmsjdcfjdDFGH!5548)\", database=\"vikas\")\n cur = con.cursor()\n\n result = []\n for command in commands:\n try:\n \n cur.execute(command)\n except Error as e:\n print(e)\n else:\n data = cur.fetchall()\n if len(data[0])==2:\n result.append(int(data[0][0]))\n result.append(int(data[0][1]))\n else:\n try:\n result.append(int(data[0][0]))\n except:\n newtimediffer=(datetime.datetime.now()-data[0][0])\n x=data[0][0]\n a='{:.0f} minutes'.format (( (newtimediffer.days*3600*24)+newtimediffer.seconds ) /60)\n result.append(a)\n result.append(x)\n cur.close()\n con.close()\n return result\n\n\ndef chart(axis1,axis2=[],filename=\"annonous\",chart_type=\"barh\", title=\"\", xlable=\"\", ylable=\"\"):\n\n if isinstance(axis1[0], datetime.date):\n axis1 = list(map(lambda i: str(i.year) + \":\" + str(i.month) + \":\" + str(i.day), axis1))\n if isinstance(axis2[0], datetime.date):\n axis2 = list(map(lambda i: str(i.year) + \":\" + str(i.month) + \":\" + str(i.day), axis2))\n\n \n plt.xlabel(xlable, color=\"white\",fontsize=14)\n plt.ylabel(ylable, color=\"white\",fontsize=14)\n plt.title(title, color=\"#ac77f9\",fontsize=18)\n plt.xticks(rotation=60,ha='right')\n plt.grid(color='black', linestyle='-.', linewidth=.2)\n try:\n eval(axis1[0])\n except:\n eval(axis2[0])\n axis2 = list(map(lambda i: int(i), axis2))\n\n if chart_type == \"barh\":\n plt.barh(axis1, axis2, align=\"center\", color=\"#ac77f9\")\n plt.tick_params(colors='white')\n\n elif chart_type == \"bar\":\n plt.bar(axis1, axis2, align=\"center\", color=\"#ac77f9\")\n\n plt.tick_params(colors='red')\n\n elif chart_type == \"plot\":\n plt.plot(axis1, axis2, color=\"b\", linewidth=3)\n\n plt.tick_params(colors='white')\n\n else:\n axis1 = list(map(lambda i: int(i), axis1))\n\n if chart_type == \"barh\":\n plt.barh(axis2, axis1, align=\"center\", color=\"#ac77f9\")\n plt.tick_params(colors='white')\n\n elif chart_type == \"bar\":\n plt.bar(axis2, axis1, align=\"center\", color=\"#ac77f9\")\n plt.tick_params(colors='white')\n\n elif chart_type == \"plot\":\n plt.plot(axis2, axis1, color=\"b\", linewidth=3)\n plt.xticks(rotation=60)\n plt.tick_params(colors='white')\n\n plt.savefig(\"/mpd/site/public/static/{}.png\".format(filename), dpi=90, bbox_inches='tight', transparent=True)\n t = datetime.datetime.now()\n plt.close()\n\n return \"{}.png\".format(filename) + \"?\" + str(t)\n\n\ndef getplace(string):\n place=\"\"\n cou=True\n for i in string:\n try:\n isinstance(int(i),int)\n if cou==False:\n break\n except:\n if not(\"-\" in i or i ==\"\" ):\n place=place+\" \"+i\n cou=False\n place=place.strip(\" \")\n\n return place\n\n\ndef getdailydifference(table,integeraxis,stringaxis,\n fc1=\"state\",pk1=\"Haryana\",fc2=\"district\",pk2=\"faridaad\",orderby='-date',limit=15):\n '''\n :param table: Districtdaily (capitalize format)\n :param integeraxis:\n :param stringaxis:\n :param fc1: filter column1 name (state)\n :param pk1: promary key value (Delhi)\n :param fc2: filter column2 name (District)\n :param pk2: primary key value (Shadara)\n :param orderby: (Default 
-desc)\n :param limit: (Default 15)\n :return: [[list_string],[list_value(in string format)]]\n '''\n from india.models import Coronastatedaily, Districtdaily,Coronaworld1\n #from world.models import Coronaworld1\n if \"Districtdaily\" in table:\n alldata=Districtdaily.objects.filter(**{fc1:pk1},**{fc2:pk2}).values(integeraxis,stringaxis).order_by(orderby)[:limit+1]\n if \"Coronastatedaily\" in table:\n alldata = Coronastatedaily.objects.filter(**{fc1: pk1}).values(integeraxis, stringaxis).order_by(\n orderby)[:limit]\n if \"Coronaworld1\" in table:\n alldata = Coronaworld1.objects.filter(**{fc1: pk1}).values(integeraxis, stringaxis).order_by(\n orderby)[:limit]\n\n\n axis1=list(map(lambda a:a[stringaxis],alldata))\n axis2=list(map(lambda a:a[integeraxis],alldata))\n\n for i in range(len(axis2)-1):\n axis2[i]=str(int(axis2[i])-int(axis2[i+1]))\n\n return [axis1[-2::-1],axis2[-2::-1]]\n\ndef rank(i):\n suf=\"th\"\n if i%10==1:\n suf=\"st\"\n if i%10==2:\n suf=\"nd\"\n if i%10==3:\n suf=\"rd\"\n if (i%100)//10==1:\n suf=\"th\"\n return suf\n\n\ndef getfactsdata(table,string,substring=\"\"):\n from india.models import Coronastatedaily, Districtdaily,Coronaworld1\n #from world.models import Coronaworld1\n from django.db.models import Max\n \n subdate = Districtdaily.objects.aggregate(Max('date'))\n factlist=[]\n\n\n if table==\"Districtdaily\":\n for criteria in ('active','confirmed','deceased','recovered'):\n alldata=Districtdaily.objects.filter(state=string,date=subdate['date__max']).values(\"district\",criteria).order_by(criteria)\n rank=1\n for i in alldata:\n if i['district']==substring:\n break\n else:\n rank=rank+1\n factlist.append({'rank':len(alldata)-rank+1,'total':len(alldata),'critaria':criteria.capitalize(),'location':substring,'parentlocation':string})\n if table==\"Coronastatedaily\":\n subdate = Coronastatedaily.objects.aggregate(Max('date'))\n for criteria in ('totalconfirmed','deaths','discharged'):\n\n alldata=Coronastatedaily.objects.filter(date=subdate['date__max']).values(\"loc\",criteria).order_by(criteria)\n\n rank=1\n for i in alldata:\n if i['loc']==string:\n break\n else:\n rank=rank+1\n if criteria==\"totalconfirmed\":\n criteria=\"total confirmed\"\n factlist.append({'rank':len(alldata)-rank+1,'total':len(alldata),'critaria':criteria.capitalize(),'location':string,'parentlocation':'India'})\n if table == \"Coronaworld1\":\n subdate = Coronaworld1.objects.aggregate(Max('date'))\n for criteria in ('cases', 'deaths', 'active_cases','total_cases_per_1m_population','tests_per_1m_population','deaths_per_1m_population'):\n\n alldata=Coronaworld1.objects.filter(date=subdate['date__max']).values(\"country_name\", criteria).order_by(criteria)\n\n rank = 1\n for i in alldata:\n if i['country_name'] == string:\n break\n else:\n rank = rank + 1\n factlist.append({'rank': len(alldata)-rank+1, 'total': len(alldata), 'critaria': criteria.replace(\"_\",\" \").capitalize(), 'location': string,\n 'parentlocation': 'Globe'})\n\n return factlist\n\n","sub_path":"django/abcd/coronadisplay.py","file_name":"coronadisplay.py","file_ext":"py","file_size_in_byte":14727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"164269920","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport math\ns=0\nn=input('digite o valor de n:')\nfor i in range(1,n+1,1):\n k=i/(i*i)\n if i%2==0:\n s=s-k\n else:\n 
s=s+k\nprint('%.5f'%s)","sub_path":"moodledata/vpl_data/28/usersdata/141/9778/submittedfiles/serie1.py","file_name":"serie1.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"295663341","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport subprocess\nimport sys\n\nimport pytest\n\nfrom airflow.jobs.backfill_job_runner import BackfillJobRunner\nfrom airflow.jobs.job import Job, run_job\nfrom airflow.models import DagBag, DagRun, TaskInstance\nfrom airflow.utils.db import add_default_pool_if_not_exists\nfrom airflow.utils.state import State\nfrom airflow.utils.timezone import datetime\nfrom airflow.utils.types import DagRunType\nfrom tests.test_utils import db\n\nDEV_NULL = \"/dev/null\"\nTEST_ROOT_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nTEST_DAG_FOLDER = os.path.join(TEST_ROOT_FOLDER, \"dags\")\nTEST_DAG_CORRUPTED_FOLDER = os.path.join(TEST_ROOT_FOLDER, \"dags_corrupted\")\nTEST_UTILS_FOLDER = os.path.join(TEST_ROOT_FOLDER, \"test_utils\")\nDEFAULT_DATE = datetime(2015, 1, 1)\nTEST_USER = \"airflow_test_user\"\n\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture\ndef check_original_docker_image():\n if not os.path.isfile(\"/.dockerenv\") or os.environ.get(\"PYTHON_BASE_IMAGE\") is None:\n raise pytest.skip(\n \"Adding/removing a user as part of a test is very bad for host os \"\n \"(especially if the user already existed to begin with on the OS), \"\n \"therefore we check if we run inside a the official docker container \"\n \"and only allow to run the test there. 
This is done by checking /.dockerenv file \"\n \"(always present inside container) and checking for PYTHON_BASE_IMAGE variable.\"\n )\n\n\n@pytest.fixture\ndef set_permissions(check_original_docker_image):\n airflow_home = os.environ[\"AIRFLOW_HOME\"]\n subprocess.check_call(\n 'find \"%s\" -exec sudo chmod og+w {} +; sudo chmod og+rx /root' % airflow_home, shell=True\n )\n yield\n subprocess.check_call(\n 'find \"%s\" -exec sudo chmod og-w {} +; sudo chmod og-rx /root' % airflow_home, shell=True\n )\n\n\n@pytest.fixture\ndef create_user(check_original_docker_image):\n try:\n subprocess.check_output(\n [\"sudo\", \"useradd\", \"-m\", TEST_USER, \"-g\", str(os.getegid())], stderr=subprocess.STDOUT\n )\n except subprocess.CalledProcessError as e:\n command = e.cmd[1]\n if e.returncode != 9: # pass: username already exists\n raise pytest.skip(\n f\"{e} Skipping tests.\\n\"\n f\"Does command {command!r} exists and the current user have permission to run \"\n f\"{command!r} without a password prompt (check sudoers file)?\\n\"\n f\"{e.stdout.decode() if e.stdout else ''}\"\n )\n yield\n subprocess.check_call([\"sudo\", \"userdel\", \"-r\", TEST_USER])\n\n\nclass BaseImpersonationTest:\n dagbag: DagBag\n\n @pytest.fixture(autouse=True)\n def setup_impersonation_tests(self, set_permissions, create_user):\n \"\"\"Setup test cases for all impersonation tests.\"\"\"\n db.clear_db_runs()\n db.clear_db_jobs()\n add_default_pool_if_not_exists()\n yield\n db.clear_db_runs()\n db.clear_db_jobs()\n\n @staticmethod\n def get_dagbag(dag_folder):\n \"\"\"Get DagBag and print statistic into the log.\"\"\"\n dagbag = DagBag(dag_folder=dag_folder, include_examples=False)\n logger.info(\"Loaded DAGs:\")\n logger.info(dagbag.dagbag_report())\n return dagbag\n\n def run_backfill(self, dag_id, task_id):\n dag = self.dagbag.get_dag(dag_id)\n dag.clear()\n\n job = Job()\n job_runner = BackfillJobRunner(job=job, dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n run_job(job=job, execute_callable=job_runner._execute)\n run_id = DagRun.generate_run_id(DagRunType.BACKFILL_JOB, execution_date=DEFAULT_DATE)\n ti = TaskInstance(task=dag.get_task(task_id), run_id=run_id)\n ti.refresh_from_db()\n\n assert ti.state == State.SUCCESS\n\n\nclass TestImpersonation(BaseImpersonationTest):\n @classmethod\n def setup_class(cls):\n cls.dagbag = cls.get_dagbag(TEST_DAG_FOLDER)\n\n def test_impersonation(self):\n \"\"\"\n Tests that impersonating a unix user works\n \"\"\"\n self.run_backfill(\"test_impersonation\", \"test_impersonated_user\")\n\n def test_no_impersonation(self):\n \"\"\"\n If default_impersonation=None, tests that the job is run\n as the current user (which will be a sudoer)\n \"\"\"\n self.run_backfill(\n \"test_no_impersonation\",\n \"test_superuser\",\n )\n\n def test_default_impersonation(self, monkeypatch):\n \"\"\"\n If default_impersonation=TEST_USER, tests that the job defaults\n to running as TEST_USER for a test without 'run_as_user' set.\n \"\"\"\n monkeypatch.setenv(\"AIRFLOW__CORE__DEFAULT_IMPERSONATION\", TEST_USER)\n self.run_backfill(\"test_default_impersonation\", \"test_deelevated_user\")\n\n @pytest.mark.execution_timeout(150)\n def test_impersonation_subdag(self):\n \"\"\"Tests that impersonation using a subdag correctly passes the right configuration.\"\"\"\n self.run_backfill(\"impersonation_subdag\", \"test_subdag_operation\")\n\n\nclass TestImpersonationWithCustomPythonPath(BaseImpersonationTest):\n @pytest.fixture(autouse=True)\n def setup_dagbag(self, monkeypatch):\n # Adds a path to 
sys.path to simulate running the current script with `PYTHONPATH` env variable set.\n monkeypatch.syspath_prepend(TEST_UTILS_FOLDER)\n self.dagbag = self.get_dagbag(TEST_DAG_CORRUPTED_FOLDER)\n monkeypatch.undo()\n yield\n\n def test_impersonation_custom(self, monkeypatch):\n \"\"\"\n Tests that impersonation using a unix user works with custom packages in PYTHONPATH.\n \"\"\"\n monkeypatch.setenv(\"PYTHONPATH\", TEST_UTILS_FOLDER)\n assert TEST_UTILS_FOLDER not in sys.path\n self.run_backfill(\"impersonation_with_custom_pkg\", \"exec_python_fn\")\n","sub_path":"tests/core/test_impersonation_tests.py","file_name":"test_impersonation_tests.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"265417314","text":"import os\nimport endpoints\nfrom gaeapp.users.models import User\nfrom gaeapp.users.models import UserToken\n\n\ndef get_current_user(raise_exception=True):\n token = os.environ.get('HTTP_AUTHORIZATION')\n user_token = UserToken.all().filter('token =', token).filter('subject =', 'auth').get()\n\n if (not user_token or\n not token and raise_exception):\n raise endpoints.UnauthorizedException('Unauthorized: Access is denied due to invalid credentials.')\n\n user = User.get_by_auth_token(user_token.user, token)\n\n if user is None and raise_exception:\n raise endpoints.UnauthorizedException('Unauthorized: Access is denied due to invalid credentials.')\n return user, token\n\n\ndef patch_instance(instance, request, reset_field=None):\n # reset for field ID e.g. cuz models doesn't have such field,\n # and it's not changeable\n if reset_field:\n request.reset(reset_field)\n\n for field in request.all_fields():\n value = request.get_assigned_value(field.name)\n if value is not None:\n if field.required and not value:\n raise endpoints.BadRequestException('Missing required field %s' % field.name)\n instance.__setattr__(field.name, value)\n return instance\n","sub_path":"src/gaeapp/endpoints_extras.py","file_name":"endpoints_extras.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"591375127","text":"import numpy as np\nfrom numpy.linalg import matrix_power\nfrom numpy.random import default_rng, PCG64\nfrom numba import jit\nimport time\nrg = default_rng()\nx = PCG64()\nf_pcg64 = x.ctypes.next_double\nstate_addr = x.ctypes.state_address\n\n\n## fibonacci ##\n@jit(nopython=True, cache=True)\ndef fib(n):\n if n < 2:\n return n\n return fib(n-1)+fib(n-2)\n\n## quicksort ##\n@jit(nopython=True, cache=True)\ndef qsort_kernel(a, lo, hi):\n i = lo\n j = hi\n while i < hi:\n pivot = a[(lo+hi) // 2]\n while i <= j:\n while a[i] < pivot:\n i += 1\n while a[j] > pivot:\n j -= 1\n if i <= j:\n a[i], a[j] = a[j], a[i]\n i += 1\n j -= 1\n if lo < j:\n qsort_kernel(a, lo, j)\n lo = i\n j = hi\n return a\n\n## randmatstat ##\n@jit(nopython=True, cache=True)\ndef randmatstat_core(t, a, b, c, d):\n v = np.zeros(t)\n w = np.zeros(t)\n for i in range(t):\n P = np.hstack((a[i], b[i], c[i], d[i]))\n Q = np.vstack((np.hstack((a[i], b[i])), np.hstack((c[i], d[i]))))\n v[i] = np.trace(matrix_power(P.T @ P, 4))\n w[i] = np.trace(matrix_power(Q.T @ Q, 4))\n return np.std(v)/np.mean(v), np.std(w)/np.mean(w)\n\ndef randmatstat(t):\n n = 5\n a = rg.standard_normal((t, n, n))\n b = rg.standard_normal((t, n, n))\n c = rg.standard_normal((t, n, n))\n d = rg.standard_normal((t, n, n))\n return randmatstat_core(t, a, b, c, d)\n\n## 
randmatmul ##\n# https://docs.scipy.org/doc/numpy/reference/random/extending.html\n@jit(nopython=True)\ndef pcg64_random(n, state):\n out = np.empty((n, n))\n for i in range(n):\n for j in range(n):\n out[i, j] = f_pcg64(state)\n return out\n\n@jit(nopython=True)\ndef randmatmul(n, state):\n a = pcg64_random(n, state)\n b = pcg64_random(n, state)\n return np.dot(a, b)\n\n## mandelbrot ##\n@jit(nopython=True, cache=True)\ndef abs2(z):\n return z.real*z.real + z.imag*z.imag\n\n@jit(nopython=True, cache=True)\ndef mandel(z):\n maxiter = 80\n c = z\n for n in range(maxiter):\n if abs2(z) > 4:\n return n\n z = z*z + c\n return maxiter\n\n@jit(nopython=True, cache=True)\ndef mandelperf():\n a = np.empty((21, 26), dtype=np.int64)\n for i in range(21):\n for r in range(26):\n a[i, r] = mandel(complex((r - 20)/10, (i - 10)/10)) \n return a\n\n@jit(nopython=True, cache=True)\ndef pisum():\n sum = 0.0\n for j in range(500):\n sum = 0.0\n for k in range(1, 10001):\n sum += 1.0/(k*k)\n return sum\n\n\n@jit(nopython=True, cache=True)\ndef hex(b):\n x = b // 16\n y = b % 16\n return x + 48 if x < 10 else x + 55, y + 48 if y < 10 else y + 55\n\n@jit(nopython=True, cache=True)\ndef toint(x, y):\n a1 = x - 48 if x < 58 else x - 55\n a2 = y - 48 if y < 58 else y - 55\n return a1 * 16 + a2\n\n@jit(nopython=True, cache=True)\ndef int2hex(a):\n t = a.shape[0]\n u = np.empty(8 * t, dtype=np.int32)\n a8 = np.frombuffer(a, dtype=np.uint8)\n for i in range(t):\n for j in range(4):\n u[8 * i + 6 - 2 * j], u[8 * i + 7 - 2 * j] = hex(a8[4 * i + j])\n return u\n\n@jit(nopython=True, cache=True)\ndef hex2int(v):\n t = v.shape[0] // 8\n b8 = np.empty(4 * t, dtype=np.uint8)\n for i in range(t):\n for j in range(4):\n b8[4 * i + j] = toint(v[8 * i + 6 - 2 * j], v[8 * i + 7 - 2 * j])\n return np.frombuffer(b8, dtype=np.uint32)\n\ndef parse_int(t):\n a = np.random.randint(0, 2 ** 32 - 1, t, dtype=np.uint32)\n u = int2hex(a)\n s = np.frombuffer(u, dtype=' 0:\n asc[d] = n % 10 + 48\n n = n // 10\n d += 1\n return d\n\n\n@jit(nopython=True, cache=True)\ndef printfd_core(buf, start, t, buf_size):\n num = 0\n asc = np.empty(20, dtype=np.int8)\n i = start\n while i < t:\n d = int2ascii(i, asc)\n for j in range(d - 1, -1, -1):\n buf[num] = asc[j]\n num += 1\n buf[num] = 32\n num += 1\n d = int2ascii(i + 1, asc)\n for j in range(d - 1, -1, -1):\n buf[num] = asc[j]\n num += 1\n buf[num] = 10\n num += 1\n i += 1\n if num > buf_size:\n break\n return num, i\n\ndef printfd(t):\n buf_size = 10000\n buf = np.empty(buf_size + 50, dtype='u1')\n start = 1\n with open(\"/dev/null\", \"wb\") as f:\n # with open(\"test.txt\", \"wb\") as f: #テスト用\n while start < t:\n num, start = printfd_core(buf, start, t, buf_size)\n f.write(buf[:num].tobytes())\n\n\ndef print_perf(name, time):\n print(\"numba,\" + name + \",\" + str(time*1000))\n\n## run tests ##\n\nif __name__==\"__main__\":\n\n mintrials = 5\n\n assert fib(20) == 6765\n tmin = float('inf')\n for i in range(mintrials):\n t = time.time()\n f = fib(20)\n t = time.time()-t\n if t < tmin: tmin = t\n print_perf(\"recursion_fibonacci\", tmin)\n\n tmin = float('inf')\n for i in range(mintrials):\n t = time.time()\n n = parse_int(1000)\n t = time.time()-t\n if t < tmin: tmin = t\n print_perf (\"parse_integers\", tmin)\n\n assert mandelperf().sum() == 14791\n tmin = float('inf')\n for i in range(mintrials):\n t = time.time()\n mandelperf()\n t = time.time()-t\n if t < tmin: tmin = t\n print_perf (\"userfunc_mandelbrot\", tmin)\n\n tmin = float('inf')\n for i in range(mintrials):\n lst = 
rg.random(5000)\n t = time.time()\n qsort_kernel(lst, 0, len(lst)-1)\n t = time.time()-t\n if t < tmin: tmin = t\n print_perf (\"recursion_quicksort\", tmin)\n\n tmin = float('inf')\n for i in range(mintrials):\n lst = rg.random(5000)\n t = time.time()\n lst.sort()\n t = time.time()-t\n if t < tmin: tmin = t\n print_perf (\"quicksort\", tmin)\n\n assert abs(pisum()-1.644834071848065) < 1e-6\n tmin = float('inf')\n for i in range(mintrials):\n t = time.time()\n pisum()\n t = time.time()-t\n if t < tmin: tmin = t\n print_perf (\"iteration_pi_sum\", tmin)\n\n (s1, s2) = randmatstat(1000)\n assert s1 > 0.5 and s1 < 1.0\n tmin = float('inf')\n for i in range(mintrials):\n t = time.time()\n randmatstat(1000)\n t = time.time()-t\n if t < tmin: tmin = t\n print_perf (\"matrix_statistics\", tmin)\n\n tmin = float('inf')\n for i in range(mintrials):\n t = time.time()\n C = randmatmul(1000, state_addr)\n assert C[0,0] >= 0\n t = time.time()-t\n if t < tmin: tmin = t\n print_perf (\"matrix_multiply\", tmin)\n\n tmin = float('inf')\n for i in range(mintrials):\n t = time.time()\n printfd(100000)\n t = time.time()-t\n if t < tmin: tmin = t\n print_perf (\"print_to_file\", tmin)\n","sub_path":"perf_numba.py","file_name":"perf_numba.py","file_ext":"py","file_size_in_byte":6992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"606959556","text":"class Solution:\n def convertToTitle(self, n: int) -> str:\n start = 64\n res = \"\"\n while n > 0:\n index = n % 26\n n = n // 26\n if index == 0:\n res = 'Z' + res\n n -= 1\n else:\n res = chr(start + index) + res\n\n return res\n \n\n\nif __name__ == \"__main__\":\n s = Solution()\n for n in range(1, 56):\n print(n, s.convertToTitle(n))","sub_path":"easy/convertToTitle.py","file_name":"convertToTitle.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"324448148","text":"#!/usr/bin/env python3\n\"\"\"\n@author: Timothy Baker\n@date: 02-11-2019\n\nfastq2fasta.py\n\nbiopython as dependency\n\n\"\"\"\nimport sys\nfrom Bio import SeqIO\n\n\ndef main():\n \"\"\" runs main script \"\"\"\n\n # not standard input\n # takes file name directly as argument\n fasta_text = sys.argv[1]\n\n # biopython to convert fastq to fasta\n # output name is fasta-output.txt\n SeqIO.convert(fasta_text, \"fastq\", 'fasta-output.txt', \"fasta\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fastq2fasta.py","file_name":"fastq2fasta.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"618750781","text":"from Screen.Screen import Screen\nimport View.ScreenRenderer as gui\nimport Sound\n\nclass CreditsScreen(Screen):\n\n def __init__(self,scale):\n super().__init__(\"Credits\")\n menu = gui.ButtonMenu(15, 5, 25, 7, 1, {\n \"Back\": \"Main\"\n }, function=self.changeScreen, scale=scale)\n self.guiObjects.append(menu)\n\n\n F = open(\"files/Credits\", \"r\")\n print(F)\n self.credits = gui.StringC(F.read(),42,50,font=\"mono-24\")\n F.close()\n self.guiObjects.append(self.credits)\n\n self.ytrans=5\n self.speed=2\n\n\n def changeTo(self,game):\n self.ytrans=5\n if Sound.musicChannel.get_sound() != Sound.getSound(\"m_menu\"):\n Sound.playMusic(\"m_menu\")\n\n\n def updateScreen(self,delta):\n self.ytrans-=delta*self.credits.height*self.speed\n self.credits.y=self.ytrans\n 
super().updateScreen(delta)\n","sub_path":"Screen/CreditsScreen.py","file_name":"CreditsScreen.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"245375142","text":"from lxml import html\nimport requests\n\npage = requests.get('http://polyratings.com/list.phtml')\ntree = html.fromstring(page.content)\n\n# This will create a list of buyers\nprofessor_list_one = tree.xpath('//a[@class=\"blknav\"]/text()')\n#professor_list_two = tree.xpath('//a[@class=\"nav2\"]/text()')\n\n#This will create a list of prices\n#prices = tree.xpath('//span[@class=\"item-price\"]/text()')\n\nfor x in range(len(professor_list_one)):\n print(professor_list_one[x])\n\n#print('Prices: ', prices)\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"444679358","text":"from bottle import route, run, response\nfrom time import sleep\nimport redis, configparser, thread, requests, sys, bottle\n\nfrom src import api \nclass EnableCors(object):\n name = 'enable_cors'\n api = 2\n\n def apply(self, fn, context):\n def _enable_cors(*args, **kwargs):\n # set CORS headers\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'\n response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\n if bottle.request.method != 'OPTIONS':\n # actual request; reply with the actual response\n return fn(*args, **kwargs)\n\n return _enable_cors\n\n\nclass StripPathMiddleware(object):\n def __init__(self, app):\n self.app = app\n def __call__(self, e, h):\n e['PATH_INFO'] = e['PATH_INFO'].rstrip('/')\n return self.app(e,h)\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nbhost = config.get('bottle','host')\nbport = config.getint('bottle','port')\nbdebug = config.getboolean('bottle','debug')\napp = bottle.app()\napp.install(EnableCors())\nrun(app=StripPathMiddleware(app),host=bhost, port=bport, debug=bdebug, server='cherrypy')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"53986894","text":"from django.forms import ModelForm\r\nfrom django import forms\r\nfrom sede.models import Sede,ConfiguracionSede\r\nfrom django.db import models\r\n\r\nclass SedeForm(ModelForm):\r\n class Meta:\r\n model = Sede\r\n exclude=('complejo','direccion')\r\n widgets={\r\n 'descripcion': forms.Textarea(attrs={'cols':10, 'class':'ckeditor', 'width': '500px'}),\r\n 'fecha_habilitacion': forms.TextInput(attrs={'class':'datePicker', 'readonly':'true'})\r\n }\r\n\r\nclass ConfiguracionSedeForm(ModelForm):\r\n class Meta:\r\n model = ConfiguracionSede\r\n exclude=('sede')\r\n ","sub_path":"sede/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"650259841","text":"from dataclasses import dataclass, field\nfrom dataclasses_json import dataclass_json\nfrom typing import Dict, List, Tuple, Union\n\n\n@dataclass_json\n@dataclass\nclass Hobby:\n type: str\n desc: str\n\n def pretty_desc(self):\n print(\"yoyo, this hobby is dope: {}\".format(self.desc))\n\n\n@dataclass_json\n@dataclass\nclass Person:\n name: str\n age: int\n 
hobbies: List[Hobby]\n\n\np = Person(\"Albert\", 42, [Hobby(\"pingpong\", \"slammo\"), Hobby(\"tennis\", \"whammo\")])\n\nprint(p.to_json())\n\nj = p.to_json()\n\nperson = Person.from_json(j)\n[h.pretty_desc() for h in person.hobbies]\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"362665574","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\nfrom utils.NiftiDataset import *\nimport utils.NiftiDataset as NiftiDataset\nfrom tqdm import tqdm\nimport datetime\nfrom networks.generator import *\nimport argparse\nimport matplotlib.pyplot as plt\nimport math\nimport scipy\n\n''' The script run the inference on the single early frame image by the user. Normalization is performed and images are scaled to interval values: 0-255.\n The path of the input image and the path to save the result must be specified in the command line. To have fewer patches to inference for one image,\n please increase the stride_inplane and stride_layer values. The values must be less than the image size to avoid errors. '''\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--Use_GPU', action='store_true', default=True, help='Use the GPU')\nparser.add_argument('--Select_GPU', type=str, default='0', help='Select the GPU')\nparser.add_argument(\"--image\", type=str, default='./Data_folder/volumes/HC014 test_MoCo_PET_Frame_25.nii', help='path to the .nii low dose image')\nparser.add_argument(\"--result\", type=str, default='./Data_folder/volumes/prova.nii', help='path to the .nii result to save')\nparser.add_argument(\"--gen_weights\", type=str, default='./History/weights/frame25.h5', help='generator weights to load')\n# Training parameters\nparser.add_argument(\"--resample\", default=False, help='Decide or not to resample the images to a new resolution')\nparser.add_argument(\"--new_resolution\", type=float, default=(2.086, 2.086, 2.031), help='New resolution')\nparser.add_argument(\"--input_channels\", type=float, nargs=1, default=1, help=\"Input channels\")\nparser.add_argument(\"--output_channels\", type=float, nargs=1, default=1, help=\"Output channels (Current implementation supports one output channel\")\nparser.add_argument(\"--patch_size\", type=int, nargs=3, default=[128, 128, 64], help=\"Input dimension for the generator\")\nparser.add_argument(\"--batch_size\", type=int, nargs=1, default=1, help=\"Batch size to feed the network (currently supports 1)\")\n# Inference parameters\nparser.add_argument(\"--stride_inplane\", type=int, nargs=1, default=64, help=\"Stride size in 2D plane\")\nparser.add_argument(\"--stride_layer\", type=int, nargs=1, default=16, help=\"Stride size in z direction\")\nargs = parser.parse_args()\n\nif args.Use_GPU is True:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.Select_GPU)\n\n\ndef from_numpy_to_itk(image_np,image_itk):\n image_np = np.transpose(image_np, (2, 1, 0))\n image = sitk.GetImageFromArray(image_np)\n image.SetOrigin(image_itk.GetOrigin())\n image.SetDirection(image_itk.GetDirection())\n image.SetSpacing(image_itk.GetSpacing())\n return image\n\n\ndef prepare_batch(image, ijk_patch_indices):\n image_batches = []\n for batch in ijk_patch_indices:\n image_batch = []\n for patch in batch:\n image_patch = image[patch[0]:patch[1], patch[2]:patch[3], patch[4]:patch[5]]\n image_batch.append(image_patch)\n\n image_batch = np.asarray(image_batch)\n image_batch = image_batch[:, :, :, :, np.newaxis]\n 
image_batches.append(image_batch)\n\n return image_batches\n\n\n# inference single image\ndef inference(write_image, model, image_path, result_path, resample, resolution, patch_size_x, patch_size_y, patch_size_z, stride_inplane, stride_layer, batch_size=1):\n\n # create transformations to image and labels\n transforms1 = [\n NiftiDataset.Resample(resolution, resample)\n ]\n\n transforms2 = [\n NiftiDataset.Padding((patch_size_x, patch_size_y, patch_size_z))\n ]\n\n # read image file\n reader = sitk.ImageFileReader()\n reader.SetFileName(image_path)\n image = reader.Execute()\n\n # normalize the image\n normalizeFilter = sitk.NormalizeImageFilter()\n resacleFilter = sitk.RescaleIntensityImageFilter()\n resacleFilter.SetOutputMaximum(255)\n resacleFilter.SetOutputMinimum(0)\n image = normalizeFilter.Execute(image) # set mean and std deviation\n image = resacleFilter.Execute(image)\n\n # create empty label in pair with transformed image\n label_tfm = sitk.Image(image.GetSize(), sitk.sitkFloat32)\n label_tfm.SetOrigin(image.GetOrigin())\n label_tfm.SetDirection(image.GetDirection())\n label_tfm.SetSpacing(image.GetSpacing())\n\n sample = {'image': image, 'label': label_tfm}\n\n for transform in transforms1:\n sample = transform(sample)\n\n # keeping track on how much padding will be performed before the inference\n image_array = sitk.GetArrayFromImage(sample['image'])\n pad_x = patch_size_x - (patch_size_x - image_array.shape[2])\n pad_y = patch_size_x - (patch_size_y - image_array.shape[1])\n pad_z = patch_size_z - (patch_size_z - image_array.shape[0])\n\n image_pre_pad = sample['image']\n\n for transform in transforms2:\n sample = transform(sample)\n\n image_tfm, label_tfm = sample['image'], sample['label']\n\n # convert image to numpy array\n image_np = sitk.GetArrayFromImage(image_tfm)\n label_np = sitk.GetArrayFromImage(label_tfm)\n\n label_np = np.asarray(label_np, np.float32)\n\n # unify numpy and sitk orientation\n image_np = np.transpose(image_np, (2, 1, 0))\n label_np = np.transpose(label_np, (2, 1, 0))\n\n # ----------------- Padding the image if the z dimension still is not even ----------------------\n\n if (image_np.shape[2] % 2) == 0:\n Padding = False\n else:\n image_np = np.pad(image_np, ((0,0), (0,0), (0, 1)), 'constant')\n label_np = np.pad(label_np, ((0, 0), (0, 0), (0, 1)), 'constant')\n Padding = True\n\n # ------------------------------------------------------------------------------------------------\n\n # a weighting matrix will be used for averaging the overlapped region\n weight_np = np.zeros(label_np.shape)\n\n # prepare image batch indices\n inum = int(math.ceil((image_np.shape[0] - patch_size_x) / float(stride_inplane))) + 1\n jnum = int(math.ceil((image_np.shape[1] - patch_size_y) / float(stride_inplane))) + 1\n knum = int(math.ceil((image_np.shape[2] - patch_size_z) / float(stride_layer))) + 1\n\n patch_total = 0\n ijk_patch_indices = []\n ijk_patch_indicies_tmp = []\n\n for i in range(inum):\n for j in range(jnum):\n for k in range(knum):\n if patch_total % batch_size == 0:\n ijk_patch_indicies_tmp = []\n\n istart = i * stride_inplane\n if istart + patch_size_x > image_np.shape[0]: # for last patch\n istart = image_np.shape[0] - patch_size_x\n iend = istart + patch_size_x\n\n jstart = j * stride_inplane\n if jstart + patch_size_y > image_np.shape[1]: # for last patch\n jstart = image_np.shape[1] - patch_size_y\n jend = jstart + patch_size_y\n\n kstart = k * stride_layer\n if kstart + patch_size_z > image_np.shape[2]: # for last patch\n kstart = 
image_np.shape[2] - patch_size_z\n kend = kstart + patch_size_z\n\n ijk_patch_indicies_tmp.append([istart, iend, jstart, jend, kstart, kend])\n\n if patch_total % batch_size == 0:\n ijk_patch_indices.append(ijk_patch_indicies_tmp)\n\n patch_total += 1\n\n batches = prepare_batch(image_np, ijk_patch_indices)\n\n for i in tqdm(range(len(batches))):\n batch = batches[i]\n\n pred = model.predict(batch, verbose=2, batch_size=1) # predict segmentation\n pred = np.squeeze(pred, axis=4)\n\n istart = ijk_patch_indices[i][0][0]\n iend = ijk_patch_indices[i][0][1]\n jstart = ijk_patch_indices[i][0][2]\n jend = ijk_patch_indices[i][0][3]\n kstart = ijk_patch_indices[i][0][4]\n kend = ijk_patch_indices[i][0][5]\n label_np[istart:iend, jstart:jend, kstart:kend] += pred[0, :, :, :]\n weight_np[istart:iend, jstart:jend, kstart:kend] += 1.0\n\n print(\"{}: Evaluation complete\".format(datetime.datetime.now()))\n # eliminate overlapping region using the weighted value\n label_np = (np.float32(label_np) / np.float32(weight_np) + 0.01)\n\n # removed the 1 pad on z\n if Padding is True:\n label_np = label_np[:, :, 0:(label_np.shape[2]-1)]\n\n # removed all the padding\n label_np = label_np[:pad_x, :pad_y, :pad_z]\n\n # convert back to sitk space\n label = from_numpy_to_itk(label_np, image_pre_pad)\n # ---------------------------------------------------------------------------------------------\n\n # save label\n writer = sitk.ImageFileWriter()\n\n if resample is True:\n\n print(\"{}: Resampling label back to original image space...\".format(datetime.datetime.now()))\n # label = resample_sitk_image(label, spacing=image.GetSpacing(), interpolator='bspline') # keep this commented\n label = resize(label, (sitk.GetArrayFromImage(image)).shape[::-1], sitk.sitkBSpline)\n label.SetDirection(image.GetDirection())\n label.SetOrigin(image.GetOrigin())\n label.SetSpacing(image.GetSpacing())\n\n else:\n label = label\n\n writer.SetFileName(result_path)\n if write_image is True:\n writer.Execute(label)\n print(\"{}: Save evaluate label at {} success\".format(datetime.datetime.now(), result_path))\n\n return label\n\n\nif __name__ == \"__main__\":\n\n input_dim = [args.batch_size, args.patch_size[0], args.patch_size[1], args.patch_size[2], args.input_channels]\n model = UNetGenerator(input_dim=input_dim)\n model.load_weights(args.gen_weights)\n\n result = inference(True, model, args.image, args.result, args.resample, args.new_resolution, args.patch_size[0],args.patch_size[1],args.patch_size[2], args.stride_inplane, args.stride_layer)\n","sub_path":"predict_single_image.py","file_name":"predict_single_image.py","file_ext":"py","file_size_in_byte":9726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"272088743","text":"from setuptools import setup\n\n\ntry:\n readme = open('README', 'r').read()\nexcept:\n readme = \"Kadot, unsupervised natural language processing.\"\n\n\nsetup(\n name='Kadot',\n version='0.1.9',\n packages=['kadot'],\n url='https://github.com/the-new-sky/Kadot',\n long_description=readme,\n download_url='https://github.com/the-new-sky/Kadot/archive/0.1.9.tar.gz',\n install_requires=['numpy', 'scipy', 'sklearn'],\n license='MIT',\n author='the_new_sky',\n author_email='lorisazerty@gmail.com',\n description='Kadot, unsupervised natural language processing.',\n keywords=['natural language processing', 'text classification', 'text generation', 'tokenizer', 'word embeddings'],\n classifiers=['Development Status :: 3 - Alpha', 'Intended Audience :: 
Developers',\n 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6', 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Text Processing :: General']\n)\n","sub_path":"pypi_install_script/Kadot-0.1.9.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"323590803","text":"import string\nfrom api_utils import user_channel_exists, create_user_channel, delete_user_channel\nfrom find_api_location import findAPILocation\n\n\ndef testFunction(domain_url, session):\n\n\t(status, briefing, message, api_location) = findAPILocation(domain_url)\n\tif status != 0:\n\t\treturn (status, briefing, message, None)\n\n\tusername = \"test_user_channel_not_following\"\n\n\tif delete_user_channel(session, domain_url, api_location, username):\n\n\t\tstatus = 0\n\t\tbriefing = \"Could successfully delete test user channel: %s@%s\" % (username, domain_url)\n\t\tmessage = \"We could successfully assert deletion of test user channel %s@%s.\" % (username, domain_url)\n\t\tmessage += \"
That test user channel was being used for testing purposes.\"\n\telse:\n\n\t\tif not user_channel_exists(session, domain_url, api_location, username):\t\t\t\n\n\t\t\tstatus = 2\n\t\t\tbriefing = \"The test user channel %s@%s was \" % (username, domain_url)\n\t\t\tbriefing += \"expected to exist but it didn't, so it could not be deleted again.\"\n\t\t\tmessage = briefing\n\n\t\t\tif ( create_user_channel(session, domain_url, api_location, username)\n\t\t\tand delete_user_channel(session, domain_url, api_location, username) ):\n\n\t\t\t\tstatus = 0\n\t\t\t\tadditional_info = \"But we could assert that user channel deletion is being \"\n\t\t\t\tadditional_info += \"properly implemented by your API server.\"\n\t\t\t\tbriefing += additional_info\n\t\t\t\tmessage += additional_info\n\t\t\t\tmessage += \"We created the expected test user channel and then were successful in deleting it again.\"\n\t\t\telse:\n\t\t\t\tmessage += \"The problem is we cannot assert that user channel deletion is working.\"\n\n\t\telse:\n\n\t\t\tstatus = 1\n\t\t\tbriefing = \"The test user channel %s@%s could not be deleted.\" % (username, domain_url)\n\t\t\tmessage = briefing\n\t\t\tmessage += \"
It seems like your HTTP API server is problematic. It had trouble deleting an \"\n\t\t\tmessage += \"user channel - that operation must work.\"\n\t\t\t\n\treturn (status, briefing, message, None)\n","sub_path":"integration/tests/delete_tuc_not_following.py","file_name":"delete_tuc_not_following.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"516912737","text":"import pickle\n\nimport numpy as np\n\nfrom ...ops.iou3d_nms import iou3d_nms_utils\nfrom ...utils import box_utils\n\n\nclass DataBaseSampler(object):\n def __init__(self, root_path, sampler_cfg, class_names, logger=None):\n self.root_path = root_path\n self.class_names = class_names\n self.sampler_cfg = sampler_cfg\n self.logger = logger\n self.db_infos = {}\n for class_name in class_names:\n self.db_infos[class_name] = []\n\n for db_info_path in sampler_cfg.DB_INFO_PATH:\n db_info_path = self.root_path.resolve() / db_info_path\n with open(str(db_info_path), 'rb') as f:\n infos = pickle.load(f)\n [self.db_infos[cur_class].extend(infos[cur_class]) for cur_class in class_names]\n\n for func_name, val in sampler_cfg.PREPARE.items():\n self.db_infos = getattr(self, func_name)(self.db_infos, val)\n\n self.sample_groups = {}\n self.sample_class_num = {}\n self.limit_whole_scene = sampler_cfg.get('LIMIT_WHOLE_SCENE', False)\n for x in sampler_cfg.SAMPLE_GROUPS:\n class_name, sample_num = x.split(':')\n if class_name not in class_names:\n continue\n self.sample_class_num[class_name] = sample_num\n self.sample_groups[class_name] = {\n 'sample_num': sample_num,\n 'pointer': len(self.db_infos[class_name]),\n 'indices': np.arange(len(self.db_infos[class_name]))\n }\n\n def __getstate__(self):\n d = dict(self.__dict__)\n del d['logger']\n return d\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n\n def filter_by_difficulty(self, db_infos, removed_difficulty):\n new_db_infos = {}\n for key, dinfos in db_infos.items():\n pre_len = len(dinfos)\n new_db_infos[key] = [\n info for info in dinfos\n if info['difficulty'] not in removed_difficulty\n ]\n if self.logger is not None:\n self.logger.info('Database filter by difficulty %s: %d => %d' % (key, pre_len, len(new_db_infos[key])))\n return new_db_infos\n\n def filter_by_min_points(self, db_infos, min_gt_points_list):\n for name_num in min_gt_points_list:\n name, min_num = name_num.split(':')\n min_num = int(min_num)\n if min_num > 0 and name in db_infos.keys():\n filtered_infos = []\n for info in db_infos[name]:\n if info['num_points_in_gt'] >= min_num:\n filtered_infos.append(info)\n\n if self.logger is not None:\n self.logger.info('Database filter by min points %s: %d => %d' %\n (name, len(db_infos[name]), len(filtered_infos)))\n db_infos[name] = filtered_infos\n\n return db_infos\n\n def sample_with_fixed_number(self, class_name, sample_group):\n \"\"\"\n Args:\n class_name:\n sample_group:\n Returns:\n\n \"\"\"\n sample_num, pointer, indices = int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices']\n if pointer >= len(self.db_infos[class_name]):\n indices = np.random.permutation(len(self.db_infos[class_name]))\n pointer = 0\n\n sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer: pointer + sample_num]]\n pointer += sample_num\n sample_group['pointer'] = pointer\n sample_group['indices'] = indices\n return sampled_dict\n\n @staticmethod\n def put_boxes_on_road_planes(gt_boxes, road_planes, calib):\n \"\"\"\n Only validate in KITTIDataset\n 
Args:\n gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]\n road_planes: [a, b, c, d]\n calib:\n\n Returns:\n \"\"\"\n a, b, c, d = road_planes\n center_cam = calib.lidar_to_rect(gt_boxes[:, 0:3])\n cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b\n center_cam[:, 1] = cur_height_cam\n cur_lidar_height = calib.rect_to_lidar(center_cam)[:, 2]\n mv_height = gt_boxes[:, 2] - gt_boxes[:, 5] / 2 - cur_lidar_height\n gt_boxes[:, 2] -= mv_height # lidar view\n return gt_boxes, mv_height\n\n def add_sampled_boxes_to_scene(self, data_dict, sampled_gt_boxes, total_valid_sampled_dict):\n gt_boxes_mask = data_dict['gt_boxes_mask']\n gt_boxes = data_dict['gt_boxes'][gt_boxes_mask]\n gt_names = data_dict['gt_names'][gt_boxes_mask]\n points = data_dict['points']\n if self.sampler_cfg.get('USE_ROAD_PLANE', False):\n sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes(\n sampled_gt_boxes, data_dict['road_plane'], data_dict['calib']\n )\n data_dict.pop('calib')\n data_dict.pop('road_plane')\n\n obj_points_list = []\n for idx, info in enumerate(total_valid_sampled_dict):\n file_path = self.root_path / info['path']\n obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape(\n [-1, self.sampler_cfg.NUM_POINT_FEATURES])\n\n obj_points[:, :3] += info['box3d_lidar'][:3]\n\n # TODO Modified by Pointpainting\n # if points.shape[1] >= 8:\n # score = np.zeros((obj_points.shape[0],4))\n # if 'Pedestrian' in info['path']:\n # score[:, 3] = 1.0\n # obj_points = np.concatenate((obj_points, score), axis=1)\n # elif 'Cyclist' in info['path']:\n # score[:, 1] = 1.0\n # obj_points = np.concatenate((obj_points, score), axis=1)\n # elif 'Car' in info['path']:\n # score[:, 2] = 1.0\n # obj_points = np.concatenate((obj_points, score), axis=1)\n # else:\n # score[:, 0] = 1.0\n # obj_points = np.concatenate((obj_points, score), axis=1)\n\n if self.sampler_cfg.get('USE_ROAD_PLANE', False):\n # mv height\n obj_points[:, 2] -= mv_height[idx]\n\n obj_points_list.append(obj_points)\n\n obj_points = np.concatenate(obj_points_list, axis=0)\n sampled_gt_names = np.array([x['name'] for x in total_valid_sampled_dict])\n\n large_sampled_gt_boxes = box_utils.enlarge_box3d(\n sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH\n )\n points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes)\n points = np.concatenate([obj_points, points], axis=0)\n gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)\n gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0)\n data_dict['gt_boxes'] = gt_boxes\n data_dict['gt_names'] = gt_names\n data_dict['points'] = points\n return data_dict\n\n def __call__(self, data_dict):\n \"\"\"\n Args:\n data_dict:\n gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]\n\n Returns:\n\n \"\"\"\n gt_boxes = data_dict['gt_boxes']\n gt_names = data_dict['gt_names'].astype(str)\n existed_boxes = gt_boxes\n total_valid_sampled_dict = []\n for class_name, sample_group in self.sample_groups.items():\n if self.limit_whole_scene:\n num_gt = np.sum(class_name == gt_names)\n sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)\n if int(sample_group['sample_num']) > 0:\n sampled_dict = self.sample_with_fixed_number(class_name, sample_group)\n\n sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)\n\n if self.sampler_cfg.get('DATABASE_WITH_FAKELIDAR', False):\n sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)\n\n iou1 = 
iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7])\n iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7])\n iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0\n iou1 = iou1 if iou1.shape[1] > 0 else iou2\n valid_mask = ((iou1.max(axis=1) + iou2.max(axis=1)) == 0).nonzero()[0]\n valid_sampled_dict = [sampled_dict[x] for x in valid_mask]\n valid_sampled_boxes = sampled_boxes[valid_mask]\n\n existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes), axis=0)\n total_valid_sampled_dict.extend(valid_sampled_dict)\n\n sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :]\n if total_valid_sampled_dict.__len__() > 0:\n data_dict = self.add_sampled_boxes_to_scene(data_dict, sampled_gt_boxes, total_valid_sampled_dict)\n\n data_dict.pop('gt_boxes_mask')\n return data_dict\n","sub_path":"detector/pcdet/datasets/augmentor/database_sampler.py","file_name":"database_sampler.py","file_ext":"py","file_size_in_byte":9084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"273806573","text":"#!/usr/bin/env python \n# encoding: utf-8 \n\n\"\"\" \n@version: v1.0 \n@author: xag \n@license: Apache Licence \n@contact: xinganguo@gmail.com \n@site: http://www.xingag.top \n@software: PyCharm \n@file: level.py \n@time: 2018/7/13 17:36 \n@description:Level的使用\n\"\"\"\n\n# -----------------------------------------------\n# Level等级数值\n# CRITICAL 50 -危急\n# FATAL 50 - 严重\n# ERROR 40 - 错误\n# WARNING 30 - 警告\n# WARN 30 - 警告\n# INFO 20 - 显示\n# DEBUG 10 - Debug\n# NOTSET 0 -\n# ----------------------------------------------\n\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level=logging.WARNING)\n\n# 系统便只会输出 level 数值大于或等于该 level 的的日志结果\n# 测试\nlogger.debug('Debugging')\nlogger.critical('Critical Something')\nlogger.error('Error Occurred')\nlogger.warning('Warning exists')\nlogger.info('Finished')\n\n# 由于设置了WARNING,这里只会显示critical、error、warning\n","sub_path":"Python/基础/Logging-日志模块/3.level.py","file_name":"3.level.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"631125857","text":"# pylint:disable=line-too-long\n\"\"\"\nThe tool to check the availability or syntax of domain, IP or URL.\n\n::\n\n\n ██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗\n ██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝\n ██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗\n ██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝\n ██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗\n ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝\n\nTests of the PyFunceble.helpers.hash\n\nAuthor:\n Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom\n\nSpecial thanks:\n https://pyfunceble.github.io/special-thanks.html\n\nContributors:\n https://pyfunceble.github.io/contributors.html\n\nProject link:\n https://github.com/funilrys/PyFunceble\n\nProject documentation:\n https://pyfunceble.readthedocs.io/en/master/\n\nProject homepage:\n https://pyfunceble.github.io/\n\nLicense:\n::\n\n\n Copyright 2017, 2018, 2019, 2020, 2021 Nissar Chababy\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless 
required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n# pylint: enable=line-too-long\n\nfrom unittest import TestCase\nfrom unittest import main as launch_tests\n\nfrom PyFunceble.helpers import File, Hash\n\n\nclass TestHash(TestCase):\n \"\"\"\n Tests of the PyFunceble.helpers.hash.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Setup everything needed for the tests.\n \"\"\"\n\n self.file = \"this_file_should_be_deleted\"\n self.data_to_write = [\"Hello World!\", \"Thanks for using PyFunceble\"]\n\n self.expected_hashed = {\n \"md5\": \"ba2e0e1774c2e60e2327f263402facd4\",\n \"sha1\": \"b5c8520cd2c422019997dc6fdbc9cb9d7002356e\",\n \"sha224\": \"863c46d5ed52b439da8f62a791e77c0cbbfb7d92af7c5549279f580d\",\n \"sha384\": \"6492f4b5732e0af4b9edf2c29ee4622c62ee418e5d6e0f34b13cb80560a28256c6e21e949119872d26d2327fc112a63b\", # pylint: disable=line-too-long\n \"sha512\": \"f193ad6ee2cfbecd580225d8e6bfb9df1910e5ca6135b21b03ae208a007f71e9b57b55e299d27157551a18ef4dfdde23c96aaea796064846edc6cd25ac7eaf7f\", # pylint: disable=line-too-long\n \"sha512_224\": \"7c43867047942e9d441f5e3e29ad63ad579bc038bf9eba925ff6896b\",\n }\n\n def test_hash_data_not_string_nor_bytes(self):\n \"\"\"\n Tests the method which let us hash a given data for the case\n that we given a non string or bytes input.\n \"\"\"\n\n given = [1, 2, 3, 4]\n\n self.assertRaises(ValueError, lambda: Hash().data(given))\n\n def test_hash_unknown_algo(self):\n \"\"\"\n Tests the hash class for the case that we give an unknown algo.\n \"\"\"\n\n given = \"\\n\".join(self.data_to_write)\n\n self.assertRaises(ValueError, lambda: Hash(algo=\"Hello, World!\").data(given))\n\n def test_hash_data(self):\n \"\"\"\n Tests the method wich let us hash a given data.\n \"\"\"\n\n given = \"\\n\".join(self.data_to_write)\n\n for algo, expected in self.expected_hashed.items():\n self.assertEqual(expected, Hash(algo=algo).data(given))\n self.assertEqual(expected, Hash(algo=algo).data(given.encode()))\n\n def test_hash_file_not_exists(self):\n \"\"\"\n Tests the method which let us the content of a given file.\n \"\"\"\n\n file_instance = File(self.file)\n\n file_instance.delete()\n\n expected = False\n actual = file_instance.exists()\n\n self.assertEqual(expected, actual)\n\n expected = None\n for algo in self.expected_hashed:\n self.assertEqual(\n expected,\n Hash(algo=algo).file(self.file),\n )\n\n def test_hash_file(self):\n \"\"\"\n Tests the method which let us the content of a given file.\n \"\"\"\n\n file_instance = File(self.file)\n\n expected = False\n actual = file_instance.exists()\n\n self.assertEqual(expected, actual)\n\n file_instance.write(\"\\n\".join(self.data_to_write))\n\n expected = True\n actual = file_instance.exists()\n\n self.assertEqual(expected, actual)\n\n for algo, expected in self.expected_hashed.items():\n self.assertEqual(\n expected,\n Hash(algo=algo).file(self.file),\n )\n\n file_instance.delete()\n\n expected = False\n actual = file_instance.exists()\n\n self.assertEqual(expected, actual)\n\n\nif __name__ == \"__main__\":\n launch_tests()\n","sub_path":"tests/test_helpers_hash.py","file_name":"test_helpers_hash.py","file_ext":"py","file_size_in_byte":5852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"154905828","text":"import falcon\n\nfrom api.base import Endpoint\nfrom api.conf import TRANSFER_FEE\nfrom api.db.models import Account, FinanceOperationEnum, Transaction\nfrom api.misc.utils import exchange_currency\nfrom api.misc.validators import request_schema, response_schema\nfrom api.schemas import RequestTransferMoneySchema, ResponseTransactionSchema\n\nfrom .hooks import auth_only, extract_account\n\n\n@falcon.before(auth_only)\n@falcon.before(extract_account)\nclass AccountEndpoint(Endpoint):\n @request_schema(RequestTransferMoneySchema())\n @response_schema(ResponseTransactionSchema())\n def on_post(self, req, resp, current_user, dst_account, **kwargs):\n \"\"\"\n ---\n tags: [Finances]\n summary: Money transfer\n security: [BearerAuth: []]\n description: >+\n Transfer money TO the {account_id} target account.\n You cannot transfer money between accounts with\n different currencies.
\n Note: transfer money between your own accounts is\n free.
\n But if you want to move your money somewhere else,\n transfer fee will be charged. Amount depends on currency.\n parameters:\n - in: path\n name: accountId\n example: 4\n description: Whoose account is going to receive money\n schema:\n type: integer\n requestBody:\n content:\n application/json:\n schema: RequestTransferMoneySchema\n responses:\n 200:\n description: You will get outcoming transaction object that\n means transfer has been completed\n content:\n application/json:\n schema: ResponseTransactionSchema\n 401:\n description: You must authorize first\n 403:\n description: Something went wrong (see examples)\n content:\n application/json:\n examples:\n Bad source account:\n description: It happend when accountId from body belongs\n to another user or isn't exist\n value: {code: 'BAD_SOURCE_ACCOUNT'}\n Bad target account:\n description: Target account from path isn't exist or it\n account is equal to account from\n request's body\n value: {code: 'BAD_TARGET_ACCOUNT'}\n You have not enought money:\n description: When your have less money than you request\n value: {code: 'NOT_ENOUGHT_MONEY'}\n Result won't make sense:\n description: Math error converts target sum to zero\n value: {code: 'BAD_EXCHANGING_RESULT'}\n \"\"\"\n\n src_account = Account.get_or_none(Account.id == req.data.account_id,\n Account.user == current_user)\n if not src_account:\n raise falcon.HTTPForbidden(code='BAD_SOURCE_ACCOUNT')\n\n if not dst_account or dst_account.id == src_account.id:\n raise falcon.HTTPForbidden(code='BAD_TARGET_ACCOUNT')\n\n src_amount = dst_amount = req.data.amount\n\n # Transfer fee will be charging based on source currency\n transfer_fee = TRANSFER_FEE.get(src_account.currency.value, 0) \\\n if dst_account.user != current_user else 0\n\n # We have to convert dst_amount when currencies are different\n if dst_account.currency != src_account.currency:\n dst_amount = exchange_currency(\n src_amount,\n src_account.currency.name, # Use currency code, ex. 
USD\n dst_account.currency.name\n )\n\n if not dst_amount:\n raise falcon.HTTPForbidden(code='BAD_EXCHANGING_RESULT')\n\n # We're using serializable isolation level\n with self.app.db.transaction():\n # How much money do we have?\n current_amount = Account\\\n .select()\\\n .where(Account.id == src_account.id)\\\n .for_update()\\\n .get().balance\n\n # Are we able to pay?\n debit = src_amount + transfer_fee\n if current_amount - debit < 0:\n raise falcon.HTTPForbidden(code='NOT_ENOUGHT_MONEY')\n\n # Decrease our balanace and add record(s) into transactions logs\n Account.update(balance=Account.balance - debit)\\\n .where(Account.id == src_account.id)\\\n .execute()\n\n outcoming_transaction = Transaction.create(\n account=src_account,\n operation=FinanceOperationEnum.TRANSFER_OUTCOMING,\n participant_account=dst_account.id,\n amount=src_amount\n )\n\n # Add record about fee charging as an another operation\n if transfer_fee:\n Transaction.create(\n account=src_account,\n operation=FinanceOperationEnum.FEE_TRANSFER,\n amount=transfer_fee\n )\n\n # Increase target balance\n Account.update(balance=Account.balance + dst_amount)\\\n .where(Account.id == dst_account.id)\\\n .execute()\n\n Transaction.create(\n account=dst_account,\n operation=FinanceOperationEnum.TRANSFER_INCOMING,\n participant_account=src_account,\n amount=dst_amount\n )\n\n resp.data = outcoming_transaction\n","sub_path":"api/endpoints/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":5716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"459724952","text":"import Crypto.Util.number\nfrom Crypto.Util.number import bytes_to_long, long_to_bytes\nfrom random import randint\n\nbits = 256\nprint(\"Số bit yêu cầu của bài là:\",bits)\n# Khóa công khai (p, alpha, beta), khóa bí mật a\np=Crypto.Util.number.getPrime(bits, randfunc=Crypto.Random.get_random_bytes)\n\"\"\"alpha là phần tử nguyên thủy theo mod p nhưng với bài tập này số p quá lớn\nmáy tính của em k đáp ứng được việc tìm phần tử nguyên thủy nên em đã chọn một số \nbất kì làm phần tử nguyên thùy. Code để tìm phần tử nguyên thủy với các số nhỏ hơn\nđược gửi ở file primitiveRoots.py đính kèm\"\"\"\nalpha = 2\n#Chọn số a\na = 5090\nbeta = pow(alpha, a, p)\nprint(\"\\nKhóa công khai (p,alpha,beta)\\n\"+\"p =\",p,\"\\nalpha =\",alpha,\"\\nbeta =\",beta)\nprint(\"-----------------\")\nprint(\"Khóa bí mật a\\n\"+\"a =\",a)\n#Tìm số k thuộc Zp-1\nk = randint(0, p-2)\n\n#Tiến hành mã hóa\nmsg=\"Khanh-18020699\"\nprint(\"\\nBản tin là:\",msg)\nm = bytes_to_long(msg.encode('utf-8'))\ny1 = pow(alpha, k, p)\ny2 = (pow(beta, k, p) * (m%p)) % p\n#Tiến hành giải mã\ndecipher = (y2*Crypto.Util.number.inverse(pow(y1,a,p),p))%p\nm1 = long_to_bytes(decipher)\nm1 = m1.decode('utf-8')\nprint(\"\\nBản mã Elgamal:\",\"\\ny1 =\",y1,\"\\ny2 =\",y2)\nprint(\"\\nGiải mã Elgamal:\",decipher,\"tương đương bản tin:\",m1)\n\n","sub_path":"elgamal.py","file_name":"elgamal.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"217704882","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom migrate.changeset import UniqueConstraint\nfrom sqlalchemy import MetaData, Table\n\nfrom nova.db.sqlalchemy import utils\n\n\nUC_NAME = \"uniq_vlan_x_deleted\"\nCOLUMNS = ('vlan', 'deleted')\nTABLE_NAME = 'networks'\n\n\ndef upgrade(migrate_engine):\n meta = MetaData(bind=migrate_engine)\n t = Table(TABLE_NAME, meta, autoload=True)\n\n utils.drop_old_duplicate_entries_from_table(migrate_engine, TABLE_NAME,\n True, *COLUMNS)\n uc = UniqueConstraint(*COLUMNS, table=t, name=UC_NAME)\n uc.create()\n\n\ndef downgrade(migrate_engine):\n utils.drop_unique_constraint(migrate_engine, TABLE_NAME, UC_NAME, *COLUMNS)\n","sub_path":"nova/db/sqlalchemy/migrate_repo/versions/158_add_networks_uc.py","file_name":"158_add_networks_uc.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"6219348","text":"'''\nCreated on Oct 18, 2016\n\n@author: Truong Huynh Tien\n\nIf you are using the algorithms, policies or workload included in the code please cite the following paper:\nN. Quang-Hung and N. Thoai, \"EMinRET: Heuristic for Energy-Aware VM Placement with Fixed Intervals and Non-preemption,\" 2015 International Conference on Advanced Computing and Applications (ACOMP), Ho Chi Minh City, 2015, pp. 98-105. DOI: 10.1109/ACOMP.2015.27\nURL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7422380&isnumber=7422358\n\n Authors: Truong Huynh Tien, Nguyen Quang-Hung\n'''\n\ntry:\n import configparser #for python 3xx\nexcept:\n import ConfigParser #for python 2xx\n \nimport json\n\nclass EMINTRE_Scheduler:\n '''\n classdocs : giai thuat EMinTRE\n '''\n def __init__(self):\n '''\n Constructor giai thuat lap lich EMinTRE\n '''\n #lay danh sach thong tin ve instance\n with open('./output/list_instances.json') as input_file:\n self.instances_dictionary = json.load(input_file)\n \n #doc thong so allocation ratio tu file infor.conf\n try :\n config = configparser.RawConfigParser() #for python 3xx\n except NameError:\n config = ConfigParser.RawConfigParser() #for python 2xx\n \n config.read('./input/infor.conf')\n \n self.Cpu_allocation_ratio = int(config.get('allocation_ratio', 'cpu_allocation_ratio'))\n self.Ram_allocation_ratio = int(config.get('allocation_ratio', 'ram_allocation_ratio'))\n self.Disk_allocation_ratio = int(config.get('allocation_ratio', 'disk_allocation_ratio'))\n\n def filter_host(self, instance_id, list_hosts):\n '''\n @note: chon ra cac host du tai nguyen de chay isntance\n - do phuc tap: O(n) voi n la so host\n '''\n #tao danh sach chua cac host du tai nguyen\n list_filted_hosts = []\n \n #danh gia tung host trong danh sach\n for host_id in list_hosts.keys(): \n #tinh toan tai nguyen con lai cua moi may \n vcpu_rest = (list_hosts[host_id]['vcpus'] - list_hosts[host_id]['vcpus_used'])*self.Cpu_allocation_ratio\n ram_rest = (list_hosts[host_id]['memory_mb'] - list_hosts[host_id]['memory_mb_used'])*self.Ram_allocation_ratio\n disk_rest = (list_hosts[host_id]['local_gb'] - list_hosts[host_id]['local_gb_used'])*self.Disk_allocation_ratio\n \n #so sanh tai nguyen moi may con lai voi tai 
nguyen instance can \n        if vcpu_rest >= self.instances_dictionary[instance_id]['VCPUs_core'] and \\\n            ram_rest >= self.instances_dictionary[instance_id]['Ram_mb'] and \\\n            disk_rest >= self.instances_dictionary[instance_id]['Disk_gb']:\n            \n            list_filted_hosts = list_filted_hosts + [host_id]\n    return list_filted_hosts\n    \n    def canSwap(self, instance_id_1, isntance_id_2):\n        '''\n        @note: placeholder for a later extension, the migration problem\n        '''\n        \n    def find_instance(self, time_curr, list_instances):\n        '''\n        @note: finds every instance whose time_start matches the time point under consideration\n                - complexity: O(n) where n is the number of instances\n        '''\n        # create the list that will hold the matching instances\n        list_instance = []\n        \n        #examine each instance in the list\n        for instance in list_instances.keys():\n            if float(list_instances[instance]['Time_start_s']) == time_curr:\n                #add the id of the matching instance to the list\n                list_instance = list_instance + [instance]\n        return list_instance\n    \n    def mapping(self, instance_id, hosts_list):\n        '''\n        @note: finds a suitable host to run the instance\n                - complexity: O(n) where n is the number of hosts\n        '''\n        #get the list of hosts eligible to run the instance\n        list_filter_hosts = self.filter_host(instance_id, hosts_list)\n        \n        #if the list is empty, no host has enough resources to run the instance\n        # bail out and report the scheduling as failed \n        if 0 == len(list_filter_hosts):\n            return -1\n        \n        #variable holding the id of the chosen host\n        allocated_host_id = -1\n        \n        #if exactly one host qualifies => run the instance on that host\n        if 1 == len(list_filter_hosts):\n            allocated_host_id = list_filter_hosts[0]\n            return allocated_host_id\n\n        MinRET = 999999999\n        \n        #sort the hosts\n        #because of how Python's for loop iterates,\n        #this is needed so the hosts are examined in one fixed order\n        #across different runs\n        list_filter_hosts.sort()\n        \n        #examine each qualifying host \n        for host in list_filter_hosts:\n            #compute time_different\n            time_diff = max(self.instances_dictionary[instance_id]['Time_end_s'] - hosts_list[host]['Time_end_s'], 0) / (max(self.instances_dictionary[instance_id]['Time_end_s'] - hosts_list[host]['Time_end_s'], 0) + (hosts_list[host]['Time_end_s'] - hosts_list[host]['Time_start_s'])) \n            \n            #compute the RET weight\n            RET = self.tinh_RET(time_diff, instance_id, host, hosts_list)\n            \n            #compare to find the smallest RET among the hosts\n            if(MinRET > RET):\n                allocated_host_id = host \n                MinRET = RET\n        \n        #return the id of the host with the smallest RET\n        return allocated_host_id\n    \n    def scheduler(self):\n        '''\n        @note: loads the inputs for the scheduling routine;\n                call this method to run the scheduler\n        \n        '''\n        #build the master working copy of the host list\n        #changes made during scheduling are not written out immediately;\n        #they are applied to this list and only saved to file at the very end,\n        #to avoid repeated reads and writes against the data\n        with open('./input/hosts_list.json') as input_instances: \n            hosts = json.load(input_instances)\n        \n        #load the list of key events\n        with open('./output/list_time_events.json') as time: \n            time_events = json.load(time)\n        \n        #load the list of instances\n        with open('./output/list_instances.json') as time: \n            instances = json.load(time) \n        \n        #run the scheduling pass\n        self.scheduling(hosts, time_events, instances)\n        \n        return 1\n    \n    def scheduling(self, hosts,time_events, instances):\n        '''\n        @note: the scheduling routine, driven by the list of key events;\n                the goal is to place the instances in order of their start times\n                params: - hosts: the list of hosts\n                - time_events: the list of events to examine, \n                    usually the start and end times of the instances\n                - instances: the list of instances to schedule\n                - complexity: O(T x I x N)\n                    where:\n                        T: len(time_events), the number of time points examined\n                        I: the number of instances\n                        N: the number of hosts\n        ''' \n        #index that controls our walk over the event list\n        time_curr = 0\n        \n        #waiting queue for any instance that could not be scheduled \n        waiting_instance = []\n        \n        #walk the event list until it has been fully processed\n        while(time_curr < len(time_events)):\n            #take one time point to examine\n            now = time_events[time_curr]\n            #find every instance whose start_time matches the current time point\n            list_instance_id = self.find_instance(now, instances)\n\n            #free resources for any instance that has finished\n            #scans every host\n            self.update_hosts(now, hosts)\n            \n            #handle each instance\n            for instance_id in list_instance_id:\n                #get the id of a host that fits the instance\n                host_id = self.mapping(instance_id, hosts)\n                \n                #if no host fits, append the instance to waiting_instance\n                #otherwise add it to that host's instance list \n                if -1 == host_id:\n\n                    #build the record describing the instance\n                    element = {}\n                    element['1_instance_id'] = instance_id\n                    element['2_time_start_s'] = self.instances_dictionary[instance_id]['Time_start_s']\n                    element['4_waiting_time_s'] = 0\n                    element['3_time_end_s'] = self.instances_dictionary[instance_id]['Time_end_s']\n                    element['5_status'] = 'waiting'\n                    waiting_instance = waiting_instance + [element]\n                    continue\n                else:\n                    self.update_host(instance_id, host_id, hosts, now)\n            '''\n            #re-examine the instances in waiting_instance\n            for element in waiting_instance:\n                #get the id of a host that fits the instance\n                host_id = self.mapping(element['1_instance_id'], hosts)\n\n                #if no host fits, the instance keeps waiting\n                #otherwise remove it from waiting_instance and update the host\n                if -1 == host_id:\n                    continue \n                else: \n                    self.update_host(element['1_instance_id'], host_id, hosts, now)\n                    #also update the instance's Time_end_s, since Time_end_s has changed\n                    time_end_s = now + element['3_time_end_s'] - element['2_time_start_s']\n                    time_events.append(time_end_s)\n                    time_events.sort()\n                    waiting_instance.remove(element)\n            ''' \n            #the loop moves on to the next time point in the event list    \n            time_curr = time_curr + 1\n\n        #save the results to JSON files\n        with open('./output_scheduler/scheduler.json', 'w') as schdlr:\n            json.dump(hosts, schdlr, sort_keys=True, indent=4)\n        with open('./output_scheduler/waiting_instance.json', 'w') as wi:\n            json.dump(waiting_instance, wi, sort_keys=True, indent=4)    \n        \n    def update_host(self, instance_id, host_id, hosts_list, time_now):\n        '''\n        @note: updates a host's bookkeeping when one more instance is placed on it\n                complexity: O(1)\n        '''    \n        #update the chosen host\n        hosts_list[host_id]['memory_mb_used'] = hosts_list[host_id]['memory_mb_used'] + self.instances_dictionary[instance_id]['Ram_mb']\n        hosts_list[host_id]['vcpus_used'] = hosts_list[host_id]['vcpus_used'] + self.instances_dictionary[instance_id]['VCPUs_core']\n        hosts_list[host_id]['local_gb_used'] = hosts_list[host_id]['local_gb_used'] + self.instances_dictionary[instance_id]['Disk_gb']\n        hosts_list[host_id]['running_vms'] = hosts_list[host_id]['running_vms'] + 1\n        hosts_list[host_id]['runned_vms'] = hosts_list[host_id]['runned_vms'] + 1\n        \n        if(hosts_list[host_id]['Time_end_s'] < time_now - self.instances_dictionary[instance_id]['Time_start_s'] + self.instances_dictionary[instance_id]['Time_end_s']):\n            hosts_list[host_id]['Time_end_s'] = time_now - self.instances_dictionary[instance_id]['Time_start_s'] + self.instances_dictionary[instance_id]['Time_end_s']\n        \n        #build the record describing the instance, to be stored on the host\n        element = {}\n        element['1_instance_id'] = instance_id\n        element['2_time_start_s'] = time_now\n        element['4_waiting_time_s'] = time_now - self.instances_dictionary[instance_id]['Time_start_s']\n        element['3_time_end_s'] = time_now - self.instances_dictionary[instance_id]['Time_start_s'] + self.instances_dictionary[instance_id]['Time_end_s']\n        element['5_status'] = 'running'\n        \n        #append the instance's record to the host's instance list\n        hosts_list[host_id]['List_instance'] = hosts_list[host_id]['List_instance'] + [element]\n        \n    def update_hosts(self, time, hosts_list):\n        '''\n        @note: updates every host's bookkeeping at a given time point,\n                i.e. checks which instances have finished running and releases their resources back to the host\n                - complexity: O(n) where n is the number of hosts in hosts_list \n        '''\n        for host_id in hosts_list.keys():\n            for element in hosts_list[host_id]['List_instance']:\n                if element['3_time_end_s'] == time:\n                    #release the resources back to the host\n                    hosts_list[host_id]['memory_mb_used'] = hosts_list[host_id]['memory_mb_used'] - self.instances_dictionary[element['1_instance_id']]['Ram_mb']\n                    hosts_list[host_id]['vcpus_used'] = hosts_list[host_id]['vcpus_used'] - self.instances_dictionary[element['1_instance_id']]['VCPUs_core']\n                    hosts_list[host_id]['local_gb_used'] = hosts_list[host_id]['local_gb_used'] - self.instances_dictionary[element['1_instance_id']]['Disk_gb']\n                    hosts_list[host_id]['running_vms'] = hosts_list[host_id]['running_vms'] - 1\n                    element['5_status'] = 'shutdown'\n                \n                if element['3_time_end_s'] < time and \\\n                    element['5_status'] == 'running' :\n                    #release the resources back to the host\n                    hosts_list[host_id]['memory_mb_used'] = hosts_list[host_id]['memory_mb_used'] - self.instances_dictionary[element['1_instance_id']]['Ram_mb']\n                    hosts_list[host_id]['vcpus_used'] = hosts_list[host_id]['vcpus_used'] - self.instances_dictionary[element['1_instance_id']]['VCPUs_core']\n                    hosts_list[host_id]['local_gb_used'] = hosts_list[host_id]['local_gb_used'] - self.instances_dictionary[element['1_instance_id']]['Disk_gb']\n                    hosts_list[host_id]['running_vms'] = hosts_list[host_id]['running_vms'] - 1\n                    element['5_status'] = 'shutdown'    \n        \n    def tinh_RET(self, time_diff, instance_id, host_id, hosts_list):\n        '''\n        computes the RET weight of a host\n        - complexity: O(1)\n        '''\n        #fetch the allocation ratios\n        Cpu_allocation_ratio = self.Cpu_allocation_ratio\n        Ram_allocation_ratio = self.Ram_allocation_ratio\n        Disk_allocation_ratio = self.Disk_allocation_ratio\n        \n        #read the weights from the config file\n        try :\n            config = configparser.RawConfigParser() #for python 3xx\n        except NameError:\n            config = ConfigParser.RawConfigParser() #for python 2xx\n        config.read('./input/infor.conf')\n\n        w_ram = int(config.get('weight', 'ram_weight'))\n        w_disk = int(config.get('weight', 'disk_weight'))\n        w_cpu = int(config.get('weight', 'cpu_weight'))\n        w_time = int(config.get('weight', 'time_weight'))\n        \n        ram_RE = (\n            (\n                1 - (\n                    (\n                        hosts_list[host_id]['memory_mb_used']*Ram_allocation_ratio + self.instances_dictionary[instance_id]['Ram_mb']\n                    )\n                    /(hosts_list[host_id]['memory_mb']*Ram_allocation_ratio)\n                )\n            )*w_ram\n        )**2\n        \n        cpu_RE = (\n            (\n                1 - (\n                    (\n                        hosts_list[host_id]['vcpus_used']*Cpu_allocation_ratio + self.instances_dictionary[instance_id]['VCPUs_core']\n                    )\n                    /(hosts_list[host_id]['vcpus']*Cpu_allocation_ratio)\n                )\n            )*w_cpu\n        )**2\n\n        disk_RE = (\n            (\n                1 - (\n                    (\n                        hosts_list[host_id]['local_gb_used']*Disk_allocation_ratio + self.instances_dictionary[instance_id]['Disk_gb']\n                    )\n                    /(hosts_list[host_id]['local_gb']*Disk_allocation_ratio)\n                )\n            )*w_disk\n        )**2 \n        \n        RE_total = ram_RE + cpu_RE + disk_RE\n        RET = RE_total + (time_diff*w_time)**2\n        \n        return RET ","sub_path":"scheduler/EMinTRE.py","file_name":"EMinTRE.py","file_ext":"py","file_size_in_byte":16552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"807922","text":"# Dallin Romney\r\n\r\n# Ask for user input for file name and open file\r\nfname = input(\"Enter file name: \")\r\n\r\n# Default file name\r\nif len(fname) < 1 : fname = \"mbox-short.txt\"\r\n\r\n# Open file\r\nfh = open(fname)\r\n\r\ncountDict = dict() # Empty dictionary for \"histogram\"\r\n\r\n# For each 'From' line in the file, extract the hour of the timestamp\r\n# and count it in the dictionary\r\nfor line in fh:\r\n    if line.startswith('From '):\r\n        words = line.split()\r\n        time = words[5]\r\n        times = time.split(':')\r\n        hour = times[0]\r\n        countDict[hour] = countDict.get(hour, 0) + 1\r\n\r\n# Print results\r\nfor k, v in sorted(countDict.items()):\r\n    print(k, v)\r\n\r\n","sub_path":"02 - Python Data Structures/07 Counting in Dictionaries (Histograms).py","file_name":"07 Counting in Dictionaries (Histograms).py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"340279394","text":"import codecs\nimport re\nimport collections\nimport pkg_resources\nimport random\n\nKOR_DICT = './korean.dict'\n\npronunciations = None\nlookup = None\nrhyme_lookup = None\n\ndef _stream(resource_name):\n    stream = pkg_resources.resource_stream(__name__, resource_name)\n    return stream\n\ndef dict_stream():\n    stream = _stream(KOR_DICT)\n    return stream\n\ndef parse_kordict(korhd):\n    pronunciations = list()\n    for line in korhd:\n        line = line.strip().decode('utf-8')\n        line = line.replace('\\ufeff', '')\n        word, phones = line.split(\" \", 1)\n        pronunciations.append((word, phones))\n    return pronunciations\n\ndef init_kodict(filehandle=None):\n    global pronunciations, lookup\n    if pronunciations is None:\n        if filehandle is None:\n            filehandle = dict_stream()\n        pronunciations = parse_kordict(filehandle)\n        filehandle.close()\n        lookup = collections.defaultdict(list)\n        for word, parses in pronunciations:\n            if lookup.get(word) is None:\n                lookup[word].append(parses)\n\ndef sorting_rhyme(rhyme_list):\n    parse_word = collections.defaultdict(list)\n    for line in rhyme_list:\n        parses = parses_for_word(line)\n        parse = parses.replace(\" \",'')\n        reverse_chars = parse[::-1]\n        parse_word[line].append(reverse_chars)\n\n    sorting_word = sorted(parse_word.items(), key=lambda t: t[1])\n    print(sorting_word)\n\n    result = []\n    for line in sorting_word:\n        result.append(line[0].replace('\\r', ''))\n\n    return result\n\ndef rhymes(word):\n    init_kodict()\n    parse = parses_for_word(word)\n    rhyme_point = collections.defaultdict(list)\n    for word, parses in pronunciations:\n        point = calculate_point(parse,parses)\n        rhyme_point[word].append(point)\n    rhyme_word = sorted(rhyme_point.items(),key= lambda t:t[1] ,reverse = True)\n    rhymes = []\n    for tuple in rhyme_word:\n        if tuple[1][0] > 80 and tuple[1][0] != 100 :\n            rhymes.append(tuple[0])\n    return rhymes\n\ndef nonefinding(rhymescheme,rhyme_list):\n    parse_sch = parses_for_word(rhymescheme)\n    score = []\n    for word in rhyme_list:\n        parses = parses_for_word(word)\n        point = calculate_point(parse_sch,parses)\n        score.append(point)\n    selections = []\n    for i in score:\n        num = score.index(i)\n        if i == max(score):\n            selections.append(rhyme_list[num])\n            score[num]=0\n    select = random.choice(selections)\n    float_rhyme = rhyme_list.index(select)\n    return float_rhyme\n\n\n    # parse is the reference word\n    # parses is a word from the dictionary\ndef calculate_point(parse,parses):\n    point = 0 # total score\n\n    plus = 5 # bonus weight\n    # split here to count the number of characters in each word\n    parse_list = parse.split()\n    parses_list = parses.split()\n\n    length = len(parse_list)\n    if len(parses_list) == len(parse_list):\n        point += 10*3 # bonus if the lengths match\n    else:\n        if len(parses_list) > len(parse_list):\n            #the bigger the length difference, the lower the score\n            num = len(parses_list) - len(parse_list)\n            if num < 3:\n                point += 10*(3- num)\n\n            length = len(parse_list)\n            index = -(length*3+(length-1))\n            parses = parses[index:]\n        else:\n            num = len(parse_list) - len(parses_list)\n            if num < 3:\n                point += 10 * (3 - num)\n\n            length = len(parses_list)\n            index = -(length * 3 + (length - 1))\n            parse = parse[index:]\n\n    parse_list = parse.split()\n    parses_list = parses.split()\n    for i in range(length - 1, -1, -1):\n        c_point = 0 # score per syllable\n        # initial consonant (choseong)\n        if parses_list[i][0] == parse_list[i][0]:\n            c_point +=1\n        # medial vowel (jungseong) \n        if parses_list[i][1] == parse_list[i][1]:\n            c_point +=3\n        # final consonant (jongseong)\n        if parses_list[i][2] == parse_list[i][2]:\n            c_point +=2\n        c_point*=plus # apply the bonus weight\n        if plus != 1:\n            plus-=1\n        point+=c_point\n\n    whole_point = 0\n    for i in range(length):\n        whole_point+=(5-i)*6\n    whole_point+=30\n\n    if point == 0 :\n        point +=1\n    return ( point / whole_point ) * 100\n\ndef parses_for_word(word) :\n    BASE_CODE, CHOSUNG, JUNGSUNG = 44032, 588, 28\n\n    CHOSUNG_LIST = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']\n    JUNGSUNG_LIST = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ',\n                     'ㅢ', 'ㅣ']\n    # TODO: group similar medial vowels!!!\n    JONGSUNG_LIST = [' ', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ',\n                     'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']\n\n    split_word = list(word)\n\n    cha = []\n\n    for split in split_word:\n        if re.match('.*[ㄱ-ㅎㅏ-ㅣ가-힣]+.*', split) is not None:\n            char_code = ord(split) - BASE_CODE\n            char1 = int(char_code / CHOSUNG)\n            char2 = int((char_code - (CHOSUNG * char1)) / JUNGSUNG)\n            char3 = int((char_code - (CHOSUNG * char1) - (JUNGSUNG * char2)))\n\n            if JONGSUNG_LIST[char3] == ' ':\n                cha.append(CHOSUNG_LIST[char1] + JUNGSUNG_LIST[char2] + \"P\")\n            else:\n                if char1 < 0 or char2 < 0 or char3 < 0 :\n                    continue\n                cha.append(CHOSUNG_LIST[char1] + JUNGSUNG_LIST[char2] + JONGSUNG_LIST[char3])\n\n    parse_word = \" \".join(cha)\n\n    return parse_word\n\n","sub_path":"pronouncing_kr.py","file_name":"pronouncing_kr.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"256339816","text":"def special_for(iterable):\n\titerator=iter(iterable)\n\twhile True:\n\t\ttry:\n\t\t\tprint(iterator)\n\t\t\tprint(next(iterator)*2)\n\t\texcept StopIteration:\n\t\t\tbreak\n\nspecial_for([1,2,3])\n# all are at the same memory location\n\nclass MyGen():\n\tcurrent=0\n\tdef __init__(self,first,last):\n\t\tself.first=first\n\t\tself.last=last\n\n\tdef __iter__(self):\n\t\treturn self\n\t\n\tdef __next__(self):\n\t\tif MyGen.current 2:\n            await self.message.channel.send(\"The command only requires a name/mention as a parameter.\")\n        elif len(args[0]) == 2:\n            user = args[0][1]\n            if not user.find('<@'):\n                try:\n                    me = False\n                    kicked = False\n                    id = user.split('@')[1].split('>')[0]\n                    id = id.replace('!', '')\n                    guild = self.message.author.guild\n                    for member in guild.members:\n                        user = str(member.id)\n                        if user == id:\n                            name = member.display_name\n                            if not name.find('Autumn'):\n                                await self.message.channel.send(\"I can't kick myself from the server, silly.\")\n                                me = True\n                            else:\n                                await member.kick()\n                                kicked = True\n                            break\n                    if not me:\n                        if not kicked:\n                            await self.message.channel.send(\"A user with that name could not be found.\")\n                        else:\n                            await self.message.channel.send(member.mention + \" has been kicked from the server!\")\n                except: pass\n            #print(user)\n            else:\n                await self.message.channel.send(\"That's not a real person!\")\n        else:\n            await self.message.channel.send(\"The command requires a valid name or mention to function.\")\n    else:\n        await self.message.channel.send(\"Only an administrator can kick people from a server.\")\n","sub_path":"Autumn/plugins/discord-autumn_kick.py","file_name":"discord-autumn_kick.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"26182845","text":"\"\"\"\nThe Discriminator for our simple MNIST GAN.\n\"\"\"\n\nimport argparse\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision as tv\n\n\nclass Discriminator(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self._fc1 = nn.Linear(784, 150, bias=False)\n        self._fc2 = nn.Linear(150, 1, bias=False)\n\n        nn.init.normal_(self._fc1.weight, mean=0, std=0.1)\n        nn.init.normal_(self._fc2.weight, mean=0, std=0.1)\n\n    def forward(self, x):\n        x = x.view(-1, 784)\n        x = F.relu( self._fc1(x) )\n        x = torch.sigmoid( self._fc2(x) )\n        return x\n\n\ndef _test_main(args):\n    discriminator = Discriminator()\n    if args.cuda >= 0:\n        x = torch.cuda.FloatTensor(args.batch_size, 1, 28, 28).uniform_(0,1)\n    else:\n        x = torch.FloatTensor(args.batch_size, 1, 28, 28).uniform_(0,1)\n    pred = discriminator(x)\n    print(\"Predictions:\")\n    print(pred)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--batch-size\", type=int, default=8)\n    parser.add_argument(\"--cuda\", type=int, default=-1,\n                        help=\"Cuda device number, select -1 for cpu\")\n    args = parser.parse_args()\n    _test_main(args)\n\n","sub_path":"1_FundamentalsOfGANs/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"250819467","text":"import unittest\n\nfrom srp import *\n# The unit tests I wrote while refactoring\nclass SrpTest(unittest.TestCase):\n    def test_string(self):\n        user = Usuario(\n            nombre='Ramanujan',\n            edad=25,\n            direccion='Calle X, #Y Colonia Z'\n        )\n        self.assertEqual(user.serializar(\"string\"),\"Nombre: Ramanujan\\nEdad: 25\\nDireccion: Calle X, #Y Colonia Z\")\n\n    def test_dic(self):\n        user = Usuario(\n            nombre='Ramanujan',\n            edad=25,\n            direccion='Calle X, #Y Colonia Z'\n        )\n        self.assertEqual(user.serializar(\"diccionario\"),{'nombre': 'Ramanujan', 'edad': 25, 'direccion': 'Calle X, #Y Colonia Z'})\n\n    def test_json(self):\n        user = Usuario(\n            nombre='Ramanujan',\n            edad=25,\n            direccion='Calle X, #Y Colonia Z'\n        )\n        self.assertEqual(user.serializar(\"json\"),'{\"nombre\": \"Ramanujan\", \"edad\": 25, \"direccion\": \"Calle X, #Y Colonia Z\"}')\n\n    def test_html(self):\n        user = Usuario(\n            
nombre='Ramanujan',\n edad=25,\n direccion='Calle X, #Y Colonia Z'\n )\n self.assertEqual(user.serializar(\"html\"), '
<table><tr><td>nombre</td><td>Ramanujan</td></tr><tr><td>edad</td><td>25</td></tr><tr><td>direccion</td><td>Calle X, #Y Colonia Z</td></tr></table>
')\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"Ene-Jun-2021/perez-sanchez-jose-jahir/Examen/Ejercicio5/srp_test.py","file_name":"srp_test.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"538156860","text":"import io\nimport os\nimport torch\nimport numpy as np\nfrom utils.text import text_to_sequence, phoneme_to_sequence\nfrom utils.generic_utils import load_config, setup_model\nfrom utils.audio import AudioProcessor\nfrom utils.text.symbols import phonemes, symbols\nfrom models.tacotron import Tacotron\n\n\nclass Synthesizer(object):\n    def load_model(self, model_path, model_name, model_config, use_cuda):\n        model_config = os.path.join(model_path, model_config)\n        self.model_file = os.path.join(model_path, model_name)\n        print(\" > Loading model ...\")\n        print(\" | > model config: \", model_config)\n        print(\" | > model file: \", self.model_file)\n        config = load_config(model_config)\n        self.config = config\n        self.use_cuda = use_cuda\n        self.ap = AudioProcessor(**config.audio)\n        num_chars = len(phonemes) if config.use_phonemes else len(symbols)\n        self.model = setup_model(num_chars, config)\n        # load model state\n        if use_cuda:\n            cp = torch.load(self.model_file)\n        else:\n            cp = torch.load(\n                self.model_file, map_location=lambda storage, loc: storage)\n        # load the model\n        self.model.load_state_dict(cp['model'])\n        if use_cuda:\n            self.model.cuda()\n        self.model.eval()\n\n    def save_wav(self, wav, path):\n        # wav *= 32767 / max(1e-8, np.max(np.abs(wav)))\n        wav = np.array(wav)\n        self.ap.save_wav(wav, path)\n\n    def tts(self, text, path):\n        text_cleaner = [self.config.text_cleaner]\n        wavs = []\n        for sen in text.split('.'):\n            if len(sen) < 3:\n                continue\n            sen = sen.strip()\n            sen += '.'\n            print(sen)\n            sen = sen.strip()\n            if self.config.use_phonemes:\n                seq = np.asarray(\n                    phoneme_to_sequence(sen, text_cleaner, self.config.phoneme_language, self.config.enable_eos_bos_chars),\n                    dtype=np.int32)\n            else:\n                seq = np.asarray(text_to_sequence(sen, text_cleaner), dtype=np.int32)\n            chars_var = torch.from_numpy(seq).unsqueeze(0).long()\n            if self.use_cuda:\n                chars_var = chars_var.cuda()\n            decoder_output, postnet_output, alignments, stop_tokens = self.model.inference(\n                chars_var)\n            postnet_output = postnet_output[0].data.cpu().numpy()\n            if self.config.model == \"Tacotron\":\n                wav = self.ap.inv_spectrogram(postnet_output.T)\n            else:\n                wav = self.ap.inv_mel_spectrogram(postnet_output.T) \n            wavs += list(wav)\n            wavs += [0] * 10000\n        self.save_wav(wavs, path)","sub_path":"tts/server/synthesizer.py","file_name":"synthesizer.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"423858619","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n    url(r'^$', views.post_list, name='post_list'),\n    url(r'^post/(?P<pk>\d+)/$', views.post_detail.as_view(), name='post_detail'),\n    url(r'^post/new/$', views.post_new, name='post_new'),\n    url(r'^post/edit/(?P<pk>\d+)/$', views.post_edit, name='post_edit'),\n    url(r'^post/edit/(?P<pk>\d+)/delete/$', views.post_delete.as_view(), name='post_delete'),\n    url(r'^authors/$', views.author_list, name='author_list'),\n    url(r'^authors/(?P<pk>\d+)/$', views.author_detail, name='author_detail'),\n    url(r'^authors/new/$', views.author_new, name='author_new'),\n    url(r'^authors/edit/(?P<pk>\d+)/$', views.author_edit, name='author_edit'),\n    url(r'^login', views.login, name='login'),\n    url(r'^logout', views.logout, name='logout'),\n    url(r'^switch', views.switch_accounts, name='switch_accounts'),\n]\n","sub_path":"heymonkeyblogriot/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"2281255","text":"import numpy as np\nfrom numpy import loadtxt\nfrom numpy import savetxt\nfrom sklearn.cluster import KMeans\nimport pandas as pd\nfrom sklearn.metrics import silhouette_score as score\nfrom android.os import Environment\n\ndef Clustering(inputDate):\n    dir = str(Environment.getExternalStorageDirectory())\n    file = loadtxt(dir + \"/FreeForm-Writing/.\" + inputDate + \"/.Working/inputData.csv\",delimiter=\",\")\n    inData = file.reshape(-1,1)\n\n    y = KMeans(n_clusters=2).fit(inData)\n    scr = score(inData,y.labels_)\n\n    cluster_map = pd.DataFrame()\n    cluster_map['data_index'] = file\n    cluster_map['cluster'] = y.labels_\n\n    c0 = [item[0] for item in cluster_map[cluster_map.cluster == 0].values.tolist()]\n    c1 = [item[0] for item in cluster_map[cluster_map.cluster == 1].values.tolist()]\n    if len(c0) > len(c1):\n        clust0 = c1\n    else:\n        clust0 = c0\n\n    if(len(clust0) > 5):\n        inData1 = np.array(clust0).reshape(-1,1)\n        y1 = KMeans(n_clusters=2).fit(inData1)\n        scr1 = score(inData1,y1.labels_)\n        cluster_map1 = pd.DataFrame()\n        cluster_map1['data_index'] = clust0\n        cluster_map1['cluster'] = y1.labels_\n\n        cl0 = [item[0] for item in cluster_map1[cluster_map1.cluster == 0].values.tolist()]\n        cl1 = [item[0] for item in cluster_map1[cluster_map1.cluster == 1].values.tolist()]\n\n        if scr > scr1:\n            output = clust0\n        else:\n            if len(cl0) > len(cl1):\n                output = cl1\n            else:\n                output = cl0\n    else:\n        output = clust0\n\n    savetxt(dir + \"/FreeForm-Writing/.\" + inputDate + \"/.Working/outputData.csv\",output,delimiter=\",\")\n    return","sub_path":"app/src/main/python/Clustering.py","file_name":"Clustering.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"641086592","text":"from tkinter import *\nfrom tkinter import ttk\nimport random\nfrom colors import *\nfrom algorithms.bubbleSort import bubble_sort\nfrom algorithms.mergeSort import merge_sort\n\nwindow = Tk()\nwindow.title(\"Sorting Algorithms Visualization\")\nwindow.maxsize(1000, 700)\nwindow.config(bg = WHITE)\n\nalgorithm_name = StringVar()\nalgo_list = ['Bubble Sort', 'Merge Sort']\n\nspeed_name = StringVar()\nspeed_list = ['Fast', 'Medium', 'Slow']\n\ndata = []\n\n# This function will draw randomly generated list data[] on the canvas as vertical bars\ndef drawData(data, colorArray):\n    canvas.delete(\"all\")\n    canvas_width = 800\n    canvas_height = 400\n    x_width = canvas_width / (len(data) + 1)\n    offset = 4\n    spacing = 2\n    normalizedData = [i / 
max(data) for i in data]\n\n    for i, height in enumerate(normalizedData):\n        x0 = i * x_width + offset + spacing\n        y0 = canvas_height - height * 390\n        x1 = (i + 1) * x_width + offset\n        y1 = canvas_height\n        canvas.create_rectangle(x0, y0, x1, y1, fill=colorArray[i])\n\n    window.update_idletasks()\n\n# This function will generate an array with random values every time we hit the generate button\ndef generate():\n    global data\n\n    data = []\n    for i in range(0, 100):\n        random_value = random.randint(1, 150)\n        data.append(random_value)\n\n    drawData(data, [BLUE for x in range(len(data))])\n\n# This function will set sorting speed\ndef set_speed():\n    if speed_menu.get() == 'Slow':\n        return 0.3\n    elif speed_menu.get() == 'Medium':\n        return 0.1\n    else:\n        return 0.001\n\n# This function will trigger a selected algorithm and start sorting\ndef sort():\n    global data\n    timeTick = set_speed()\n    \n    if algo_menu.get() == 'Bubble Sort':\n        bubble_sort(data, drawData, timeTick)\n    \n    elif algo_menu.get() == 'Merge Sort':\n        merge_sort(data, 0, len(data)-1, drawData, timeTick)\n\n### User interface here ###\nUI_frame = Frame(window, width= 900, height=300, bg=WHITE)\nUI_frame.grid(row=0, column=0, padx=10, pady=5)\n\n# dropdown to select sorting algorithm \nl1 = Label(UI_frame, text=\"Algorithm: \", bg=WHITE)\nl1.grid(row=0, column=0, padx=10, pady=5, sticky=W)\nalgo_menu = ttk.Combobox(UI_frame, textvariable=algorithm_name, values=algo_list)\nalgo_menu.grid(row=0, column=1, padx=5, pady=5)\nalgo_menu.current(0)\n\n# dropdown to select sorting speed \nl2 = Label(UI_frame, text=\"Sorting Speed: \", bg=WHITE)\nl2.grid(row=1, column=0, padx=10, pady=5, sticky=W)\nspeed_menu = ttk.Combobox(UI_frame, textvariable=speed_name, values=speed_list)\nspeed_menu.grid(row=1, column=1, padx=5, pady=5)\nspeed_menu.current(0)\n\n# sort button \nb1 = Button(UI_frame, text=\"Sort\", command=sort, bg=LIGHT_GRAY)\nb1.grid(row=2, column=1, padx=5, pady=5)\n\n# button for generating array \nb3 = Button(UI_frame, text=\"Generate Array\", command=generate, bg=LIGHT_GRAY)\nb3.grid(row=2, column=0, padx=5, pady=5)\n\n# canvas to draw our array \ncanvas = Canvas(window, width=800, height=400, bg=WHITE)\ncanvas.grid(row=1, column=0, padx=10, pady=5)\n\nwindow.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"566221165","text":"'''\nOpen a file in write mode and write the output - printing 1 to 10\n-print to the monitor screen\n-print to a file\n\n#--------------------\n\nHow file I/O works\nReading, writing, and appending to a file goes through the following steps.\nStep  Description\n1  Get a file object for filename with the built-in open(filename, mode) function. (create the file object)\n2  Read, write, or append data through the file object you obtained. (perform file I/O)\n3  When all work is done, call close() to release the resources held by the worker process. (release the file object)\n\n\n'''\n\n\nimport os\n\n# getcwd() lives in the os module, hence the import\n# getcwd() : used to get the location of the current working directory.\nprint (os.getcwd())\n\n\n\"\"\"\nPrinting to the monitor screen\nThe first method writes the result values to the monitor screen.\n\"\"\"\nfor i in range(1, 11):\n    data = \"This is line %d.\\n\" % i\n    print(data)\n\n\n\n\"\"\"\nWriting to a file\nOpen a file in write mode and write the output to it\n\nWe open a file in write mode and write the program's output directly to the file.\nThe second method writes the result values to a file instead of the monitor screen.\nThe only difference between the two methods is that the file object f's write() is used instead of print(); apart from the output object everything is identical.\n\"\"\"\n\n# create the file object\nf = open(\"C:/CodeLab/test.txt\", 'w')\n\n# perform the file I/O\nfor i in range(1, 11):\n    data = \"This is line %d.\\n\" % i\n    f.write(data) # write the string to the file\n\n# release the file object\n# close() closes the open file object\nf.close()\n\n\n\"\"\"\nIf you do not call close() at the end, close() is called automatically when the file object is rebound to another value or when the program terminates.\nExplicitly calling close() is recommended, however.\n\"\"\"","sub_path":"PythonMain/src/ch06-io/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"7005385","text":"import pygame, sys, random, time\r\nfrom pygame.locals import *\r\npygame.init()\r\nclock = pygame.time.Clock()\r\npygame.display.set_caption(\"Space Invaders\")\r\nscreen = pygame.display.set_mode((640,650))\r\nbadguy_image = pygame.image.load(\"images/badguy.png\").convert()\r\nbadguy_image.set_colorkey((0,0,0))\r\nfighter_image = pygame.image.load(\"images/fighter.png\").convert()\r\nfighter_image.set_colorkey((255,255,255))\r\nlast_badguy_spawn_time = 0\r\n\r\nclass Badguy:\r\n\r\n    def __init__(self):\r\n        self.x = random.randint(0,520)\r\n        self.y = -100\r\n        self.dy = random.randint(2,6)\r\n        self.dx = random.choice((-1,1))*self.dy\r\n    \r\n    def move(self):\r\n        self.x += self.dx\r\n        self.y += self.dy  \r\n    \r\n    def bounce(self):\r\n        if self.x < 0 or self.x > 570:  \r\n            self.dx *= -1\r\n    \r\n    def off_screen(self):\r\n        return self.y > 640\r\n    \r\n    def draw(self):\r\n        screen.blit(badguy_image,(self.x,self.y))\r\n    \r\nclass Fighter:\r\n    def __init__(self):\r\n        self.x = 320\r\n    \r\n    def move(self):\r\n        if pressed_keys[K_LEFT] and self.x > 0:\r\n            self.x -=3\r\n        if pressed_keys[K_RIGHT] and self.x < 540:\r\n            self.x +=3\r\n    \r\n    def draw(self):\r\n        screen.blit(fighter_image,(self.x,591))\r\n    \r\nbadguys = []\r\nfighter = Fighter()\r\n\r\nwhile 1:\r\n    clock.tick(60)\r\n    for event in pygame.event.get():\r\n        if event.type == QUIT:\r\n            sys.exit()\r\n    pressed_keys = pygame.key.get_pressed()  \r\n    \r\n    if time.time() - last_badguy_spawn_time > 0.5:\r\n        badguys.append(Badguy())\r\n        last_badguy_spawn_time = time.time()  \r\n    \r\n    screen.fill((0,0,0))\r\n\r\n    i = 0\r\n    while i < len(badguys):\r\n        badguys[i].move()\r\n        badguys[i].bounce()\r\n        badguys[i].draw()\r\n        if badguys[i].off_screen():\r\n            del badguys[i]\r\n            i -= 1\r\n        i += 1\r\n    \r\n    fighter.move()\r\n    fighter.draw()  \r\n    \r\n    pygame.display.update()","sub_path":"code/Page_89_90.py","file_name":"Page_89_90.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"194039042","text":"# # Unity ML-Agents Toolkit\nimport argparse\nimport yaml\n\nimport os\nimport numpy as np\nimport json\n\nfrom typing import Callable, Optional, List, NamedTuple, Dict\n\nimport mlagents.trainers\nimport mlagents_envs\nfrom mlagents import tf_utils\nfrom mlagents.trainers.trainer_controller import TrainerController\nfrom mlagents.trainers.meta_curriculum import MetaCurriculum\nfrom mlagents.trainers.trainer_util import (\n    load_config,\n    
TrainerFactory,\n    handle_existing_directories,\n    assemble_curriculum_config,\n)\nfrom mlagents.trainers.stats import (\n    TensorboardWriter,\n    CSVWriter,\n    StatsReporter,\n    GaugeWriter,\n    ConsoleWriter,\n)\nfrom mlagents.trainers.cli_utils import (\n    StoreConfigFile,\n    DetectDefault,\n    DetectDefaultStoreTrue,\n)\nfrom mlagents_envs.environment import UnityEnvironment\nfrom mlagents.trainers.sampler_class import SamplerManager\nfrom mlagents.trainers.exception import SamplerException, TrainerConfigError\nfrom mlagents_envs.base_env import BaseEnv\nfrom mlagents.trainers.subprocess_env_manager import SubprocessEnvManager\nfrom mlagents_envs.side_channel.side_channel import SideChannel\nfrom mlagents_envs.side_channel.engine_configuration_channel import EngineConfig\nfrom mlagents_envs.timers import (\n    hierarchical_timer,\n    get_timer_tree,\n    add_metadata as add_timer_metadata,\n)\nfrom mlagents_envs import logging_util\n\nlogger = logging_util.get_logger(__name__)\n\n\ndef _create_parser():\n    argparser = argparse.ArgumentParser(\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter\n    )\n    argparser.add_argument(\"trainer_config_path\", action=StoreConfigFile)\n    argparser.add_argument(\n        \"--env\",\n        default=None,\n        dest=\"env_path\",\n        help=\"Path to the Unity executable to train\",\n        action=DetectDefault,\n    )\n    argparser.add_argument(\n        \"--lesson\",\n        default=0,\n        type=int,\n        help=\"The lesson to start with when performing curriculum training\",\n        action=DetectDefault,\n    )\n    argparser.add_argument(\n        \"--keep-checkpoints\",\n        default=5,\n        type=int,\n        help=\"The maximum number of model checkpoints to keep. Checkpoints are saved after the \"\n        \"number of steps specified by the save-freq option. Once the maximum number of checkpoints \"\n        \"has been reached, the oldest checkpoint is deleted when saving a new checkpoint.\",\n        action=DetectDefault,\n    )\n    argparser.add_argument(\n        \"--load\",\n        default=False,\n        dest=\"load_model\",\n        action=DetectDefaultStoreTrue,\n        help=argparse.SUPPRESS, # Deprecated but still usable for now.\n    )\n    argparser.add_argument(\n        \"--resume\",\n        default=False,\n        dest=\"resume\",\n        action=DetectDefaultStoreTrue,\n        help=\"Whether to resume training from a checkpoint. Specify a --run-id to use this option. \"\n        \"If set, the training code loads an already trained model to initialize the neural network \"\n        \"before resuming training. This option is only valid when the models exist, and have the same \"\n        \"behavior names as the current agents in your scene.\",\n    )\n    argparser.add_argument(\n        \"--force\",\n        default=False,\n        dest=\"force\",\n        action=DetectDefaultStoreTrue,\n        help=\"Whether to force-overwrite this run-id's existing summary and model data. (Without \"\n        \"this flag, attempting to train a model with a run-id that has been used before will throw \"\n        \"an error.)\",\n    )\n    argparser.add_argument(\n        \"--run-id\",\n        default=\"ppo\",\n        help=\"The identifier for the training run. This identifier is used to name the \"\n        \"subdirectories in which the trained model and summary statistics are saved as well \"\n        \"as the saved model itself. If you use TensorBoard to view the training statistics, \"\n        \"always set a unique run-id for each training run. (The statistics for all runs with the \"\n        \"same id are combined as if they were produced by the same session.)\",\n        action=DetectDefault,\n    )\n    argparser.add_argument(\n        \"--initialize-from\",\n        metavar=\"RUN_ID\",\n        default=None,\n        help=\"Specify a previously saved run ID from which to initialize the model. \"\n        \"This can be used, for instance, to fine-tune an existing model on a new environment. \"\n        \"Note that the previously saved models must have the same behavior parameters as your \"\n        \"current environment.\",\n        action=DetectDefault,\n    )\n    argparser.add_argument(\n        \"--save-freq\",\n        default=50000,\n        type=int,\n        help=\"How often (in steps) to save the model during training\",\n        action=DetectDefault,\n    )\n    argparser.add_argument(\n        \"--seed\",\n        default=-1,\n        type=int,\n        help=\"A number to use as a seed for the random number generator used by the training code\",\n        action=DetectDefault,\n    )\n    argparser.add_argument(\n        \"--train\",\n        default=False,\n        dest=\"train_model\",\n        action=DetectDefaultStoreTrue,\n        help=argparse.SUPPRESS,\n    )\n    argparser.add_argument(\n        \"--inference\",\n        default=False,\n        dest=\"inference\",\n        action=DetectDefaultStoreTrue,\n        help=\"Whether to run in Python inference mode (i.e. no training). Use with --resume to load \"\n        \"a model trained with an existing run ID.\",\n    )\n    argparser.add_argument(\n        \"--base-port\",\n        default=UnityEnvironment.BASE_ENVIRONMENT_PORT,\n        type=int,\n        help=\"The starting port for environment communication. Each concurrent Unity environment \"\n        \"instance will get assigned a port sequentially, starting from the base-port. Each instance \"\n        \"will use the port (base_port + worker_id), where the worker_id is sequential IDs given to \"\n        \"each instance from 0 to (num_envs - 1). Note that when training using the Editor rather \"\n        \"than an executable, the base port will be ignored.\",\n        action=DetectDefault,\n    )\n    argparser.add_argument(\n        \"--num-envs\",\n        default=1,\n        type=int,\n        help=\"The number of concurrent Unity environment instances to collect experiences \"\n        \"from when training\",\n        action=DetectDefault,\n    )\n    argparser.add_argument(\n        \"--no-graphics\",\n        default=False,\n        action=DetectDefaultStoreTrue,\n        help=\"Whether to run the Unity executable in no-graphics mode (i.e. without initializing \"\n        \"the graphics driver). Use this only if your agents don't use visual observations.\",\n    )\n    argparser.add_argument(\n        \"--debug\",\n        default=False,\n        action=DetectDefaultStoreTrue,\n        help=\"Whether to enable debug-level logging for some parts of the code\",\n    )\n    argparser.add_argument(\n        \"--env-args\",\n        default=None,\n        nargs=argparse.REMAINDER,\n        help=\"Arguments passed to the Unity executable. Be aware that the standalone build will also \"\n        \"process these as Unity Command Line Arguments. You should choose different argument names if \"\n        \"you want to create environment-specific arguments. All arguments after this flag will be \"\n        \"passed to the executable.\",\n        action=DetectDefault,\n    )\n    argparser.add_argument(\n        \"--cpu\",\n        default=False,\n        action=DetectDefaultStoreTrue,\n        help=\"Forces training using CPU only\",\n    )\n\n    argparser.add_argument(\"--version\", action=\"version\", version=\"\")\n\n    eng_conf = argparser.add_argument_group(title=\"Engine Configuration\")\n    eng_conf.add_argument(\n        \"--width\",\n        default=None,\n        type=int,\n        help=\"The width of the executable window of the environment(s) in pixels \"\n        \"(ignored for editor training).\",\n        action=DetectDefault,\n    )\n    eng_conf.add_argument(\n        \"--height\",\n        default=None,\n        type=int,\n        help=\"The height of the executable window of the environment(s) in pixels \"\n        \"(ignored for editor training)\",\n        action=DetectDefault,\n    )\n    eng_conf.add_argument(\n        \"--quality-level\",\n        default=5,\n        type=int,\n        help=\"The quality level of the environment(s). Equivalent to calling \"\n        \"QualitySettings.SetQualityLevel in Unity.\",\n        action=DetectDefault,\n    )\n    eng_conf.add_argument(\n        \"--time-scale\",\n        default=20,\n        type=float,\n        help=\"The time scale of the Unity environment(s). Equivalent to setting \"\n        \"Time.timeScale in Unity.\",\n        action=DetectDefault,\n    )\n    eng_conf.add_argument(\n        \"--target-frame-rate\",\n        default=-1,\n        type=int,\n        help=\"The target frame rate of the Unity environment(s). Equivalent to setting \"\n        \"Application.targetFrameRate in Unity.\",\n        action=DetectDefault,\n    )\n    eng_conf.add_argument(\n        \"--capture-frame-rate\",\n        default=60,\n        type=int,\n        help=\"The capture frame rate of the Unity environment(s). Equivalent to setting \"\n        \"Time.captureFramerate in Unity.\",\n        action=DetectDefault,\n    )\n    return argparser\n\n\nparser = _create_parser()\n\n\nclass RunOptions(NamedTuple):\n    behaviors: Dict\n    debug: bool = parser.get_default(\"debug\")\n    seed: int = parser.get_default(\"seed\")\n    env_path: Optional[str] = parser.get_default(\"env_path\")\n    run_id: str = parser.get_default(\"run_id\")\n    initialize_from: str = parser.get_default(\"initialize_from\")\n    load_model: bool = parser.get_default(\"load_model\")\n    resume: bool = parser.get_default(\"resume\")\n    force: bool = parser.get_default(\"force\")\n    train_model: bool = parser.get_default(\"train_model\")\n    inference: bool = parser.get_default(\"inference\")\n    save_freq: int = parser.get_default(\"save_freq\")\n    keep_checkpoints: int = parser.get_default(\"keep_checkpoints\")\n    base_port: int = parser.get_default(\"base_port\")\n    num_envs: int = parser.get_default(\"num_envs\")\n    curriculum_config: Optional[Dict] = None\n    lesson: int = parser.get_default(\"lesson\")\n    no_graphics: bool = parser.get_default(\"no_graphics\")\n    multi_gpu: bool = parser.get_default(\"multi_gpu\")\n    parameter_randomization: Optional[Dict] = None\n    env_args: Optional[List[str]] = parser.get_default(\"env_args\")\n    cpu: bool = parser.get_default(\"cpu\")\n    width: int = parser.get_default(\"width\")\n    height: int = parser.get_default(\"height\")\n    quality_level: int = parser.get_default(\"quality_level\")\n    time_scale: float = parser.get_default(\"time_scale\")\n    target_frame_rate: int = parser.get_default(\"target_frame_rate\")\n    capture_frame_rate: int = parser.get_default(\"capture_frame_rate\")\n\n    @staticmethod\n    def from_argparse(args: argparse.Namespace) -> \"RunOptions\":\n        \"\"\"\n        Takes an argparse.Namespace as specified in `parse_command_line`, loads input configuration files\n        from file paths, and converts to a RunOptions instance.\n        :param args: collection of command-line parameters passed to mlagents-learn\n        :return: RunOptions representing the passed in arguments, with trainer config, curriculum and sampler\n        configs loaded from files.\n        \"\"\"\n        argparse_args = vars(args)\n        run_options_dict = {}\n        run_options_dict.update(argparse_args)\n        config_path = StoreConfigFile.trainer_config_path\n\n        # Load YAML\n        yaml_config = load_config(config_path)\n        # This is the only option that is not optional and has no defaults.\n        if \"behaviors\" not in yaml_config:\n            raise TrainerConfigError(\n                \"Trainer configurations not found. Make sure your YAML file has a section for behaviors.\"\n            )\n        # Use the YAML file values for all values not specified in the CLI.\n        for key, val in yaml_config.items():\n            # Detect bad config options\n            if not hasattr(RunOptions, key):\n                raise TrainerConfigError(\n                    \"The option {} was specified in your YAML file, but is invalid.\".format(\n                        key\n                    )\n                )\n            if key not in DetectDefault.non_default_args:\n                run_options_dict[key] = val\n\n        # Keep deprecated --load working, TODO: remove\n        run_options_dict[\"resume\"] = (\n            run_options_dict[\"resume\"] or run_options_dict[\"load_model\"]\n        )\n\n        return RunOptions(**run_options_dict)\n\n\ndef get_version_string() -> str:\n    # pylint: disable=no-member\n    return f\"\"\" Version information:\n  ml-agents: {mlagents.trainers.__version__},\n  ml-agents-envs: {mlagents_envs.__version__},\n  Communicator API: {UnityEnvironment.API_VERSION},\n  TensorFlow: {tf_utils.tf.__version__}\"\"\"\n\n\ndef parse_command_line(argv: Optional[List[str]] = None) -> RunOptions:\n    args = parser.parse_args(argv)\n    return RunOptions.from_argparse(args)\n\n\ndef run_training(run_seed: int, options: RunOptions) -> None:\n    \"\"\"\n    Launches training session.\n    :param options: parsed command line arguments\n    :param run_seed: Random seed used for training.\n    \"\"\"\n    with hierarchical_timer(\"run_training.setup\"):\n        base_path = \"results\"\n        write_path = os.path.join(base_path, options.run_id)\n        maybe_init_path = (\n            os.path.join(base_path, options.initialize_from) if options.initialize_from else None\n        )\n        run_logs_dir = os.path.join(write_path, \"run_logs\")\n        port = options.base_port\n        # Check if directory exists\n        handle_existing_directories(\n            write_path, options.resume, options.force, maybe_init_path\n        )\n        # Make run logs directory\n        os.makedirs(run_logs_dir, exist_ok=True)\n        # Configure CSV, Tensorboard Writers and StatsReporter\n        # We assume reward and episode length are needed in the CSV.\n        csv_writer = CSVWriter(\n            write_path,\n            required_fields=[\n                \"Environment/Cumulative Reward\",\n                \"Environment/Episode Length\",\n            ],\n        )\n        tb_writer = TensorboardWriter(write_path, clear_past_data=not options.resume)\n        gauge_write = GaugeWriter()\n        console_writer = ConsoleWriter()\n        StatsReporter.add_writer(tb_writer)\n        StatsReporter.add_writer(csv_writer)\n        StatsReporter.add_writer(gauge_write)\n        StatsReporter.add_writer(console_writer)\n\n    if options.env_path is None:\n        port = UnityEnvironment.DEFAULT_EDITOR_PORT\n    env_factory = create_environment_factory(\n        options.env_path,\n        options.no_graphics,\n        run_seed,\n        port,\n        options.env_args,\n        os.path.abspath(run_logs_dir),  # Unity environment requires absolute path\n    )\n    engine_config = EngineConfig(\n        width=options.width,\n        height=options.height,\n        quality_level=options.quality_level,\n        time_scale=options.time_scale,\n        target_frame_rate=options.target_frame_rate,\n        capture_frame_rate=options.capture_frame_rate,\n    )\n    env_manager = SubprocessEnvManager(env_factory, engine_config, options.num_envs)\n    curriculum_config = assemble_curriculum_config(options.behaviors)\n    maybe_meta_curriculum = try_create_meta_curriculum(\n        curriculum_config, env_manager, options.lesson\n    )\n    sampler_manager, resampling_interval = create_sampler_manager(\n        options.parameter_randomization, run_seed\n    )\n    trainer_factory = TrainerFactory(\n        options.behaviors,\n        options.run_id,\n        write_path,\n        options.keep_checkpoints,\n        not options.inference,\n        options.resume,\n        run_seed,\n        maybe_init_path,\n        
maybe_meta_curriculum,\n options.multi_gpu,\n )\n # Create controller and begin training.\n tc = TrainerController(\n trainer_factory,\n write_path,\n options.run_id,\n options.save_freq,\n maybe_meta_curriculum,\n not options.inference,\n run_seed,\n sampler_manager,\n resampling_interval,\n )\n\n # Begin training\n try:\n tc.start_learning(env_manager)\n finally:\n env_manager.close()\n write_run_options(write_path, options)\n write_timing_tree(run_logs_dir)\n\n\ndef write_run_options(output_dir: str, run_options: RunOptions) -> None:\n run_options_path = os.path.join(output_dir, \"configuration.yaml\")\n try:\n with open(run_options_path, \"w\") as f:\n try:\n yaml.dump(dict(run_options._asdict()), f, sort_keys=False)\n except TypeError: # Older versions of pyyaml don't support sort_keys\n yaml.dump(dict(run_options._asdict()), f)\n except FileNotFoundError:\n logger.warning(\n f\"Unable to save configuration to {run_options_path}. Make sure the directory exists\"\n )\n\n\ndef write_timing_tree(output_dir: str) -> None:\n timing_path = os.path.join(output_dir, \"timers.json\")\n try:\n with open(timing_path, \"w\") as f:\n json.dump(get_timer_tree(), f, indent=4)\n except FileNotFoundError:\n logger.warning(\n f\"Unable to save to {timing_path}. Make sure the directory exists\"\n )\n\n\ndef create_sampler_manager(sampler_config, run_seed=None):\n resample_interval = None\n if sampler_config is not None:\n if \"resampling-interval\" in sampler_config:\n # Filter arguments that do not exist in the environment\n resample_interval = sampler_config.pop(\"resampling-interval\")\n if (resample_interval <= 0) or (not isinstance(resample_interval, int)):\n raise SamplerException(\n \"Specified resampling-interval is not valid. Please provide\"\n \" a positive integer value for resampling-interval\"\n )\n\n else:\n raise SamplerException(\n \"Resampling interval was not specified in the sampler file.\"\n \" Please specify it with the 'resampling-interval' key in the sampler config file.\"\n )\n\n sampler_manager = SamplerManager(sampler_config, run_seed)\n return sampler_manager, resample_interval\n\n\ndef try_create_meta_curriculum(\n curriculum_config: Optional[Dict], env: SubprocessEnvManager, lesson: int\n) -> Optional[MetaCurriculum]:\n if curriculum_config is None or len(curriculum_config) <= 0:\n return None\n else:\n meta_curriculum = MetaCurriculum(curriculum_config)\n # TODO: Should be able to start learning at different lesson numbers\n # for each curriculum.\n meta_curriculum.set_all_curricula_to_lesson_num(lesson)\n return meta_curriculum\n\n\ndef create_environment_factory(\n env_path: Optional[str],\n no_graphics: bool,\n seed: int,\n start_port: int,\n env_args: Optional[List[str]],\n log_folder: str,\n) -> Callable[[int, List[SideChannel]], BaseEnv]:\n def create_unity_environment(\n worker_id: int, side_channels: List[SideChannel]\n ) -> UnityEnvironment:\n # Make sure that each environment gets a different seed\n env_seed = seed + worker_id\n return UnityEnvironment(\n file_name=env_path,\n worker_id=worker_id,\n seed=env_seed,\n no_graphics=no_graphics,\n base_port=start_port,\n additional_args=env_args,\n side_channels=side_channels,\n log_folder=log_folder,\n )\n\n return create_unity_environment\n\n\ndef run_cli(options: RunOptions) -> None:\n try:\n print(\n \"\"\"\n\n ▄▄▄▓▓▓▓\n ╓▓▓▓▓▓▓█▓▓▓▓▓\n ,▄▄▄m▀▀▀' ,▓▓▓▀▓▓▄ ▓▓▓ ▓▓▌\n ▄▓▓▓▀' ▄▓▓▀ ▓▓▓ ▄▄ ▄▄ ,▄▄ ▄▄▄▄ ,▄▄ ▄▓▓▌▄ ▄▄▄ ,▄▄\n ▄▓▓▓▀ ▄▓▓▀ ▐▓▓▌ ▓▓▌ ▐▓▓ ▐▓▓▓▀▀▀▓▓▌ ▓▓▓ ▀▓▓▌▀ ^▓▓▌ ╒▓▓▌\n ▄▓▓▓▓▓▄▄▄▄▄▄▄▄▓▓▓ ▓▀ ▓▓▌ ▐▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▌ 
▐▓▓▄ ▓▓▌\n ▀▓▓▓▓▀▀▀▀▀▀▀▀▀▀▓▓▄ ▓▓ ▓▓▌ ▐▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▌ ▐▓▓▐▓▓\n ^█▓▓▓ ▀▓▓▄ ▐▓▓▌ ▓▓▓▓▄▓▓▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▓▄ ▓▓▓▓`\n '▀▓▓▓▄ ^▓▓▓ ▓▓▓ └▀▀▀▀ ▀▀ ^▀▀ `▀▀ `▀▀ '▀▀ ▐▓▓▌\n ▀▀▀▀▓▄▄▄ ▓▓▓▓▓▓, ▓▓▓▓▀\n `▀█▓▓▓▓▓▓▓▓▓▌\n ¬`▀▀▀█▓\n\n \"\"\"\n )\n except Exception:\n print(\"\\n\\n\\tUnity Technologies\\n\")\n print(get_version_string())\n\n if options.debug:\n log_level = logging_util.DEBUG\n else:\n log_level = logging_util.INFO\n # disable noisy warnings from tensorflow\n tf_utils.set_warnings_enabled(False)\n\n logging_util.set_log_level(log_level)\n\n logger.debug(\"Configuration for this run:\")\n logger.debug(json.dumps(options._asdict(), indent=4))\n\n # Options deprecation warnings\n if options.load_model:\n logger.warning(\n \"The --load option has been deprecated. Please use the --resume option instead.\"\n )\n if options.train_model:\n logger.warning(\n \"The --train option has been deprecated. Train mode is now the default. Use \"\n \"--inference to run in inference mode.\"\n )\n\n run_seed = options.seed\n if options.cpu:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n # Add some timer metadata\n add_timer_metadata(\"mlagents_version\", mlagents.trainers.__version__)\n add_timer_metadata(\"mlagents_envs_version\", mlagents_envs.__version__)\n add_timer_metadata(\"communication_protocol_version\", UnityEnvironment.API_VERSION)\n add_timer_metadata(\"tensorflow_version\", tf_utils.tf.__version__)\n\n if options.seed == -1:\n run_seed = np.random.randint(0, 10000)\n run_training(run_seed, options)\n\n\ndef main():\n run_cli(parse_command_line())\n\n\n# For python debugger to directly run this script\nif __name__ == \"__main__\":\n main()\n","sub_path":"ml-agents/mlagents/trainers/learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":22725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"377454099","text":"#coding:utf-8\n\"\"\"\n@file: existed_task\n@author: IsolationWyn\n@contact: genius_wz@aliyun.com\n@python: 3.5.2\n@editor: PyCharm\n@create: 2017/7/1 3:18\n@description:\n --\n\"\"\"\nfrom TaskFeed.caee_utexas_task import CaeeUtexasTask\nfrom TaskFeed.che_utexas_task import CheUtexasTask\nfrom TaskFeed.cs_utexas_task import CSUtexasTask\nfrom TaskFeed.ece_utexas_task import ECEUtexasTask\nfrom TaskFeed.me_utexas_task import MeUtexasTask\nfrom TaskFeed.tmi_utexas_task import TmiUtexasTask\nfrom TaskFeed.eecs_berkeley_task import EECSBerkeleyTask\nfrom TaskFeed.me_berkeley_task import MEBerkeleyTask\nfrom TaskFeed.ce_berkeley_task import CEBerkeleyTask\nfrom CustomParser import ame_nd_parser,\\\n cse_nd_parser,\\\n ee_nd_parser,\\\n cbe_udel_parser,\\\n me_udel_parser,\\\n ce_udel_parser\n\nEXISTED_SPIDER = [\n {\n 'SpiderClass' : CaeeUtexasTask(),\n 'Major' : 'Civil and Environmental Engineering',\n 'Forecast' : 57,\n 'state' : True\n },\n {\n 'SpiderClass' : CheUtexasTask(),\n 'Major' : 'Chemical Engineering',\n 'Forecast' : 34,\n 'state' : True\n },\n {\n 'SpiderClass' : CSUtexasTask(),\n 'Major' : 'Computer Science ',\n 'Forecast' : 40,\n 'state' : True\n },\n {\n 'SpiderClass' : MeUtexasTask(),\n 'Major' : 'MECHANICAL ENGINEERING',\n 'Forecast' : 90,\n 'state' : True\n },\n {\n 'SpiderClass' : TmiUtexasTask(),\n 'Major' : 'Materials Science and Engineering',\n 'Forecast' : 100,\n 'state' : True\n },\n \n {\n 'SpiderClass' : ECEUtexasTask(),\n 'Major' : 'Electrical and Computer Engineering',\n 'Forecast' : 80,\n 'state' : True\n },\n {\n 'SpiderClass' : EECSBerkeleyTask(),\n 'Major' : 'Electrical Engineering and 
Computer Sciences',\n 'Forecast' : 130,\n 'state' : True\n },\n {\n 'SpiderClass': MEBerkeleyTask(),\n 'Major' : 'Mechanical Engineering',\n 'Forecast' : 53,\n 'state' : True\n },\n {\n 'SpiderClass' : CEBerkeleyTask(),\n 'Major' : 'Civil and Environmental Engineering',\n 'Forecast' : 52,\n 'state' : True\n },\n {\n 'SpiderClass' : ame_nd_parser.AmeNdTask,\n 'Major' : None,\n 'Forecast' : None,\n 'state' : False\n },\n {\n 'SpiderClass' : cse_nd_parser.CSENdTask,\n 'Major' : None,\n 'Forecast' : None,\n 'state' : False,\n 'error' : \"504\"\n },\n {\n 'SpiderClass' : ee_nd_parser.EENdTask,\n 'Major' : None,\n 'Forecast' : None,\n 'state' : False\n },\n {\n 'SpiderClass' : cbe_udel_parser.CBETask,\n 'Major' : '',\n 'Forecast' : None,\n 'state' : False\n },\n {\n 'SpiderClass' : me_udel_parser.MEUdelTask,\n 'Major' : '',\n 'Forecast' : None,\n 'state' : False\n },\n {\n 'SpiderClass' : ce_udel_parser.CEUdelTask,\n 'Major' : '',\n 'Forecast' : None,\n 'state' : False\n }\n]","sub_path":"TaskFeed/existed_spider.py","file_name":"existed_spider.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"609182835","text":"import configparser\nimport logging\nimport os\n\nimport aiohttp\nimport discord\nfrom discord.ext import commands\n\nlogger = logging.getLogger(__name__)\n\nbot_directory = \"{}/Bot\".format(os.getcwd())\n\nconfig = configparser.ConfigParser()\n\n\nclass admin(commands.Cog):\n \"\"\"Admin only commands\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n @commands.has_any_role(\"Admin\")\n async def setprefix(self, ctx, prefix: str):\n \"\"\"Set prefix for commands\"\"\"\n if len(prefix) > 2:\n await ctx.send(\"You can only have 2 characters maximum set as a prefix\")\n return\n\n # Set the new prefix with the bot then save to to prefs.json\n self.bot.command_prefix = prefix\n config.read(\"{}/config.ini\".format(bot_directory))\n config[\"DEFAULT\"][\"prefix\"] = prefix\n with open(\"{}/config.ini\".format(bot_directory), \"w\") as config_file:\n config.write(config_file)\n await ctx.send(\"Prefix was set as {}\".format(config[\"DEFAULT\"][\"prefix\"]))\n\n # Set the playing message to use the new prefix\n await self.bot.change_presence(\n activity=discord.Game(name=\"{}help\".format(config[\"DEFAULT\"][\"prefix\"]))\n )\n\n @commands.command()\n @commands.has_any_role(\"Admin\")\n @commands.cooldown(1, 30, commands.BucketType.default)\n async def setname(self, ctx, *, name: str = None):\n \"\"\"Set the bots nickname\"\"\"\n await ctx.guild.me.edit(nick=name)\n\n @commands.command()\n @commands.has_any_role(\"Admin\")\n @commands.cooldown(1, 10, commands.BucketType.default)\n async def setavatar(self, ctx, link: str):\n \"\"\"Set the bots avatar\"\"\"\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(link) as img:\n with open(\"avatar.png\", \"wb\") as f:\n f.write(await img.read())\n with open(\"avatar.png\", \"rb\") as f:\n await self.bot.user.edit(avatar=f.read())\n os.remove(\"avatar.png\")\n await ctx.send(\"New avatar set!\")\n except Exception as e:\n await ctx.send(str(e))\n\n @commands.command()\n @commands.has_any_role(\"Admin\")\n async def toggleblacklist(self, ctx):\n \"\"\"Toggle the blacklist on and off\"\"\"\n config.read(\"{}/config.ini\".format(bot_directory))\n blacklist_enabled = config.getboolean(\"MODERATION\", \"blacklistenabled\")\n if blacklist_enabled:\n config[\"MODERATION\"][\"blacklistenabled\"] = 
\"False\"\n            await ctx.send(\"Blacklist is now disabled\")\n        else:\n            config[\"MODERATION\"][\"blacklistenabled\"] = \"True\"\n            await ctx.send(\"Blacklist is now enabled\")\n\n        with open(\"{}/config.ini\".format(bot_directory), \"w\") as config_file:\n            config.write(config_file)\n\n    @commands.command()\n    @commands.has_any_role(\"Admin\")\n    async def togglelog(self, ctx):\n        \"\"\"Toggle the chat logs on and off\"\"\"\n        config.read(\"{}/config.ini\".format(bot_directory))\n        log_enabled = config.getboolean(\"MODERATION\", \"logenabled\")\n        if log_enabled:\n            config[\"MODERATION\"][\"logenabled\"] = \"False\"\n            await ctx.send(\"Chat logs are now disabled\")\n        else:\n            config[\"MODERATION\"][\"logenabled\"] = \"True\"\n            await ctx.send(\"Chat logs now enabled\")\n\n        with open(\"{}/config.ini\".format(bot_directory), \"w\") as config_file:\n            config.write(config_file)\n\n    @commands.command()\n    @commands.has_any_role(\"Admin\")\n    async def toggleautogreet(self, ctx):\n        \"\"\"Toggle autogreet on and off\"\"\"\n        config.read(\"{}/config.ini\".format(bot_directory))\n        autogreet_enabled = config.getboolean(\"DEFAULT\", \"autogreet\")\n        if autogreet_enabled:\n            config[\"DEFAULT\"][\"autogreet\"] = \"False\"\n            await ctx.send(\"Autogreet is now disabled\")\n        else:\n            config[\"DEFAULT\"][\"autogreet\"] = \"True\"\n            await ctx.send(\"Autogreet is now enabled\")\n\n        with open(\"{}/config.ini\".format(bot_directory), \"w\") as config_file:\n            config.write(config_file)\n\n\ndef setup(bot):\n    bot.add_cog(admin(bot))\n","sub_path":"Bot/cogs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"186890218","text":"# -*- mode: python -*-\n\nimport os\n\ncurr_dir = os.getcwd() #os.path.dirname(os.path.realpath(__file__))\nif not os.path.isdir('pyconrad'):\n    os.makedirs('pyconrad')\n\n# need to add pyconrad folder so we need a dummy file inside\nwith open(curr_dir + '/pyconrad/deleteMe.txt', 'w') as deleteMe:\n    deleteMe.write('Dummy file...\\n')\n\t\nadded_files = [\n    (curr_dir + '/dependency/*.py', 'dependency'),\n\t(curr_dir + '/data/Head_Phantom.*', 'data'),\n\t(curr_dir + '/include/*.py', 'include'),\n\t(curr_dir + '/icons/*', 'icons'),\n\t(curr_dir + '/languages/*', 'languages'),\n\t(curr_dir + '/Math/*.py', 'Math'),\n\t(curr_dir + '/threads/*.py', 'threads'),\n\t(curr_dir + '/config.xml', '.'),\n\t#(curr_dir + '/*.npy', '.'),\n\t(curr_dir + '/pyconrad/deleteMe.txt', 'pyconrad')\n]\n\nblock_cipher = None\n\na = Analysis(['InteractiveConeBeamReconstruction.pyw'],\n             pathex=['C:\\\\Users\\\\Jonas\\\\Git\\\\InteractiveConeBeamReconstruction'],\n             binaries=[],\n             datas=added_files,\n             hiddenimports=[],\n             hookspath=[],\n             runtime_hooks=[],\n             excludes=[],\n             win_no_prefer_redirects=False,\n             win_private_assemblies=False,\n             cipher=block_cipher,\n             noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n             cipher=block_cipher)\nexe = EXE(pyz,\n          a.scripts,\n          a.binaries,\n          a.zipfiles,\n          a.datas,\n          [],\n          name='InteractiveConeBeamReconstruction',\n          debug=False,\n          bootloader_ignore_signals=False,\n          strip=False,\n          upx=True,\n          runtime_tmpdir=None,\n          console=True )\n","sub_path":"pyinstaller_onefile.spec","file_name":"pyinstaller_onefile.spec","file_ext":"spec","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"38230987","text":"\"\"\"\nDeformation field operations\n\"\"\"\n\nimport SimpleITK as sitk\nimport numpy as np\n\nfrom skimage.morphology import 
convex_hull_image\n\nfrom platipy.imaging.registration.registration import (\n apply_field,\n fast_symmetric_forces_demons_registration,\n convert_mask_to_reg_structure,\n)\n\n\ndef get_bone_mask(image, lower_threshold=350, upper_threshold=3500, max_hole_size=5):\n \"\"\"\n Automatically generate a binary mask of bones from a CT image.\n\n Args:\n image ([SimpleITK.Image]): The patient x-ray CT image to segment.\n lower_threshold (int, optional): Lower voxel value for threshold. Defaults to 350.\n upper_threshold (int, optional): Upper voxel value for threshold. Defaults to 3500.\n max_hole_size (int | list | bool, optional): Maximum hole size to be filled in millimetres. Can be specified as a vector (z,y,x). Defaults to 5.\n\n Returns:\n [SimpleITK.Image]: The binary bone mask.\n \"\"\"\n\n bone_mask = sitk.BinaryThreshold(\n image, lowerThreshold=lower_threshold, upperThreshold=upper_threshold\n )\n\n if max_hole_size is not False:\n if not hasattr(max_hole_size, \"__iter__\"):\n max_hole_size = (max_hole_size,) * 3\n\n bone_mask = sitk.BinaryMorphologicalClosing(bone_mask, max_hole_size)\n\n return bone_mask\n\n\ndef get_external_mask(\n image, lower_threshold=-100, upper_threshold=2500, dilate=1, max_hole_size=False\n):\n \"\"\"\n Automatically generate a binary mask of the patient external contour.\n Uses slice-wise convex hull generation.\n\n Args:\n image ([SimpleITK.Image]): The patient x-ray CT image to segment. May work with other modalities with modified thresholds.\n lower_threshold (int, optional): Lower voxel value for threshold. Defaults to -100.\n upper_threshold (int, optional): Upper voxel value for threshold. Defaults to 2500.\n dilate (int | list | bool, optional): Dilation filter size applied to the binary mask. Can be specified as a vector (z,y,x). Defaults to 1.\n max_hole_size (int | list | bool, optional): Maximum hole size to be filled in millimetres. Can be specified as a vector (z,y,x). 
Defaults to False.\n\n    Returns:\n        [SimpleITK.Image]: The binary external mask.\n    \"\"\"\n\n    # Get all points inside the body\n    external_mask = sitk.BinaryThreshold(\n        image, lowerThreshold=lower_threshold, upperThreshold=upper_threshold\n    )\n\n    external_mask_components = sitk.ConnectedComponent(external_mask, True)\n\n    # Largest connected component is most likely the body - you should check this!\n    body_mask = sitk.Equal(sitk.RelabelComponent(external_mask_components), 1)\n\n    if dilate is not False:\n        if not hasattr(dilate, \"__iter__\"):\n            dilate = (dilate,) * 3\n        body_mask = sitk.BinaryDilate(body_mask, dilate)\n\n    if max_hole_size is not False:\n        if not hasattr(max_hole_size, \"__iter__\"):\n            max_hole_size = (max_hole_size,) * 3\n\n        body_mask = sitk.BinaryMorphologicalClosing(body_mask, max_hole_size)\n        body_mask = sitk.BinaryFillhole(body_mask, fullyConnected=True)\n\n    arr = sitk.GetArrayFromImage(body_mask)\n\n    convex_hull_slices = np.zeros_like(arr)\n\n    for index in np.arange(0, len(arr)):\n        convex_hull_slices[index] = convex_hull_image(arr[index])\n\n    body_mask_hull = sitk.GetImageFromArray(convex_hull_slices)\n    body_mask_hull.CopyInformation(body_mask)\n\n    return body_mask_hull\n\n\ndef generate_field_shift(mask_image, vector_shift=(10, 10, 10), gaussian_smooth=5):\n    \"\"\"\n    Shifts (moves) a structure defined using a binary mask.\n\n    Args:\n        mask_image ([SimpleITK.Image]): The binary mask to shift.\n        vector_shift (tuple, optional): The displacement vector applied to the entire binary mask.\n                                        Convention: (+/-, +/-, +/-) = (sup/inf, post/ant, left/right) shift.\n                                        Defined in millimetres.\n                                        Defaults to (10, 10, 10).\n        gaussian_smooth (int | list, optional): Scale of a Gaussian kernel used to smooth the deformation vector field. Defaults to 5.\n\n    Returns:\n        [SimpleITK.Image]: The binary mask following the shift.\n        [SimpleITK.DisplacementFieldTransform]: The transform representing the shift.\n        [SimpleITK.Image]: The displacement vector field representing the shift.\n    \"\"\"\n    # Define array\n    # Used for image array manipulations\n    mask_image_arr = sitk.GetArrayFromImage(mask_image)\n\n    # The template deformation field\n    # Used to generate transforms\n    dvf_arr = np.zeros(mask_image_arr.shape + (3,))\n    dvf_arr = dvf_arr - np.array([[[vector_shift[::-1]]]])\n    dvf_template = sitk.GetImageFromArray(dvf_arr)\n\n    # Copy image information\n    dvf_template.CopyInformation(mask_image)\n\n    dvf_tfm = sitk.DisplacementFieldTransform(\n        sitk.Cast(dvf_template, sitk.sitkVectorFloat64)\n    )\n    mask_image_shift = apply_field(\n        mask_image, transform=dvf_tfm, structure=True, interp=1\n    )\n\n    dvf_template = sitk.Mask(dvf_template, mask_image | mask_image_shift)\n\n    # smooth\n    if np.any(gaussian_smooth):\n\n        if not hasattr(gaussian_smooth, \"__iter__\"):\n            gaussian_smooth = (gaussian_smooth,) * 3\n\n        dvf_template = sitk.SmoothingRecursiveGaussian(dvf_template, gaussian_smooth)\n\n        dvf_tfm = sitk.DisplacementFieldTransform(\n            sitk.Cast(dvf_template, sitk.sitkVectorFloat64)\n        )\n        mask_image_shift = apply_field(\n            mask_image, transform=dvf_tfm, structure=True, interp=1\n        )\n\n    return mask_image_shift, dvf_tfm, dvf_template\n\n\ndef generate_field_asymmetric_contract(\n    mask_image, vector_asymmetric_contract=(10, 10, 10), gaussian_smooth=5\n):\n    \"\"\"\n    Contracts a structure (defined using a binary mask) using a specified vector.\n\n    Args:\n        mask_image ([SimpleITK.Image]): The binary mask to contract.\n        vector_asymmetric_contract (tuple, optional): The contraction vector applied to the entire binary mask.\n                                                      
Convention: (+/-, +/-, +/-) = (sup/inf, post/ant, left/right) border is contracted.\n Defined in millimetres.\n Defaults to (10, 10, 10).\n gaussian_smooth (int | list, optional): Scale of a Gaussian kernel used to smooth the deformation vector field. Defaults to 5.\n\n Returns:\n [SimpleITK.Image]: The binary mask following the contract.\n [SimpleITK.DisplacementFieldTransform]: The transform representing the contract.\n [SimpleITK.Image]: The displacement vector field representing the contract.\n \"\"\"\n # Define array\n # Used for image array manipulations\n mask_image_arr = sitk.GetArrayFromImage(mask_image)\n\n # The template deformation field\n # Used to generate transforms\n dvf_arr = np.zeros(mask_image_arr.shape + (3,))\n dvf_arr = dvf_arr + np.array([[[vector_asymmetric_contract[::-1]]]])\n dvf_template = sitk.GetImageFromArray(dvf_arr)\n\n # Copy image information\n dvf_template.CopyInformation(mask_image)\n\n dvf_template = sitk.Mask(dvf_template, mask_image)\n\n dvf_tfm = sitk.DisplacementFieldTransform(\n sitk.Cast(dvf_template, sitk.sitkVectorFloat64)\n )\n\n mask_image_asymmetric_contract = apply_field(\n mask_image, transform=dvf_tfm, structure=True, interp=1\n )\n\n # smooth\n if np.any(gaussian_smooth):\n\n if not hasattr(gaussian_smooth, \"__iter__\"):\n gaussian_smooth = (gaussian_smooth,) * 3\n\n dvf_template = sitk.SmoothingRecursiveGaussian(dvf_template, gaussian_smooth)\n\n dvf_tfm = sitk.DisplacementFieldTransform(\n sitk.Cast(dvf_template, sitk.sitkVectorFloat64)\n )\n mask_image_asymmetric_contract = apply_field(\n mask_image, transform=dvf_tfm, structure=True, interp=1\n )\n\n return mask_image_asymmetric_contract, dvf_tfm, dvf_template\n\n\ndef generate_field_asymmetric_extend(\n mask_image, vector_asymmetric_extend=(10, 10, 10), gaussian_smooth=5\n):\n \"\"\"\n Extends a structure (defined using a binary mask) using a specified vector.\n\n Args:\n mask_image ([SimpleITK.Image]): The binary mask to extend.\n vector_asymmetric_extend (tuple, optional): The extension vector applied to the entire binary mask.\n Convention: (+/-, +/-, +/-) = (sup/inf, post/ant, left/right) border is extended.\n Defined in millimetres.\n Defaults to (10, 10, 10).\n gaussian_smooth (int | list, optional): Scale of a Gaussian kernel used to smooth the deformation vector field. 
Defaults to 5.\n\n    Returns:\n        [SimpleITK.Image]: The binary mask following the extension.\n        [SimpleITK.DisplacementFieldTransform]: The transform representing the extension.\n        [SimpleITK.Image]: The displacement vector field representing the extension.\n    \"\"\"\n    # Define array\n    # Used for image array manipulations\n    mask_image_arr = sitk.GetArrayFromImage(mask_image)\n\n    # The template deformation field\n    # Used to generate transforms\n    dvf_arr = np.zeros(mask_image_arr.shape + (3,))\n    dvf_arr = dvf_arr - np.array([[[vector_asymmetric_extend[::-1]]]])\n    dvf_template = sitk.GetImageFromArray(dvf_arr)\n\n    # Copy image information\n    dvf_template.CopyInformation(mask_image)\n\n    dvf_tfm = sitk.DisplacementFieldTransform(\n        sitk.Cast(dvf_template, sitk.sitkVectorFloat64)\n    )\n\n    mask_image_asymmetric_extend = apply_field(\n        mask_image, transform=dvf_tfm, structure=True, interp=1\n    )\n\n    dvf_template = sitk.Mask(dvf_template, mask_image_asymmetric_extend)\n\n    # smooth\n    if np.any(gaussian_smooth):\n\n        if not hasattr(gaussian_smooth, \"__iter__\"):\n            gaussian_smooth = (gaussian_smooth,) * 3\n\n        dvf_template = sitk.SmoothingRecursiveGaussian(dvf_template, gaussian_smooth)\n\n        dvf_tfm = sitk.DisplacementFieldTransform(\n            sitk.Cast(dvf_template, sitk.sitkVectorFloat64)\n        )\n\n        mask_image_asymmetric_extend = apply_field(\n            mask_image, transform=dvf_tfm, structure=True, interp=1\n        )\n\n    return mask_image_asymmetric_extend, dvf_tfm, dvf_template\n\n\ndef generate_field_expand(\n    mask_image,\n    bone_mask=False,\n    expand=3,\n    gaussian_smooth=5,\n):\n    \"\"\"\n    Expands a structure (defined using a binary mask) using a specified vector to define the dilation kernel.\n\n    Args:\n        mask_image ([SimpleITK.Image]): The binary mask to expand.\n        bone_mask ([SimpleITK.Image, optional]): A binary mask defining regions where we expect restricted deformations.\n        expand (int | tuple, optional): The expansion vector applied to the entire binary mask.\n                                        Convention: (z,y,x) size of expansion kernel.\n                                        Defined in millimetres.\n                                        Defaults to 3.\n        gaussian_smooth (int | list, optional): Scale of a Gaussian kernel used to smooth the deformation vector field. 
Defaults to 5.\n\n    Returns:\n        [SimpleITK.Image]: The binary mask following the expansion.\n        [SimpleITK.DisplacementFieldTransform]: The transform representing the expansion.\n        [SimpleITK.Image]: The displacement vector field representing the expansion.\n    \"\"\"\n\n    if bone_mask is not False:\n        mask_image_original = mask_image + bone_mask\n    else:\n        mask_image_original = mask_image\n\n    # Use binary erosion to create a smaller volume\n    if not hasattr(expand, \"__iter__\"):\n        expand = (expand,) * 3\n\n    expand = np.array(expand)\n\n    # Convert millimetres to voxels\n    expand = expand / np.array(mask_image.GetSpacing()[::-1])\n\n    # Re-order to (x,y,z)\n    expand = expand[::-1]\n    # expand = [int(i / j) for i, j in zip(expand, mask_image.GetSpacing()[::-1])][::-1]\n\n    # If all negative: erode\n    if np.all(np.array(expand) <= 0):\n        print(\"All factors negative: shrinking only.\")\n        mask_image_expand = sitk.BinaryErode(\n            mask_image, np.abs(expand).astype(int).tolist(), sitk.sitkBall\n        )\n\n    # If all positive: dilate\n    elif np.all(np.array(expand) >= 0):\n        print(\"All factors positive: expansion only.\")\n        mask_image_expand = sitk.BinaryDilate(\n            mask_image, np.abs(expand).astype(int).tolist(), sitk.sitkBall\n        )\n\n    # Otherwise: sequential operations\n    else:\n        print(\"Mixed factors: shrinking and expansion.\")\n        expansion_kernel = expand * (expand > 0)\n        shrink_kernel = expand * (expand < 0)\n\n        mask_image_expand = sitk.BinaryDilate(\n            mask_image, np.abs(expansion_kernel).astype(int).tolist(), sitk.sitkBall\n        )\n        mask_image_expand = sitk.BinaryErode(\n            mask_image_expand, np.abs(shrink_kernel).astype(int).tolist(), sitk.sitkBall\n        )\n\n    if bone_mask is not False:\n        mask_image_expand = mask_image_expand + bone_mask\n\n    registration_mask_original = convert_mask_to_reg_structure(mask_image_original)\n    registration_mask_expand = convert_mask_to_reg_structure(mask_image_expand)\n\n    # Use DIR to find the deformation\n    _, _, dvf_template = fast_symmetric_forces_demons_registration(\n        registration_mask_expand,\n        registration_mask_original,\n        isotropic_resample=True,\n        resolution_staging=[4, 2],\n        iteration_staging=[10, 10],\n        ncores=8,\n        return_field=True,\n    )\n\n    # smooth\n    if np.any(gaussian_smooth):\n\n        if not hasattr(gaussian_smooth, \"__iter__\"):\n            gaussian_smooth = (gaussian_smooth,) * 3\n\n        dvf_template = sitk.SmoothingRecursiveGaussian(dvf_template, gaussian_smooth)\n\n    dvf_tfm = sitk.DisplacementFieldTransform(\n        sitk.Cast(dvf_template, sitk.sitkVectorFloat64)\n    )\n\n    mask_image_symmetric_expand = apply_field(\n        mask_image, transform=dvf_tfm, structure=True, interp=1\n    )\n\n    return mask_image_symmetric_expand, dvf_tfm, dvf_template\n\n\ndef generate_field_radial_bend(\n    reference_image,\n    body_mask,\n    reference_point,\n    axis_of_rotation=[0, 0, -1],\n    scale=0.1,\n    mask_bend_from_reference_point=(\"z\", \"inf\"),\n    gaussian_smooth=5,\n):\n    \"\"\"\n    Generates a synthetic field characterised by radial bending.\n    Typically, this field would be used to simulate a moving head and so masking is important.\n\n    Args:\n        reference_image ([SimpleITK.Image]): The image to be deformed.\n        body_mask ([SimpleITK.Image]): A binary mask in which the deformation field will be defined\n        reference_point ([tuple]): The point (z,y,x) about which the rotation field is defined.\n        axis_of_rotation (tuple, optional): The axis of rotation (z,y,x). Defaults to [0, 0, -1].\n        scale (float, optional): The deformation vector length at each point will equal scale multiplied by the distance to that point from reference_point. 
Defaults to 0.1.\n        mask_bend_from_reference_point (tuple, optional): The dimension (z=axial, y=coronal, x=sagittal) and limit (inf/sup, post/ant, left/right) for masking the vector field, relative to the reference point. Defaults to (\"z\", \"inf\").\n        gaussian_smooth (int | list, optional): Scale of a Gaussian kernel used to smooth the deformation vector field. Defaults to 5.\n\n    Returns:\n        [SimpleITK.Image]: The binary mask following the expansion.\n        [SimpleITK.DisplacementFieldTransform]: The transform representing the expansion.\n        [SimpleITK.Image]: The displacement vector field representing the expansion.\n    \"\"\"\n\n    body_mask_arr = sitk.GetArrayFromImage(body_mask)\n\n    if mask_bend_from_reference_point is not False:\n        if mask_bend_from_reference_point[0] == \"z\":\n            if mask_bend_from_reference_point[1] == \"inf\":\n                body_mask_arr[: reference_point[0], :, :] = 0\n            elif mask_bend_from_reference_point[1] == \"sup\":\n                body_mask_arr[reference_point[0] :, :, :] = 0\n        if mask_bend_from_reference_point[0] == \"y\":\n            if mask_bend_from_reference_point[1] == \"post\":\n                body_mask_arr[:, reference_point[1] :, :] = 0\n            elif mask_bend_from_reference_point[1] == \"ant\":\n                body_mask_arr[:, : reference_point[1], :] = 0\n        if mask_bend_from_reference_point[0] == \"x\":\n            if mask_bend_from_reference_point[1] == \"left\":\n                body_mask_arr[:, :, reference_point[2] :] = 0\n            elif mask_bend_from_reference_point[1] == \"right\":\n                body_mask_arr[:, :, : reference_point[2]] = 0\n\n    pt_arr = np.array(np.where(body_mask_arr))\n    vector_ref_to_pt = pt_arr - np.array(reference_point)[:, None]\n\n    # Normalise the normal vector (axis_of_rotation)\n    axis_of_rotation = np.array(axis_of_rotation)\n    axis_of_rotation = axis_of_rotation / np.linalg.norm(axis_of_rotation)\n\n    deformation_vectors = np.cross(vector_ref_to_pt[::-1].T, axis_of_rotation[::-1])\n\n    dvf_template = sitk.Image(reference_image.GetSize(), sitk.sitkVectorFloat64, 3)\n    dvf_template_arr = sitk.GetArrayFromImage(dvf_template)\n\n    if scale is not False:\n        dvf_template_arr[np.where(body_mask_arr)] = deformation_vectors * scale\n\n    dvf_template = sitk.GetImageFromArray(dvf_template_arr)\n    dvf_template.CopyInformation(reference_image)\n\n    # smooth\n    if np.any(gaussian_smooth):\n\n        if not hasattr(gaussian_smooth, \"__iter__\"):\n            gaussian_smooth = (gaussian_smooth,) * 3\n\n        dvf_template = sitk.SmoothingRecursiveGaussian(dvf_template, gaussian_smooth)\n\n    dvf_tfm = sitk.DisplacementFieldTransform(\n        sitk.Cast(dvf_template, sitk.sitkVectorFloat64)\n    )\n    reference_image_bend = apply_field(\n        reference_image,\n        transform=dvf_tfm,\n        structure=False,\n        default_value=int(sitk.GetArrayViewFromImage(reference_image).min()),\n        interp=2,\n    )\n\n    return reference_image_bend, dvf_tfm, dvf_template\n","sub_path":"platipy/imaging/deformation_fields/deformation_field_operations.py","file_name":"deformation_field_operations.py","file_ext":"py","file_size_in_byte":18091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"392058713","text":"import logging\n\n#basic terminal logging\n#logging.debug('This is a debug message')\n#logging.info('This is an info message')\n#logging.warning('This is a warning message')\n#logging.error('This is an error message')\n#logging.critical('This is a critical message')\n\n\n#increasing logging level to include debug and info\n'''\nlogging.basicConfig(level=logging.DEBUG)\nlogging.debug('This will get logged')\nlogging.info('This will also get logged now')\nlogging.warning('This is a warning 
message')\nlogging.error('This is an error message')\nlogging.critical('This is a critical message')\n'''\n\n#basic logging to a file\n#logging.basicConfig(filename='logging_file.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')\n#logging.warning('This will get logged to a file')\n\n\n#Formatting the Output\n#logging.basicConfig(format='%(process)d-%(levelname)s-%(message)s')\n#logging.warning('This is a Warning')\n\n\n#logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)\n#logging.info('Admin logged in')\n\n#custom date and time format\n#logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')\n#logging.warning('Admin logged out')\n\n#Logging Variable Data\n'''\nname = 'Goran'\nlogging.error('%s raised an error', name)\nlogging.error('{} raised an error' .format(name))\n'''\n\n#Capturing Stack Traces\n'''\na = 5\nb = 0\n\ntry:\n c = a / b\nexcept Exception as e:\n logging.error(\"Exception occurred\", exc_info=True)\n'''\n\n'''\na = 5\nb = 0\n\ntry:\n c = a / b\nexcept:\n logging.error(\"Exception occurred\", exc_info=True)\n\n\na = 5\nb = 0\ntry:\n c = a / b\nexcept:\n logging.exception(\"Exception occurred\")\n '''\n\n\n#custom loggers using handlers\n\n# Create a custom logger\nlogger = logging.getLogger(__name__)\n\n# Create handlers\nc_handler = logging.StreamHandler()\nf_handler = logging.FileHandler('file.log')\nc_handler.setLevel(logging.WARNING)\nf_handler.setLevel(logging.ERROR)\n\n# Create formatters and add it to handlers\nc_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\nf_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nc_handler.setFormatter(c_format)\nf_handler.setFormatter(f_format)\n\n# Add handlers to the logger\nlogger.addHandler(c_handler)\nlogger.addHandler(f_handler)\n\nlogger.warning('This is a warning')\nlogger.error('This is an error')","sub_path":"python_logging.py","file_name":"python_logging.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"460181447","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\n\n# Expected data yang akan diinput ke sistem\nexpected_data = [\"Dummy Name\", \"Jl.Dayeuhkolot, Bandung\",\n \"08979512412\", \"69219497194\"]\n\n# Jumlah kolom yang akan diinput\nn_col = len(expected_data)\n\n# Pengecekan pada Assert untuk Test Case\n\n\ndef AssertData(n_col, expected_data, actual_data):\n for i in range(0, n_col):\n if (expected_data[i] != actual_data[i]):\n return False\n return True\n\n\n# Script Test Case\ntry:\n driver = webdriver.Chrome(\"C:/Setup-Folder/Selenium_Grid/chromedriver\")\n driver.maximize_window()\n driver.get(\"http://localhost/rental_mobil/\")\n time.sleep(2)\n\n # Login System\n\n # Input Username\n e = driver.find_element(By.ID, \"inputUname\")\n e.send_keys(\"admin\")\n\n time.sleep(1)\n\n # Input Password\n e = driver.find_element(By.ID, \"inputPassword\")\n e.send_keys(\"admin\")\n time.sleep(2)\n\n # Login Button Click\n e = driver.find_element(\n By.CSS_SELECTOR, \"button.btn.btn-lg.btn-primary.btn-block\")\n e.submit()\n\n time.sleep(2)\n\n # Login Success\n # Go To Data Master -> Kostumer\n\n # Find Navigation Element\n e = driver.find_element(\n By.XPATH, \"//a[contains(@href, 'http://localhost/rental_mobil/admin/kostumer')]\")\n e.click()\n\n time.sleep(2)\n\n # Find Input Kostumer Button\n e = driver.find_element(\n By.XPATH, 
\"//a[contains(@href, 'http://localhost/rental_mobil/admin/kostumer_add')]\")\n e.click()\n\n # Input Kostumer Form\n e = driver.find_element(By.NAME, \"nama\")\n e.send_keys(expected_data[0])\n\n e = driver.find_element(By.NAME, \"alamat\")\n e.send_keys(expected_data[1])\n\n e = driver.find_element(\n By.CSS_SELECTOR, \"input[type='radio'][value='L']\")\n e.click()\n expected_data.append(e.get_attribute(\"value\"))\n\n e = driver.find_element(By.NAME, \"hp\")\n e.send_keys(expected_data[2])\n\n e = driver.find_element(By.NAME, \"ktp\")\n e.send_keys(expected_data[3])\n\n time.sleep(2)\n\n # Submit Form\n e = driver.find_element(\n By.CSS_SELECTOR, 'button[type=\"submit\"]')\n e.click()\n\n time.sleep(2)\n\n # Cek Elemen Data pada Row Table yang Terakhir\n actual_data = []\n\n # Menyusun Ulang Array sesuai Format Tabel\n reorder = [0, 4, 2, 3, 1]\n expected_data[:] = [expected_data[i] for i in reorder]\n\n for i in range(n_col):\n # Input ke dalam array setiap table column yang ingin diambil datanya\n e = driver.find_element(\n By.XPATH, \"//tr[last()]/td[\" + str(i+2) + \"]\").text.split('\\n')\n for data in e:\n actual_data.append(data)\n\n # Asserting Condition\n # Jika data yang diinsert sama dengan yang ada pada table column, maka Test Sukses\n # Jika tidak, Test Gagal\n try:\n assert AssertData(n_col, expected_data, actual_data)\n print(\"Assertion for Input Kostumer Success!\")\n except AssertionError:\n print(\"Assertion for Input Kostumer failed!\")\n\n# Close Driver\nfinally:\n if driver is not None:\n driver.close()\n","sub_path":"InputCustomer.py","file_name":"InputCustomer.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"386953094","text":"# -*- coding: utf-8 -*-\n# Copyright 2018 Elitumdevelop S.A, Ing. Mario Rangel\n# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).\n\nfrom odoo import models, fields, api\nfrom odoo.exceptions import UserError\n\nSTATES = [\n ('draft', 'Borrador'),\n ('validate', 'Validado'),\n ('cancel', 'Anulado')\n]\n\n\nclass TermContract(models.Model):\n _name = 'eliterp.term.contract'\n\n _description = 'Terminación de contrato'\n\n @api.multi\n def unlink(self):\n for payment in self:\n if payment.state != 'draft':\n raise UserError(\"No se puede eliminar una terminación de contrato diferente a estado borrador.\")\n return super(TermContract, self).unlink()\n\n @api.multi\n def validate(self):\n new_name = self.env['ir.sequence'].next_by_code('hr.tercontract')\n self.write({\n 'state': 'validate',\n 'name': new_name\n })\n\n @api.model\n def _get_date_format(self):\n return self.env['eliterp.global.functions'].get_date_format_invoice(self.date)\n\n @api.model\n def _get_date_formatend(self):\n return self.env['eliterp.global.functions'].get_date_format_invoice(self.end_date)\n\n @api.multi\n def imprimir_term_contract(self):\n \"\"\"\n Imprimimo\n \"\"\"\n self.ensure_one()\n return self.env.ref('eliterp_hr.eliterp_action_report_term_contract').report_action(self)\n\n date = fields.Date('Fecha documento', default=fields.Date.context_today, required=True, readonly=True,\n states={'draft': [('readonly', False)]})\n name = fields.Char('No. Documento')\n employee = fields.Many2one('hr.employee', string='Empleado', readonly=True,\n required=True,\n states={'draft': [('readonly', False)]})\n identification_id = fields.Char(related='employee.identification_id', string='No. 
identificación')\n end_date = fields.Date('Fecha salida', default=fields.Date.context_today, required=True, readonly=True,\n states={'draft': [('readonly', False)]})\n state = fields.Selection(STATES, string='Estado', default='draft')\n","sub_path":"eliterp_hr/models/term_contract.py","file_name":"term_contract.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"358362104","text":"from socketio import AsyncServer\nfrom aiohttp import web\n\nfrom bar_controller import BarController\n\nsio = AsyncServer()\napp = web.Application()\nsio.attach(app)\ncontroller = BarController()\n\n\n@sio.on(\"orders\")\nasync def orders(sid):\n \"\"\"Return list of orders\"\"\"\n # pylint: disable=W0613\n return controller.orders\n\n\n@sio.on(\"order\")\nasync def order(sid, data):\n \"\"\"Make an order\"\"\"\n # pylint: disable=W0613\n user = data.get(\"name\", \"\")\n items = data.get(\"items\", [])\n\n # add new order\n new_order = controller.add(\n user=user,\n items=[{\"name\": item[\"name\"]} for item in items])\n\n # response\n await sio.emit(\"add_order\", new_order)\n return {\"success\": True}\n\n\n@sio.on(\"done\")\nasync def done(sid, data):\n \"\"\"Order completed\"\"\"\n # pylint: disable=W0613\n order_id = data.get(\"id\")\n controller.done(order_id)\n return {\"success\": True}\n\nif __name__ == \"__main__\":\n web.run_app(app, port=8090)\n","sub_path":"gem/services/bar/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"550276555","text":"\"\"\"\nSection 6 - Q10\n\n조합 구하기\n\n1부터 N까지 번호가 적힌 구슬이 있습니다.\n이중 M개를 뽑는 방법의 수를 출력하는 프로그램을 작성하세요.\n\"\"\"\n\nimport sys\nsys.stdin = open('input.txt', 'r')\n\ndef DFS(d, s):\n\n global cnt\n\n if d==m:\n for ele in res:\n print(ele, end=' ')\n print()\n cnt += 1\n\n else:\n for i in range(s, n+1):\n if ch[i]==0:\n res[d] = i\n ch[i] = 1\n DFS(d+1, i+1)\n ch[i] = 0\n\nif __name__=='__main__':\n n, m = map(int, input().split(' ')) # n=4, m=2\n res = [0] * m\n ch = [0] * (n+1)\n cnt = 0\n DFS(0, 1)\n print(cnt)\n","sub_path":"section_6/Q10/Section6_Q10_김채형.py","file_name":"Section6_Q10_김채형.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"174818275","text":"n=int(input())\nfor p in range(n):\n a=int(input())\n b=[int(x) for x in input().split(\" \")]\n c=[int(x) for x in input().split(\" \")]\n d=[int(x) for x in input().split(\" \")]\n e=[]\n for i in range(0,len(b)):\n for j in range(0,len(c)):\n k=b[i]-c[j]\n e.append(k)\n count=0\n for i in d:\n if i in e:\n count += 1\n print(count)","sub_path":"Code/CodeRecords/2614/60829/280732.py","file_name":"280732.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"430205184","text":"# -*- coding:utf-8 -*-\r\n# 给你一个链表数组,每个链表都已经按升序排列。\r\n#\r\n# 请你将所有链表合并到一个升序链表中,返回合并后的链表。\r\n#\r\n#\r\n#\r\n# 示例 1:\r\n#\r\n# 输入:lists = [[1,4,5],[1,3,4],[2,6]]\r\n# 输出:[1,1,2,3,4,4,5,6]\r\n# 解释:链表数组如下:\r\n# [\r\n# 1->4->5,\r\n# 1->3->4,\r\n# 2->6\r\n# ]\r\n# 将它们合并到一个有序链表中得到。\r\n# 1->1->2->3->4->4->5->6\r\n#\r\n#\r\n# 示例 2:\r\n#\r\n# 输入:lists = []\r\n# 输出:[]\r\n#\r\n#\r\n# 示例 3:\r\n#\r\n# 输入:lists = [[]]\r\n# 输出:[]\r\n#\r\n#\r\n#\r\n#\r\n# 提示:\r\n#\r\n#\r\n# k == lists.length\r\n# 0 <= k <= 10^4\r\n# 0 <= lists[i].length <= 
500\r\n# -10^4 <= lists[i][j] <= 10^4\r\n# lists[i] 按 升序 排列\r\n# lists[i].length 的总和不超过 10^4\r\n#\r\n# Related Topics 堆 链表 分治算法\r\n# 👍 899 👎 0\r\n\r\n\r\n# leetcode submit region begin(Prohibit modification and deletion)\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, val=0, next=None):\r\n# self.val = val\r\n# self.next = next\r\nclass Solution:\r\n def mergeTwoList(self, l1, l2):\r\n # 递归调用,合并两个list\r\n # 可以换成lc21的迭代方式\r\n if not l1:\r\n return l2\r\n if not l2:\r\n return l1\r\n if l1.val < l2.val:\r\n l1.next = self.mergeTwoList(l1.next, l2)\r\n return l1\r\n else:\r\n l2.next = self.mergeTwoList(l1, l2.next)\r\n return l2\r\n\r\n def merge(self, lists, left, right):\r\n # 二分法:两两合并,数量减半,再两两合并\r\n if left == right:\r\n return lists[left]\r\n mid = left + (right - left) // 2\r\n l1 = self.merge(lists, left, mid)\r\n l2 = self.merge(lists, mid+1, right)\r\n return self.mergeTwoList(l1, l2)\r\n\r\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\r\n # 方法1:分治:两两合并\r\n if not lists:\r\n return\r\n return self.merge(lists, 0, len(lists)-1)\r\n\r\n\r\n def mergeKLists1(self, lists: List[ListNode]) -> ListNode:\r\n # 方法2:使用优先队列:时间n*log(k),n为总个数,k为链表数\r\n import heapq\r\n res = ListNode(0)\r\n p, head = res, []\r\n # 把每个链表存进优先队列里\r\n for i in range(len(lists)):\r\n if lists[i]:\r\n heapq.heappush(head, (lists[i].val, i))\r\n lists[i] = lists[i].next\r\n\r\n while head:\r\n # 弹出一个最外的val节点,并建立节点\r\n val, index = heapq.heappop(head)\r\n p.next = ListNode(val)\r\n p = p.next\r\n if lists[index]:\r\n heapq.heappush(head, (lists[index].val, index))\r\n lists[index] = lists[index].next\r\n return res.next\r\n\r\n\r\n\r\n\r\n","sub_path":"遇到的/28_k个有序链表合并.py","file_name":"28_k个有序链表合并.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"420426762","text":"__author__ = [\"Francisco Clavero\"]\n__description__ = \"Siamese model training entry point.\"\n__email__ = [\"fcoclavero32@gmail.com\"]\n__status__ = \"Prototype\"\n\nfrom typing import List\n\nimport click\n\nfrom vscvs.cli.decorators import pass_context_to_kwargs, pass_kwargs_to_context\nfrom vscvs.loss_functions import ReductionMixin\nfrom vscvs.utils import load_classification_model_from_checkpoint\n\n\n@click.group()\n@click.option(\n \"--dataset-name\",\n prompt=\"Dataset name\",\n help=\"The name of the dataset to be used for training.\",\n type=click.Choice([\"sketchy\", \"sketchy-test\"]),\n)\n@click.option(\n \"--loss-reduction\",\n prompt=\"Loss reduction\",\n help=\"Reduction function for the loss function.\",\n type=click.Choice(ReductionMixin.reduction_choices),\n)\n@click.option(\n \"--margin\",\n prompt=\"Margin\",\n help=\"The margin for the contrastive loss.\",\n default=0.2,\n)\n@pass_kwargs_to_context\ndef siamese(context: click.Context, *_, **__) -> None:\n \"\"\"Train a siamese model.\"\"\"\n context.obj[\"dataset_name\"] = context.obj[\"dataset_name\"] + \"-siamese\"\n\n\n@click.group()\n@click.option(\n \"--first-branch-checkpoint\",\n prompt=\"First branch checkpoint name\",\n help=\"Name of the checkpoint directory for the first branch.\",\n)\n@click.option(\n \"--first-branch-date\",\n prompt=\"First branch checkpoint date\",\n help=\"Checkpoint date (corresponds to the directory name) for the first branch.\",\n)\n@click.option(\n \"--first-branch-state-dict\",\n prompt=\"First branch state dict\",\n help=\"The state_dict file to be loaded for the first branch.\",\n)\n@click.option(\n 
\"-tf\",\n \"--first-branch-tag\",\n help=\"Optional tag for first branch model checkpoint and tensorboard logs.\",\n multiple=True,\n)\n@click.option(\n \"--second-branch-checkpoint\",\n prompt=\"Second branch checkpoint name\",\n help=\"Name of the checkpoint directory for the second branch.\",\n)\n@click.option(\n \"--second-branch-date\",\n prompt=\"Second branch checkpoint date\",\n help=\"Checkpoint date (corresponds to the directory name) for the second branch.\",\n)\n@click.option(\n \"--second-branch-state-dict\",\n prompt=\"Second branch state dict\",\n help=\"The state_dict file to be loaded for the second branch.\",\n)\n@click.option(\n \"-ts\",\n \"--second-branch-tag\",\n help=\"Optional tag for second branch model checkpoint and tensorboard logs.\",\n multiple=True,\n)\n@pass_kwargs_to_context\ndef pretrained(\n context: click.Context,\n first_branch_checkpoint: str,\n first_branch_date: str,\n first_branch_state_dict: str,\n first_branch_tag: List[str],\n second_branch_checkpoint: str,\n second_branch_date: str,\n second_branch_state_dict: str,\n second_branch_tag: List[str],\n *_,\n **__\n) -> None:\n \"\"\"Train a siamese model from pretrained model weights.\"\"\"\n from vscvs.models import ResNext\n\n if first_branch_checkpoint and first_branch_date and first_branch_state_dict:\n context.obj.pop(\"first_branch_checkpoint\")\n context.obj.pop(\"first_branch_date\")\n context.obj.pop(\"first_branch_state_dict\")\n context.obj.pop(\"first_branch_tag\")\n context.obj[\"embedding_network_0\"] = load_classification_model_from_checkpoint(\n ResNext,\n first_branch_state_dict,\n first_branch_checkpoint,\n first_branch_date,\n *first_branch_tag\n )\n if second_branch_checkpoint and second_branch_date and second_branch_state_dict:\n context.obj.pop(\"second_branch_checkpoint\")\n context.obj.pop(\"second_branch_date\")\n context.obj.pop(\"second_branch_state_dict\")\n context.obj.pop(\"second_branch_tag\")\n context.obj[\"embedding_network_1\"] = load_classification_model_from_checkpoint(\n ResNext,\n second_branch_state_dict,\n second_branch_checkpoint,\n second_branch_date,\n *second_branch_tag\n )\n\n\n@click.command()\n@pass_context_to_kwargs\ndef cnn(*args, **kwargs) -> None:\n \"\"\"Train a siamese CNN model.\"\"\"\n from vscvs.trainers.siamese import train_siamese_cnn\n\n click.echo(\"siamese cnn - {} dataset\".format(kwargs[\"dataset_name\"]))\n train_siamese_cnn(*args, **kwargs)\n\n\n@click.command()\n@pass_context_to_kwargs\ndef resnet(*args, **kwargs) -> None:\n \"\"\"Train a siamese ResNet model.\"\"\"\n from vscvs.trainers.siamese import train_siamese_resnet\n\n click.echo(\"siamese resnet - {} dataset\".format(kwargs[\"dataset_name\"]))\n train_siamese_resnet(*args, **kwargs)\n\n\n@click.command()\n@pass_context_to_kwargs\ndef resnext(*args, **kwargs) -> None:\n \"\"\"Train a siamese ResNext model.\"\"\"\n from vscvs.trainers.siamese import train_siamese_resnext\n\n click.echo(\"siamese resnext - {} dataset\".format(kwargs[\"dataset_name\"]))\n train_siamese_resnext(*args, **kwargs)\n\n\nfor command in [cnn, resnet, resnext]:\n pretrained.add_command(command)\n siamese.add_command(command)\n\n\nsiamese.add_command(pretrained)\n","sub_path":"vscvs/cli/train/siamese.py","file_name":"siamese.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"224868018","text":"import pandas as pd\nimport numpy as np\nimport re, math, hashlib, json\n\ndef get_md5(string):\n '''\n 加密函数\n 
'''\n return hashlib.md5(str(string).encode('utf-8')).hexdigest()\n\n\ndef filter_numbers(n):\n '''\n 过滤清洗手机号码\n '''\n pattern = re.compile('(.*)?(1(34|35|36|37|38|39|47|50|51|52|57|58|59|72|78|82|83|84|87|88|98|30|31|32|45|55|56|66|71|75|76|85|86|33|49|53|73|77|80|81|89|99|70)\\d{8})(\\.0)?$')\n m = pattern.match(n)\n if m:\n return m.groups()[1]\n else:\n return n[:-2] if n.endswith('.0') else n\n \n\ndef format_json(x, null_content):\n if pd.isnull(x):\n # null_content = '{\"query_times\": \"{\\\\\"7d\\\\\": 0, \\\\\"15d\\\\\": 0, \\\\\"30d\\\\\": 0, \\\\\"3m\\\\\": 0, \\\\\"6m\\\\\": 0, \\\\\"12m\\\\\": 0, \\\\\">12m\\\\\": 0}\", \"query_org_cnt\": \"{\\\\\"7d\\\\\": 0, \\\\\"15d\\\\\": 0, \\\\\"30d\\\\\": 0, \\\\\"3m\\\\\": 0, \\\\\"6m\\\\\": 0, \\\\\"12m\\\\\": 0, \\\\\">12m\\\\\": 0}\", \"query_org_types\": \"[]\"}'\n return null_content\n else:\n x = json.loads(x)\n for k, v in x.items():\n x[k] = json.dumps(v)\n return json.dumps(x) \n\n\ndef json_to_df(x):\n import json\n x = x.apply(lambda s:pd.Series(json.loads(s)))\n return x.replace('', np.nan).convert_objects(convert_numeric=True)\n\n\n\ndef prob_to_score(p, A=423.82, B=72.14):\n \"\"\"\n args:\n p: 模型输出的概率,对1的概率\n A: 基础分补偿,不用修改\n B: 刻度,不用修改\n return:\n score: 分数,头尾掐掉\n \"\"\"\n odds = p / (1 - p)\n score = A - B * math.log(odds)\n score = max(350, score)\n score = min(950, score)\n return score\n\n\ndef sigmoid(logit):\n \"\"\"\n args:\n logit: logistics model 输出的值\n return:\n 激活函数返回的值\n \"\"\"\n return 1.0 / (1 + math.exp(-logit))\n\n\ndef score_to_risklevel(score, cut_points=None):\n \"\"\"\n args:\n score: 由概率转换出的分数值\n cut_points: 十等分切割点\n return:\n risk_level: 返回一个风险等级\n \"\"\"\n if not cut_points:\n cut_points = [350, 410, 470, 530, 590, 650, 710, 770, 830, 890, 950]\n return int(pd.cut([score], bins=cut_points, labels=list(range(10, 0, -1)), include_lowest=True)[0])\n\n\nimport re\ndef get_phone_reg_time(x):\n r = []\n if not pd.isnull(x):\n r = re.findall(r'使用(\\d+)个月', x)\n return r[0] if len(r) > 0 else np.nan\n\n\nimport json\ndef extract_nested_json(x):\n \"\"\"\n args: \n x: json-formated data\n return:\n anti-nested formated data 将多层嵌套的json提取出只有一层的json,返回数据也是json类型 \n example:\n df.data.map(extract_nested_json).apply(lambda s:pd.Series(json.loads(s)))\n \"\"\"\n global_dic = {}\n def json_to_dict(key, value, prefix=''):\n if isinstance(value, dict):\n for k, v in value.items():\n if key and prefix:\n json_to_dict(k, v, prefix + '_' + key)\n elif key and not prefix:\n json_to_dict(k, v, key)\n elif not key and prefix:\n json_to_dict(k, v, prefix)\n else:\n json_to_dict(k, v, '')\n else:\n if prefix:\n key = prefix + '_' + key\n global_dic[key] = value\n tmp = json.loads(x)\n try:\n json_to_dict('', tmp)\n except:\n global_dic['_ERROR_'] = 1\n return json.dumps(global_dic)","sub_path":"utils_tools.py","file_name":"utils_tools.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"364751642","text":"# https://atcoder.jp/contests/code-festival-2015-morning-middle/tasks/cf_2015_morning_easy_c\n\nN, K, M, R = map(int, input().split())\nS = [int(input()) for _ in range(N - 1)]\nS.sort(reverse=True)\nS0 = S[:K]\n\n# 二分探索\nlo = 0\nhi = M\n\nwhile lo < hi:\n mi = (lo + hi) // 2\n S = S0 + [mi]\n S.sort()\n mS = sum(S[1:]) / K\n if mS < R:\n lo = mi + 1\n else:\n hi = mi\n\nif lo == M:\n print(-1)\nelse:\n 
print(lo)","sub_path":"AtCoder/012_CodeFestival2015mmA/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"393888083","text":"# Chapter 6 Programming Exercises 6 - Average of numbers\n# Author: Steve Schroeder\n# Sum and return average of numbers in file. Add exception handling.\n#\n\n\ndef main():\n try:\n user_file = program_input()\n average = program_processing(user_file)\n program_output(average)\n except IOError:\n print(\"That file does not exist. Program ending\")\n except ValueError:\n print(\"One or more of the values in this file are not valid. Program ending.\")\n except:\n print(\"An unknown error occurred. Program ending.\")\n\n\ndef program_input():\n user_file = input(\"What file to read? \")\n return user_file\n\n\ndef program_processing(user_file):\n user_file = open(user_file, \"r\")\n sum_lines = 0\n count = 0\n average = 0\n for lines in user_file:\n count = count + 1\n sum_lines += int(lines.rstrip())\n average = sum_lines / count\n return average\n\n\ndef program_output(average):\n print(\"The average of the numbers contained in the file is: \" + str(average))\n\n\nmain()","sub_path":"Chapter6/Averages.py","file_name":"Averages.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"55044880","text":"import cPickle\nimport pdb\nimport os\nimport glob\n\n# word, vocab = cPickle.load(open('./'+vocab_file))\nword, vocab = cPickle.load(\n open('./vocab_coco_14.pkl', 'rb'))\n# input_file = 'save/coco_451.txt'\n\ninput_file = './train_coco_14.txt.index' # syn_val_words\noutput_file = './train.txt'\n\nwith open(output_file, 'w')as fout:\n with open(input_file)as fin:\n for line in fin:\n #line.decode('utf-8')\n line = line.split()\n #line.pop() \n #line.pop() \n line = [int(x) for x in line]\n line = [word[x] for x in line if x != 0]\n # if 'OTHERPAD' not in line:\n line = ' '.join(line) + '\\n'\n fout.write(line)#.encode('utf-8'))\n","sub_path":"unconditional_generation/data/MS_COCO/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"585294025","text":"# -*- coding:utf-8 -*-\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\nclass Solution:\n def deleteDuplication(self, pHead):\n # write code here\n if not pHead.next: # 如果只有1个节点\n return pHead\n pre = pHead\n p = pre.next\n dic = {}\n dic[pre.val] = 1\n while p:\n if p.val in dic: # 如果重复\n pre.next = p.next\n p = pre.next\n dic[pre.val] = '#' # 标记,后续删除pre对应的节点\n else:\n dic[p.val] = 1\n pre = p\n p = p.next\n # 保证pHead对应的节点是需要保留的\n while dic[pHead.val] == '#':\n pHead = pHead.next\n p = pHead\n while p:\n if dic[p.next.val] == '#':\n pre = p.next\n p.next = pre.next\n else:\n p = p.next\n return pHead\n","sub_path":"删除链表中重复的节点/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"291635843","text":"# Task 1. 
Список\nspisok = []\nspisok += [1, 2.3, 'Pikar', 'zeleniy', 'Pikaya']\nprint(spisok)\nspisok.append([8, 88, 888])\nspisochek = ['Stat', 'Python', 'Diskra', 'Proekt', 10, False]\nspisok += spisochek\nprint(spisok)\ndel spisok[3]\nprint(spisok)\nspisok.remove('Pikaya')\nprint(spisok)\nspisok[1] = 10\nprint(spisok)\n\n# проверяем, сколько чисел 10 в нашем списке\na = spisok.count(10)\nprint('таких элементов', a, 'штуки')\n\ndlina = len(spisok)\nprint('длина:', dlina)\n\n\n# Task 2. Кортеж\nmy_tuple = (1, 2.3, 'Pikar', 'zeleniy', 'Pikaya', [8, 88, 888], 'Stat', 'Python', 'Diskra', 'Proekt', 10, False)\nprint(my_tuple)\n# проверяем, сколько чисел 10 в нашем кортеже\nprint(my_tuple.count(10))\n\ndlina = len(my_tuple)\nprint(dlina)\n\n\n# Task 3\nfriends = ['Asya', 'Nastya', 'Lesha', 'Masha', 'Olya']\nfor friend in friends:\n print(\"Hello, \"+friend)\nprint(\"Hello everyone!\")\n\n\n# Task 4. Напишите программу, поэлементно складывающую две последовательные коллекции (заданных вами) одного размера\nlist1 = [1, 2, 3]\nlist2 = [-3, 3, 13]\nlist_sum = []\nq = len(list1)\nfor i in range(q):\n sumi = list1[i]+list2[i]\n list_sum.append(sumi)\nprint(list_sum)\n\n\n\n# Task 5. Создайте программу, принимающую 3 числа, соответствующих длинам сторон треугольника,\n# и выводящую его тип (равнобедренный, разносторонний, равносторонний)\na = int(input('введите сторону a '))\nb = int(input('введите сторону b '))\nc = int(input('введите сторону c '))\nif (a + b < c) or (a + c < b) or (b + c < a):\n print('эти значения не могут быть сторонами треугольника')\nelse:\n if a == b == c:\n print('равносторонний')\n elif (a == b) or (a == c) or (b == c):\n print('равнобедренный')\n else:\n print('разносторонний')\n\n\n\n# Task 6. Разверните лист всеми известными вам способами\nelements = [1, 2, ('fruit', 5)]\nprint(elements)\n\nprint(elements[::-1]) ##1\n\ni = len(elements) -1 ##2\nelem_inv = []\nwhile i >= 0:\n elem_inv += [elements[i]]\n i -= 1\nprint(elem_inv)\n\nelements.reverse() ##3\nprint(elements)\n\n\n# Task 7. 
Теперь напишите программу, отбирающую из коллекции чисел только чётные, а затем считающая их сумму\ninteresting_numbers = (3, 2, 5, 7, 14, 26, 32, 31, 37, 6)\nsumma = 0\nfor elem in interesting_numbers:\n if elem % 2 == 0:\n summa += elem\nprint(summa)","sub_path":"HW_4.py","file_name":"HW_4.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"579755426","text":"#%%\n\nimport numpy as np\nimport os\ndataset_path = \"/content/drive/MyDrive/VOCdevkit/VOC2007/\"\n\nimg_dir = \"JPEGImages\"\nannot_dir = \"Annotations\"\n\nos.chdir(dataset_path)\n\n\n\nobj_class = ['person', # Person\n 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep', # Animal\n 'aeroplane', 'bicycle', 'boat', 'bus', 'car', 'motorbike', 'train', # Vehicle\n 'bottle', 'chair', 'dining table', 'potted plant', 'sofa', 'tv/monitor' # Indoor\n ]\n\nimport cv2\nimport xml.etree.ElementTree as Et\nimport os\nimport pandas as pd\nfrom PIL import Image\nfrom sklearn.preprocessing import LabelBinarizer\nimport tensorflow as tf\nfrom tensorflow.keras import models, layers\nfrom tensorflow.keras.layers import LeakyReLU\nimport matplotlib.pyplot as plt\n\nfrom utils import * \n\nfrom tqdm import tqdm\n\n\n\n#%%\ndef VOCDataset_encoder( img_file_list, object_class, mode = 'train'):\n '''\n Input:\n img_file_list : list of image file names\n object_class : list of class name of objects \n\n Output:\n resized image, encoded matrix \n '''\n\n img_dir = \"JPEGImages\"\n annot_dir = \"Annotations\"\n S = 7\n B = 2\n C = len(object_class)\n\n label_operater = LabelBinarizer()\n label_operater.fit(object_class)\n \n image_list = []\n labels = []\n\n if mode =='train':\n for img_file_name in tqdm(img_file_list):\n \n image = cv2.imread(os.path.join(img_dir,img_file_name))\n annot_filename = img_file_name.split('.')[0]+'.xml'\n \n xml = open(os.path.join(annot_dir, annot_filename), \"r\")\n tree = Et.parse(xml)\n root = tree.getroot()\n objects = root.findall(\"object\")\n image_annotation =[]\n for _object in objects:\n name = _object.find('name').text\n bndbox = _object.find('bndbox')\n xmin = int(bndbox.find('xmin').text)\n xmax = int(bndbox.find('xmax').text)\n ymin = int(bndbox.find('ymin').text)\n ymax = int(bndbox.find('ymax').text)\n image_annotation.append([name,(xmin+xmax)/2, (ymin+ymax)/2,xmax- xmin, ymax- ymin ])\n \n \n label_matrix = np.zeros((S, S, C + 5 ))\n for box in image_annotation:\n x_normed = 7*box[1]/image.shape[1]\n grid_x = np.int(np.trunc(x_normed))\n x_normed = x_normed - grid_x\n \n y_normed = 7*box[2]/image.shape[0]\n \n grid_y = np.int(np.trunc(y_normed))\n y_normed = y_normed - grid_y\n w_normed = box[4]/image.shape[1]\n h_normed = box[3]/image.shape[0]\n label_matrix[grid_y, grid_x, :5] = 1, x_normed, y_normed , w_normed, h_normed\n label_matrix[grid_y, grid_x, 5:] = label_operater.transform(np.expand_dims(box[0],axis = 0))[0]\n resized = cv2.resize(image, (448,448), interpolation = cv2.INTER_AREA)\n labels.append(label_matrix)\n image_list.append(resized)\n image_list = np.array(image_list)\n labels = np.array(labels)\n return image_list, labels\n \n else:\n for img_file_name in tqdm(img_file_list):\n image = cv2.imread(os.path.join(img_dir,img_file_name))\n resized = cv2.resize(image, (448,448), interpolation = cv2.INTER_AREA)\n image_list.append(resized)\n image_list = np.array(image_list)\n\n return image_list\n\n#%%\n\nclass Yolo(tf.keras.Model):\n '''\n Yolo model \n '''\n\n def __init__(self, S, B,classes_number, 
**kwargs):\n\n super(Yolo, self).__init__(name = 'Yolo', **kwargs)\n\n self.classes_number = classes_number\n self._S = S\n self._B = B\n\n architecture = [['Conv' ,7, 64, 2], ['Pool', 2,2]] +\\\n [['Conv', 3, 192,1], ['Pool', 2,2]] +\\\n [['Conv', 1, 128,1],['Conv', 3,256,1],['Conv', 1, 256,1],['Conv', 3, 512,1], ['Pool', 2,2]] +\\\n [['Conv', 1, 256,1],['Conv', 3,512,1]]*4 + [['Conv', 1, 512,1],['Conv', 3,1024,1],['Pool', 2,2]]+ \\\n [['Conv', 1, 512,1],['Conv', 3,1024,1]]*2 + [['Conv', 3,1024,1],['Conv', 3,1024,2]] +\\\n [['Conv', 3,1024,2]]*2 \n self.nn = []\n for i in range(len(architecture)):\n if architecture[i][0] == 'Conv':\n self.nn.append(layers.Conv2D(architecture[i][2], (architecture[i][1], architecture[i][1]),\n strides =architecture[i][3], activation = LeakyReLU(0.1), input_shape = (448,448, 3 ), padding = 'same'))\n \n if architecture[i][0] == 'Pool':\n self.nn.append(layers.MaxPooling2D(pool_size = (architecture[i][1],architecture[i][1]), \n strides = architecture[i][2]))\n\n self.nn.append(layers.Flatten())\n self.nn.append(layers.Dense(4096, activation= LeakyReLU(0.1)))\n self.nn.append(layers.Dropout(0.5))\n self.nn.append(layers.Dense(self._S*self._S*(5*self._B + classes_number),\n activation=\"softmax\", \n kernel_regularizer=tf.keras.regularizers.l2(0.0005),\n bias_regularizer=tf.keras.regularizers.l2(0.0005) ) )\n\n def call(self,inputs):\n x = self.nn[0](inputs)\n for layer in self.nn[1:]:\n x= layer(x)\n \n \n return x\n\n#%%\ntrain_ratio = 0.9\n\nnum = len(os.listdir(img_dir))\ntrain_idx = np.random.choice(np.arange(num),round(num*train_ratio) )\ntest_idx = np.setdiff1d(np.arange(num), train_idx)\n\ntrain_img_file = np.array(os.listdir(img_dir))[train_idx]\ntest_img_file =np.array(os.listdir(img_dir))[test_idx]\n\n\n\ntrain_datasets, train_labels = VOCDataset_encoder(train_img_file, object_class= obj_class, mode = 'train')\ntest_datasets = VOCDataset_encoder(test_img_file, object_class= obj_class, mode = 'test')\n\nnp.save(\"/content/drive/MyDrive/train_datasets\", train_datasets)\nnp.save(\"/content/drive/MyDrive/train_labels\", train_labels)\nnp.save(\"/content/drive/MyDrive/test_datasets\", test_datasets) \n\n#%%\n\ntrain_datasets = np.load(\"/content/drive/MyDrive/train_datasets.npy\")\ntrain_labels = np.load(\"/content/drive/MyDrive/train_labels.npy\")\ntest_datasets = np.load(\"/content/drive/MyDrive/test_datasets.npy\")\n\ntrain_for_tf = tf.data.Dataset.from_tensor_slices((train_datasets, train_labels))\ntrain_for_tf = train_for_tf.shuffle(buffer_size = len(train_for_tf)).batch(64)\n\ndel(train_datasets)\ndel(train_labels)\ndel(test_datasets)\n\n\n#%%\n\nfrom tensorflow.keras.losses import Loss\n\n\nclass Yolo_loss(Loss):\n '''\n Loss function of Yolo v1\n '''\n def __init__(self,num_classes = 20 , num_cell = 7, num_boxes = 2, lambda_coord = 0.5, lambda_noobj = 5.0 ):\n super().__init__()\n self._C = num_classes\n self._S = num_cell\n self._B = num_boxes\n self._lambda_coord = lambda_coord\n self._lambda_noobj = lambda_noobj\n def call(self,y_true ,y_pred):\n pred_mat = tf.reshape(y_pred, [-1,self._S,self._S, self._C+self._B*5])\n\n predicted_classes = pred_mat[:,:,:,(5*self._B):]\n predicted_conf = pred_mat[:,:,:,0:self._B]\n predicted_coord = tf.reshape(pred_mat[:,:,:,self._B:(self._B*5)], [-1,self._S,self._S,self._B,4])\n responsible = tf.cast(tf.reshape(y_true[:,:,:,0], [-1, self._S, self._S, 1]), tf.float32)\n\n coord_target = tf.reshape(y_true[:,:,:,1:5], [-1,self._S,self._S,1,4])\n\n coord_target = tf.cast(tf.tile(coord_target, [1, 1, 1, self._B, 
1]), tf.float32)\n\n classes_target = tf.cast(y_true[:,:,:,5:],tf.float32)\n predicted_coord = tf.stack([predicted_coord[:,:,:,:,:2], tf.math.sqrt(predicted_coord[:,:,:,:,2:4])], axis = 5)\n predicted_coord = tf.reshape(predicted_coord, [-1,self._S,self._S,self._B,4])\n\n iou_predict_truth = self.compute_iou(predicted_coord, coord_target)\n\n responsible_idx = tf.reduce_max(iou_predict_truth, 3, keepdims=True)\n responsible_idx = tf.cast((iou_predict_truth >= responsible_idx), tf.float32) * tf.cast(responsible, tf.float32)\n ## responsible box가 두개인 경우 모두 사용하기 위함\n\n coord_loss = self.compute_coord_loss(coord_target, predicted_coord, responsible_idx)\n res_conf_loss, nores_conf_loss = self.compute_conf_loss(predicted_conf, iou_predict_truth, responsible_idx)\n class_loss = self.compute_class_loss(classes_target, predicted_classes, responsible)\n\n total_loss = coord_loss+ res_conf_loss + nores_conf_loss + class_loss\n \n return total_loss\n\n def compute_coord_loss(self,true, pred ,responsible):\n coord_mask = tf.expand_dims(responsible, 4)\n boxes_delta = coord_mask * tf.square(true - pred)\n coord_loss_return = tf.reduce_mean(tf.reduce_sum(tf.square(boxes_delta), axis=[1, 2, 3, 4]),\n name='coord_loss') *self._lambda_coord\n return coord_loss_return\n\n def compute_conf_loss(self,pred, iou, responsible):\n not_responsible = tf.ones_like(responsible, dtype=tf.float32) - responsible\n\n responsible_delta = responsible * (iou - pred)\n responsible_loss = tf.reduce_mean(tf.reduce_sum(tf.square(responsible_delta), axis=[1, 2, 3]), name='respons_conf_loss')\n\n not_responsible_delta = not_responsible * pred\n not_responsible_loss = tf.reduce_mean(tf.reduce_sum(tf.square(not_responsible_delta), axis=[1, 2, 3]), name='norespons_conf_loss')*self._lambda_noobj\n\n return responsible_loss , not_responsible_loss\n\n def compute_class_loss(self,true, pred, detector):\n class_loss_return = detector*(true-pred)\n class_loss_return = tf.reduce_mean(tf.reduce_sum(tf.square(class_loss_return), axis = [1,2,3]), name = 'class_loss')\n return class_loss_return\n def compute_iou(self, boxes1, boxes2):\n boxes1 = tf.stack([boxes1[:, :, :, :, 0] - boxes1[:, :, :, :, 2] / 2.0,\n boxes1[:, :, :, :, 1] - boxes1[:, :, :, :, 3] / 2.0,\n boxes1[:, :, :, :, 0] + boxes1[:, :, :, :, 2] / 2.0,\n boxes1[:, :, :, :, 1] + boxes1[:, :, :, :, 3] / 2.0],axis =4)\n boxes2 = tf.stack([boxes2[:, :, :, :, 0] - boxes2[:, :, :, :, 2] / 2.0,\n boxes2[:, :, :, :, 1] - boxes2[:, :, :, :, 3] / 2.0,\n boxes2[:, :, :, :, 0] + boxes2[:, :, :, :, 2] / 2.0,\n boxes2[:, :, :, :, 1] + boxes2[:, :, :, :, 3] / 2.0] ,axis = 4)\n # calculate the left up point & right down point\n lu = tf.maximum(boxes1[:, :, :, :, :2], boxes2[:, :, :, :, :2])\n rd = tf.minimum(boxes1[:, :, :, :, 2:], boxes2[:, :, :, :, 2:])\n # intersection\n intersection = tf.maximum(0.0, rd - lu)\n inter_square = intersection[:, :, :, :, 0] * intersection[:, :, :, :, 1]\n # calculate the boxs1 square and boxs2 square\n square1 = (boxes1[:, :, :, :, 2] - boxes1[:, :, :, :, 0]) * \\\n (boxes1[:, :, :, :, 3] - boxes1[:, :, :, :, 1])\n square2 = (boxes2[:, :, :, :, 2] - boxes2[:, :, :, :, 0]) * \\\n (boxes2[:, :, :, :, 3] - boxes2[:, :, :, :, 1])\n union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)\n\n return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)\n\n#%%\n\nclass MyLRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n \n def __init__(self, initial_learning_rate):\n self.initial_learning_rate = initial_learning_rate\n\n def __call__(self, 
step):\n if step <75:\n return self.initial_learning_rate \n elif step <105:\n return self.initial_learning_rate /10\n else:\n return self.initial_learning_rate /100\n\n\noptimizer = tf.keras.optimizers.SGD(learning_rate= MyLRSchedule(0.001), momentum= 0.9)\nloss_metric = tf.keras.metrics.Mean()\n\n\n#%%\n\nepochs = 135\n\nmodel = Yolo(classes_number= len(obj_class) , S =7 , B = 2)\n\nyolo_loss = Yolo_loss()\n\nfor epoch in range(epochs):\n # Iterate over the batches of the dataset.\n print(\"Epoch: %d\" % epoch)\n for step, (x_batch_train,y_batch_train) in enumerate(train_for_tf):\n with tf.GradientTape(persistent=True) as tape:\n result = model(tf.cast(x_batch_train, tf.float32))\n # Compute reconstruction loss\n loss = yolo_loss(y_batch_train,result )\n\n grads = tape.gradient(loss, model.trainable_weights)\n optimizer.apply_gradients(zip(grads, model.trainable_weights))\n\n loss_metric(loss)\n if step % 10 == 0:\n print(\"step %d: mean loss = %.4f\" % (step, loss_metric.result()))\n\ntf.saved_model.save(model, '/content/drive/MyDrive/data/Yolo_v1')\n\n#%%\nmodel = tf.saved_model.load('/content/drive/MyDrive/data/Yolo_v1')\n\n\n#%%\n\ndef test_time_dectection(model, image, object_class):\n '''\n Input:\n model : Yolo model\n image : image\n object_class : list of object names\n Output:\n plotting estimated object box \n '''\n\n resize = cv2.resize(image, (448,448),interpolation = cv2.INTER_AREA)\n resize = tf.cast(resize, tf.float32)\n resize = tf.reshape(resize, [1,448,448,3])\n result = model(resize)\n\n offset = tf.reshape(tf.cast([[i%7, i//7] for i in np.arange(49)],tf.float32), [7,7,2])\n\n pred_mat = tf.reshape(result, [ 7,7,30])\n\n ### box 재조정\n pred_mat[:,:,2:4] = pred_mat[:,:,2:4] + offset\n pred_mat[:,:,6:8] = pred_mat[:,:,6:8] + offset\n\n pred_mat[:,:,2] = pred_mat[:,:,2]/7 *image.shape[1]\n pred_mat[:,:,3] = pred_mat[:,:,3]/7 *image.shape[0]\n pred_mat[:,:,4] = pred_mat[:,:,4] *image.shape[1]\n pred_mat[:,:,5] = pred_mat[:,:,5] *image.shape[0]\n\n pred_mat[:,:,6] = pred_mat[:,:,6]/7 *image.shape[1]\n pred_mat[:,:,7] = pred_mat[:,:,7]/7 *image.shape[0]\n pred_mat[:,:,8] = pred_mat[:,:,8] *image.shape[1]\n pred_mat[:,:,9] = pred_mat[:,:,9] *image.shape[0]\n\n predicted_classes = pred_mat[:,:,10:]\n predicted_classes = tf.reshape(predicted_classes , [49,20])\n\n predicted_classes = tf.reshape(tf.tile(predicted_classes, [1,2]), [98,20])\n\n predicted_conf = pred_mat[:,:,:2]\n predicted_conf = tf.reshape(predicted_conf, [98,1])\n\n predicted_coord = tf.reshape(pred_mat[:,:,2:10], [98,4])\n\n result_box = tf.concat([predicted_conf, predicted_coord, predicted_classes], axis = 1)\n\n result_box = np.asarray(result_box)\n\n NMS_idx = filtering_and_non_max_suppression(result_box, object_class)\n\n for k, class_name in NMS_idx:\n image_rec = cv2.rectangle(image,(np.int(result_box[1] -result_box[3]/2),np.int(result_box[2] -result_box[4]/2)),\n (np.int(result_box[1] +result_box[3]/2),np.int(result_box[2] +result_box[4]/2)),(255,0,0), 2)\n cv2.putText(image_rec, class_name.astype(str),(result_box[1], result_box[2]), cv2.FONT_HERSHEY_SIMPLEX,0.9,(255,0,0),2)\n\n plt.imshow(image)\n plt.show()\n\n#%% Detection\n\n\nimage = image_read(img_dir, annot_dir, 1)\n\ntest_time_dectection(image,model,obj_class)","sub_path":"src/Yolo_v1.py","file_name":"Yolo_v1.py","file_ext":"py","file_size_in_byte":15116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"402419688","text":"import cv2 as cv\nimport os\ndef main():\n filename = 
os.getcwd()+'/datas/videos/Armbot.mp4'\n    cap = cv.VideoCapture(filename)\n    sec = 0\n    count = 0\n    frameRate = 0.5\n    success = cap.isOpened()\n    while success:\n        sec = sec + frameRate\n        success = writeFrame(cap,sec,count)\n        count = count+1\n\ndef writeFrame(vc,sec,count):\n    dname = os.getcwd()+'/datas/images/imageframes'\n    if not os.path.exists(dname):\n        os.makedirs(dname) \n    vc.set(cv.CAP_PROP_POS_MSEC,sec*1000)\n    hasF , img = vc.read()\n    if hasF:\n        cv.imwrite(dname+'/image_'+str(count)+'.png',img)\n       # \n    return hasF\n\nif __name__ == \"__main__\":\n    main()\n\n","sub_path":"ConvertVideotoImages.py","file_name":"ConvertVideotoImages.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"176149469","text":"def setup_logger(nocol=False):\n    import logging\n    import sys\n\n    for lname in ['cypoisson']:\n        LG = logging.getLogger(lname)\n        try:\n            from falafel.logger import Formatter\n            from falafel.logger import NOCOLORS\n            LG.setLevel('DEBUG')\n            sh = logging.StreamHandler(sys.stdout)\n\n            if nocol:\n                sh.setFormatter(Formatter(pre=NOCOLORS, lenstrip=None, contline=None))\n            else:\n                sh.setFormatter(Formatter())\n            LG.addHandler(sh)\n        except ImportError:\n            logging.basicConfig(level='DEBUG', stream=sys.stdout)\n\nsetup_logger()\n\ndef pytest_addoption(parser):\n    parser.addoption(\"--interactive\", action=\"store_true\")\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"204225452","text":"\"\"\"\nThis is a proof of concept to demonstrate how Alexa might be used to support the bids team.\n\"\"\"\n\nfrom __future__ import print_function\nfrom bs4 import BeautifulSoup\nimport requests\nfrom xhtml2pdf import pisa\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\nfrom operator import itemgetter\nimport boto3\nfrom email.mime.text import MIMEText\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nimport json\nimport hashlib\nimport jsonpickle\n\nfrom os import environ, getenv\n\nSNS_EMAIL_TOPIC = environ.get('SNS_EMAIL_TOPIC')\n\nresponse = requests.get('https://aws.amazon.com/products/').text\n\n\ndef getServiceList():\n    soup = BeautifulSoup(response, 'html.parser')\n    serviceList = []\n    for service in soup.find_all(class_='lb-content-item'):\n        serviceUrl = service.a['href']\n        serviceName = service.a.contents[0].strip()\n        serviceList.append({'serviceName': serviceName,\n                            'serviceUrl': 'https://aws.amazon.com'\n                            + serviceUrl})\n    return serviceList\n\n\ndef getServiceDescription(serviceUrl):\n    response = requests.get(serviceUrl).text\n    soup = BeautifulSoup(response, 'html.parser')\n    return soup.find('div', {'id': 'aws-page-content'})\n\n\ndef createPdf(serviceUrl):\n    outputFilename = '/tmp/service_description.pdf'\n    with open(outputFilename, 'w+b') as resultFile:\n        pisa.CreatePDF(getServiceDescription(serviceUrl).encode('utf-8'),\n                       resultFile)\n\n\ndef findService(serviceName):\n    confidenceList = []\n    serviceList = getServiceList()\n    for service in serviceList:\n        service.update({'ratio': fuzz.ratio(service['serviceName'], serviceName)})\n        confidenceList.append(service)\n    sortedConfidenceList = sorted(confidenceList,\n                                  key=itemgetter('ratio'), reverse=True)\n    return sortedConfidenceList[0]\n\n\ndef getAllParagraphs(url):\n    soup = BeautifulSoup(url, 'html.parser')\n    paragraphs = []\n    content = soup.find(role='main')\n    for paragraph in 
content.find_all('p'):\n        if paragraph is not None:\n            paragraphs.append(paragraph.text.strip())\n    return paragraphs\n\n\ndef getUrlDigest(url):\n    m = hashlib.md5()\n    m.update(url.content)\n    digest = m.hexdigest()\n    return digest\n\n\ndef push_sns(event, snsTopic):\n    if getenv('AWS_SAM_LOCAL'):\n        print('SAM_LOCAL DETECTED')\n        sns = boto3.client('sns',\n                           endpoint_url='http://localstack:4575',\n                           region_name='us-east-1')\n    else:\n        sns = boto3.client('sns')\n    response = sns.publish(\n        TopicArn=snsTopic,\n        Message=json.dumps(event)\n    )\n    return response\n\n\ndef build_email(serviceUrl):\n    \"\"\"\n    Build the email before sending.\n    \"\"\"\n    msg = MIMEMultipart()\n    msg.preamble = 'Multipart message.\\n'\n\n    # Get the first two paragraphs\n    part = MIMEText('Service Description: %r ' %\n                    getAllParagraphs(requests.get(serviceUrl).text)[0:2])\n    msg.attach(part)\n\n    # Now encode it so that it will traverse our functions intact\n    msg = jsonpickle.encode(msg)\n\n    return msg\n\n\ndef get_user_info(access_token):\n    amazonProfileURL = 'https://api.amazon.com/user/profile?access_token='\n    r = requests.get(url=amazonProfileURL+access_token)\n    if r.status_code == 200:\n        return r.json()\n    else:\n        return False\n\n\n# --------------- Main handler ------------------\n\n\ndef lambda_handler(event, context):\n    print(event)\n    alexa_event = json.loads(event['Records'][0]['Sns']['Message'])\n    print(alexa_event)\n    intent = alexa_event['request']['intent']\n    print(intent)\n    service_name = intent['slots']['service']['value']\n    service = findService(service_name)\n    #html = requests.get(service['serviceUrl']).text\n    #createPdf(service['serviceUrl'])\n    emailAddress = get_user_info(alexa_event['context']['System']['user']['accessToken'])['email']\n    message = {}\n    message['To'] = emailAddress\n    message['From'] = emailAddress\n    message['Subject'] = 'Here is the service description for ' + service['serviceName']\n    message['serviceUrl'] = service['serviceUrl']\n    message['Body'] = build_email(service['serviceUrl'])\n    resultResponse = push_sns(message, SNS_EMAIL_TOPIC)\n","sub_path":"services/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"601869582","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n#Written by Christopher Ferrier for CHE 3102 Problem A2 Spring 2021\n\n#Problem 3-57 from 5th Edition Cengel\n#(Thermal Contact Resistance)\n#Inconel plate of 12mm coated with a 300 um layer of TBC\n#k = 10500 W/m2K at interface\n#k_inc = 25 W/mK\n#k_tbc = 1.5 W/mK\n#plate surrounded by combustion gas at 1773.15 K\n#h = 750 W/m2K\n#Ts = 1473.15 K\n\n#Declaring constants\nh_int = 10500\nh_surr = 750\nk_inc = 25\nk_tbc = 1.5\nt_s = 1473.15\nt_surr = 1773.15\nl_plate = 12E-3\nl_tbc = 300E-6\nq_conv = h_surr*(t_surr-t_s)\n\n\n#Defining Functions\ndef total_resistance(platedepth):\n    x = (platedepth/k_inc) + (1/h_int) + (l_tbc/k_tbc)\n    return x\n\ndef platetemp(resistance):\n    x = t_s - (q_conv*resistance)\n    return x\n\n#Finding temperature at midpoint\nmidpoint_temp = platetemp(total_resistance(l_plate/2))\nprint(\"The temperature at the midpoint of the plate is \" + str(midpoint_temp) + \" K\")\n\n#Plotting temperature over full thickness of plate\n#Depth 0 is temp at interface\n#Last depth is temp on other side\ndepths = np.arange(0,12E-3+0.5E-4,0.5E-4)\ntemps = platetemp(total_resistance(depths))\n\n#Plotting temps of plate from interface to other side of 
plate\nplt.plot(depths,temps)\nplt.title('Temperature vs Plate Depth')\nplt.xlabel(\"Plate Depth (m)\")\nplt.ylabel('Temperature (K)')\nplt.show()\n\n\n\n","sub_path":"ProbA2.py","file_name":"ProbA2.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"41917151","text":"from control.user_input import UserInput\nfrom gui.main_window import MainWindow\nfrom gui.dialog_new_game import Ui_DialogNewGame\nfrom model.board import Board\nfrom model.piece import Piece\nfrom model.position import Position\nfrom PyQt5.QtWidgets import QDialog\nfrom model.ai_player import AIPlayer\n\nPLAYER_COLOR = {1: 'green', -1:'red'}\n\nclass Game(object):\n    '''\n    classdocs\n    '''\n\n    def __init__(self):\n        '''\n        Constructor\n        '''\n        main_window = MainWindow()\n        self.dialog_new_game = QDialog()\n        self.ui = Ui_DialogNewGame()\n        self.ui.setupUi(self.dialog_new_game)\n\n        self.user_input = UserInput(main_window, self)\n        self.board = None\n        self.over = False\n        self.ai_player = None\n\n        self.new_game()\n\n        main_window.show()\n\n    def new_game(self):\n        self.dialog_new_game.exec()\n        if not self.ui.is_player2_human:\n            self.ai_player = AIPlayer('red', self.ui.difficulty)\n        else:\n            self.ai_player = None\n        self.board = Board()\n        self.over = False\n        self.active_player = 1\n\n    def put_piece_at(self, position):\n        if self.board.put_piece(Piece(position, PLAYER_COLOR[self.active_player])):\n            self.active_player = -self.active_player\n            return True\n        return False\n\n    def check_game_end(self):\n        result = self.board.verify_game_over()\n        if result == 'end':\n            self.over = True\n        return result\n\n    def ai_player_move(self):\n        if self.ai_player and self.active_player == -1:\n            piece = self.ai_player.get_move(self.board)\n            pos = piece.get_position()\n            self.user_input.ai_move(pos)\n\n    def active_player_color(self):\n        return PLAYER_COLOR[self.active_player]\n\n    def get_active_player(self):\n        return self.active_player\n\n    def is_over(self):\n        return self.over\n","sub_path":"src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"66828083","text":"import time as t\n\nfrom discord.ext import commands\n\nfrom src.Settings import Settings\nfrom configs import config, bot_enum, user_messages as u_msg\nfrom src.session import session_manager, session_controller, session_messenger, countdown, state_handler\nfrom src.session.Session import Session\nfrom src.utils import msg_builder\n\n\nclass Control(commands.Cog):\n\n    def __init__(self, client):\n        self.client = client\n\n    @commands.command()\n    async def start(self, ctx, pomodoro=20, short_break=5, long_break=15, intervals=4):\n        if not await Settings.is_valid(ctx, pomodoro, short_break, long_break, intervals):\n            return\n        if session_manager.active_sessions.get(session_manager.session_id_from(ctx.channel)):\n            await ctx.send(u_msg.ACTIVE_SESSION_EXISTS_ERR)\n            return\n        if not ctx.author.voice:\n            await ctx.send('Join a voice channel to use Pomi!')\n            return\n\n        session = Session(bot_enum.State.POMODORO,\n                          Settings(pomodoro, short_break, long_break, intervals),\n                          ctx)\n        await session_controller.start(session)\n\n    @start.error\n    async def handle_error(self, ctx, error):\n        if isinstance(error, commands.BadArgument):\n            await ctx.send(u_msg.NUM_OUTSIDE_ONE_AND_MAX_INTERVAL_ERR)\n        else:\n            print(error)\n\n    @commands.command()\n    async def stop(self, ctx):\n        session = await 
session_manager.get_session(ctx)\n        if session:\n            if session.stats.pomos_completed > 0:\n                await ctx.send(f'Great job! '\n                               f'You completed {msg_builder.stats_msg(session.stats)}.')\n            else:\n                await ctx.send(f'See you soon! 👋')\n            await session_controller.end(session)\n\n    @commands.command()\n    async def pause(self, ctx):\n        session = await session_manager.get_session(ctx)\n        if session:\n            timer = session.timer\n            if not timer.running:\n                await ctx.send('The timer is already paused.')\n                return\n\n            await session.auto_shush.unshush(ctx)\n            timer.running = False\n            timer.remaining = timer.end - t.time()\n            await ctx.send(f'Pausing {session.state}.')\n            session.timeout = t.time() + config.PAUSE_TIMEOUT_SECONDS\n\n    @commands.command()\n    async def resume(self, ctx):\n        session = await session_manager.get_session(ctx)\n        if session:\n            timer = session.timer\n            if session.timer.running:\n                await ctx.send('The timer is already running.')\n                return\n\n            timer.running = True\n            timer.end = t.time() + timer.remaining\n            await ctx.send(f'Resuming {session.state}.')\n            await session_controller.resume(session)\n\n    @commands.command()\n    async def restart(self, ctx):\n        session = await session_manager.get_session(ctx)\n        if session:\n            session.timer.set_time_remaining()\n            await ctx.send(f'Restarting {session.state}.')\n            if session.state == bot_enum.State.COUNTDOWN:\n                await countdown.start(session)\n            else:\n                await session_controller.resume(session)\n\n    @commands.command()\n    async def skip(self, ctx):\n        session = await session_manager.get_session(ctx)\n        if session and session.state == bot_enum.State.COUNTDOWN:\n            await ctx.send(f'Countdowns cannot be skipped. '\n                     f'Use {config.CMD_PREFIX}stop to end it or {config.CMD_PREFIX}restart to start over.')\n        if session:\n            stats = session.stats\n            if stats.pomos_completed >= 0 and \\\n                    session.state == bot_enum.State.POMODORO:\n                stats.pomos_completed -= 1\n                stats.minutes_completed -= session.settings.duration\n\n            await ctx.send(f'Skipping {session.state}.')\n            await state_handler.transition(session)\n            await session_controller.resume(session)\n\n    @commands.command()\n    async def edit(self, ctx, pomodoro: int, short_break: int = None, long_break: int = None, intervals: int = None):\n        session = await session_manager.get_session(ctx)\n        if session and session.state == bot_enum.State.COUNTDOWN:\n            await ctx.send(f'Countdowns cannot be edited. '\n                     f'Use {config.CMD_PREFIX}countdown to start a new one.')\n        if session:\n            if not await Settings.is_valid(ctx, pomodoro, short_break, long_break, intervals):\n                return\n            await session_controller.edit(session, Settings(pomodoro, short_break, long_break, intervals))\n            session.timer.set_time_remaining()\n            if session.state == bot_enum.State.COUNTDOWN:\n                await countdown.update_msg(session)\n            await session_controller.resume(session)\n\n    @edit.error\n    async def handle_error(self, ctx, error):\n        if isinstance(error, commands.MissingRequiredArgument):\n            await ctx.send(u_msg.MISSING_ARG_ERR)\n        elif isinstance(error, commands.BadArgument):\n            await ctx.send(u_msg.NUM_OUTSIDE_ONE_AND_MAX_INTERVAL_ERR)\n        else:\n            print(error)\n\n    @commands.command()\n    async def countdown(self, ctx, duration: int, title='Countdown', audio_alert=None):\n        session = session_manager.active_sessions.get(session_manager.session_id_from(ctx.channel))\n        if session:\n            await ctx.send('There is an active session. '\n                           'Are you sure you want to start a countdown? 
(y/n)')\n            response = await self.client.wait_for('message', timeout=60)\n            if not response.content.lower()[0] == 'y':\n                await ctx.send('OK, cancelling the new countdown.')\n                return\n\n        if not 0 < duration <= 180:\n            await ctx.send(u_msg.NUM_OUTSIDE_ONE_AND_MAX_INTERVAL_ERR)\n            return\n        session = Session(bot_enum.State.COUNTDOWN,\n                          Settings(duration),\n                          ctx)\n        await countdown.handle_connection(session, audio_alert)\n        session_manager.activate(session)\n        await session_messenger.send_countdown_msg(session, title)\n        await countdown.start(session)\n\n    @countdown.error\n    async def handle_error(self, ctx, error):\n        if isinstance(error, commands.MissingRequiredArgument):\n            await ctx.send(u_msg.MISSING_ARG_ERR)\n        elif isinstance(error, commands.BadArgument):\n            await ctx.send(u_msg.NUM_OUTSIDE_ONE_AND_MAX_INTERVAL_ERR)\n        else:\n            print(error)\n\n\ndef setup(client):\n    client.add_cog(Control(client))\n","sub_path":"bot/cogs/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":6709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"50787415","text":"import pandas\n\nbank_dict = {}\nutilities_dict = {}\n\n\n# opens a file containing bank ID to bank name mapping and creates a dictionary out of it \n# later on \"sms_level1_classification_func.py\" will use this to segregate bank messages from all the messages\n\n\n# fp = open('data_files/sms_classification_level1_keywords/financial/bank_id_to_bank_name_mapping','r')\n# for bank in fp.read().split('\\n'):\n# \tif bank != '':\n# \t\tbank = bank.split('\\t')\n# \t\tbank_dict[bank[0]] = [ value for value in bank[1:] ]\n\ndf = pandas.read_csv('data_files/sms_classification_level1_keywords/financial/TABLE_20160704_SENDER_CLASSIFICATION_v02.csv')\ndf = df[df['SENDER_PARENT'] == 'FINANCIAL' ]\ndf.fillna('_NA_',inplace=True)\nfor i , row in df.iterrows():\n\tbank_dict[row['SENDER']] = [row['SENDER_NAME'],row['SENDER_PARENT'],row['SENDER_CHILD_1'],row['SENDER_CHILD_2'],row['SENDER_CHILD_3']]\n\n\n\n#print bank_dict\n\n\n\nfp = open('data_files/sms_classification_level1_keywords/utilities/sender_id_to_sender_name_mapping','r')\nfor utility in fp.read().split('\\n'):\n\tif utility != '':\n\t\tutility = utility.split('\\t')\n\t\tutilities_dict[utility[0]] = [ value for value in utility[1:]]\n\n","sub_path":"function_definitions/all_dict_generator.py","file_name":"all_dict_generator.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"103604730","text":"#!/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport sys\r\nimport codecs\r\nimport re\r\nimport configparser\r\nfrom urllib3 import util\r\n\r\n# Type of printing.\r\nOK = 'ok' # [*]\r\nNOTE = 'note' # [+]\r\nFAIL = 'fail' # [-]\r\nWARNING = 'warn' # [!]\r\nNONE = 'none' # No label.\r\n\r\n\r\nclass Inventory:\r\n    def __init__(self, utility):\r\n        # Read config.ini.\r\n        self.utility = utility\r\n        config = configparser.ConfigParser()\r\n        self.file_name = os.path.basename(__file__)\r\n        self.full_path = os.path.dirname(os.path.abspath(__file__))\r\n        self.root_path = os.path.join(self.full_path, '../')\r\n        config.read(os.path.join(self.root_path, 'config.ini'))\r\n\r\n        try:\r\n            self.signature_dir = os.path.join(self.root_path, config['Common']['signature_path'])\r\n            self.black_list_path = os.path.join(self.signature_dir, config['Inventory']['black_list'])\r\n            self.black_list = []\r\n            if os.path.exists(self.black_list_path) is False:\r\n                self.black_list = []\r\n            
else:\r\n                with codecs.open(self.black_list_path, 'r', encoding='utf-8') as fin:\r\n                    self.black_list = fin.readlines()\r\n            self.max_search_num = int(config['Inventory']['max_search_num'])\r\n            self.jprs_url = config['Inventory']['jprs_url']\r\n            self.jprs_post = {'type': 'DOM-HOLDER', 'key': ''}\r\n            self.jpnic_url = config['Inventory']['jpnic_url']\r\n            self.jpnic_post = {'codecheck-sjis': 'にほんねっとわーくいんふぉめーしょんせんたー',\r\n                               'key': '', 'submit': '検索', 'type': 'NET-HOLDER', 'rule': ''}\r\n        except Exception as e:\r\n            self.utility.print_message(FAIL, 'Reading config.ini is failure : {}'.format(e))\r\n            self.utility.write_log(40, 'Reading config.ini is failure : {}'.format(e))\r\n            sys.exit(1)\r\n\r\n    # Check black list.\r\n    def check_black_list(self, fqdn_list):\r\n        # iterate over a copy so that removing items does not skip entries\r\n        for fqdn in list(fqdn_list):\r\n            for exclude_fqdn in self.black_list:\r\n                if fqdn == exclude_fqdn.replace('\\n', '').replace('\\r', ''):\r\n                    fqdn_list.remove(fqdn)\r\n                    self.utility.print_message(WARNING, '\"{}\" is in the black list.'.format(fqdn))\r\n        return fqdn_list\r\n\r\n    # Explore relevant link.\r\n    def link_explorer(self, spider, google_hack, target_url, keyword, encoding):\r\n        self.utility.print_message(NOTE, 'Explore relevant FQDN.')\r\n        self.utility.write_log(20, '[In] Explore relevant FQDN [{}].'.format(self.file_name))\r\n\r\n        parsed = util.parse_url(target_url)\r\n        # keep the target host before \"parsed\" is reassigned in the loops below\r\n        target_fqdn = parsed.host\r\n\r\n        # Gather FQDN from link of target web site.\r\n        link_fqdn_list = []\r\n        spider.utility.encoding = encoding\r\n        _, url_list = spider.run_spider(parsed.scheme, parsed.host, parsed.port, parsed.path)\r\n        for url in url_list:\r\n            parsed = util.parse_url(url)\r\n            link_fqdn_list.append(parsed.host)\r\n        link_fqdn_list = self.check_black_list(list(set(link_fqdn_list)))\r\n\r\n        # Search FQDN that include link to the target FQDN using Google Custom Search.\r\n        # iterate over a copy so that removing items does not skip entries\r\n        non_reverse_link_fqdn = []\r\n        for search_fqdn in list(link_fqdn_list):\r\n            # Check reverse link to target FQDN.\r\n            if google_hack.search_relevant_fqdn(target_fqdn, search_fqdn) is False:\r\n                non_reverse_link_fqdn.append(search_fqdn)\r\n                link_fqdn_list.remove(search_fqdn)\r\n\r\n        # Search related FQDN using Google Custom Search.\r\n        related_fqdn_list = []\r\n        for url in google_hack.search_related_fqdn(target_fqdn, keyword, self.max_search_num):\r\n            parsed = util.parse_url(url)\r\n            related_fqdn_list.append(parsed.host)\r\n        related_fqdn_list = self.check_black_list(list(set(related_fqdn_list)))\r\n\r\n        # list.extend() returns None, so extend first and deduplicate afterwards\r\n        link_fqdn_list.extend(related_fqdn_list)\r\n        self.utility.write_log(20, '[Out] Explore relevant FQDN [{}].'.format(self.file_name))\r\n        return list(set(link_fqdn_list)), non_reverse_link_fqdn\r\n\r\n    # Explore Domain.\r\n    def domain_explore(self, google_hack, keyword):\r\n        self.utility.print_message(NOTE, 'Explore relevant domain.')\r\n        self.utility.write_log(20, '[In] Explore relevant domain [{}].'.format(self.file_name))\r\n\r\n        # Explore domain using JPRS.\r\n        self.utility.print_message(OK, 'Explore domain from JPRS.')\r\n        domain_list = []\r\n        self.jprs_post['key'] = keyword\r\n        res, _, _, res_body, _ = self.utility.send_request('POST',\r\n                                                           self.jprs_url,\r\n                                                           body_param=self.jprs_post)\r\n        if res.status == 200:\r\n            domain_list = re.findall(r'{}.*\\s*(.*)[\\r\\n]'.format(keyword), res_body)\r\n            if len(domain_list) == 0:\r\n                domain_list = re.findall(r'\\[ドメイン名\\]\\s+([\\w\\Wa-zA-Z\\.].*)[\\r\\n]', res_body)\r\n                if len(domain_list) != 0:\r\n                    self.utility.print_message(NOTE, 'Gathered domain from JPRS. 
: {}'.format(domain_list))\r\n                else:\r\n                    self.utility.print_message(WARNING, 'Could not gather domain from JPRS.')\r\n            else:\r\n                self.utility.print_message(NOTE, 'Gathered domain from JPRS. : {}'.format(domain_list))\r\n\r\n        # Explore FQDN using gathered domain list.\r\n        fqdn_list = []\r\n        # for domain in list(set(domain_list)):\r\n        #     fqdn_list.extend(google_hack.search_domain(domain.lower(), self.max_search_num))\r\n\r\n        fqdn_list = list(set(fqdn_list))\r\n        jprs_fqdn_list = []\r\n        for url in fqdn_list:\r\n            parsed = util.parse_url(url)\r\n            jprs_fqdn_list.append(parsed.host)\r\n        jprs_fqdn_list = list(set(jprs_fqdn_list))\r\n\r\n        # Explore domain using JPNIC.\r\n        domain_list = []\r\n        self.jpnic_post['key'] = keyword\r\n        res, _, _, res_body, _ = self.utility.send_request('POST',\r\n                                                           self.jpnic_url,\r\n                                                           body_param=self.jpnic_post,\r\n                                                           enc='shift_jis')\r\n        if res.status == 200:\r\n            domain_list = re.findall(r'{}.*\\s.*(.*)[\\r\\n]'.format(keyword), res_body, flags=re.IGNORECASE)\r\n\r\n        # Explore FQDN using gathered domain list.\r\n        fqdn_list = []\r\n        for domain in list(set(domain_list)):\r\n            fqdn_list.extend(google_hack.search_domain(domain.lower(), self.max_search_num))\r\n\r\n        self.utility.write_log(20, '[Out] Explore relevant domain [{}].'.format(self.file_name))\r\n        return list(set(fqdn_list))\r\n","sub_path":"modules/Gyoi_Inventory.py","file_name":"Gyoi_Inventory.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"454360554","text":"'''\nThe sum of the squares of the first ten natural numbers is 385 and the square of the sum is 3025, so the difference is 3025-385=2640.\nFind the difference between the sum of the squares of the first 100 natural numbers and the square of the sum.\n'''\nTotal=0\nsingleTotal=0\nsquareTotal=0\nfor i in xrange(100,0,-1):\n    Total+=i\n    squareTotal+=(i*i)\nsingleTotal=(Total*Total)\nprint(\"Single Total=%d,Square Total=%d\"%(singleTotal,squareTotal))\nprint(\"Difference=\"+str(singleTotal-squareTotal))","sub_path":"ProjectEuler/Problems/problem06.py","file_name":"problem06.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"634449429","text":"import fuzzylite as fl\n\n#Declaring and Initializing the Fuzzy Engine\nengine = fl.Engine(\n    name=\"SimpleDimmer\",\n    description=\"Simple Dimmer Fuzzy System which dims light based upon Light Conditions\"\n)\n\n#Defining the Input Variables (Fuzzification)\nengine.input_variables = [\n    fl.InputVariable(\n        name=\"Ambient\",\n        description=\"\",\n        enabled=True,\n        minimum=0.000,\n        maximum=1.000,\n        lock_range=False,\n        terms=[\n            fl.Bell(\"DARK\", -10.000, 5.000, 3.000), #Generalized Bell Membership Function defining \"Dark\"\n            fl.Bell(\"MEDIUM\", 0.000, 5.000, 3.000), #Generalized Bell Membership Function defining \"Medium\"\n            fl.Bell(\"BRIGHT\", 10.000, 5.000, 3.000) #Generalized Bell Membership Function defining \"Bright\"\n        ]\n    )\n]\n\n#Defining the Output Variables (Defuzzification)\nengine.output_variables = [\n    fl.OutputVariable(\n        name=\"Power\",\n        description=\"\",\n        enabled=True,\n        minimum=0.000,\n        maximum=1.000,\n        lock_range=False,\n        aggregation=fl.Maximum(),\n        defuzzifier=fl.Centroid(200),\n        lock_previous=False,\n        terms=[\n            fl.Sigmoid(\"LOW\", 0.500, -30.000), #Sigmoid Membership Function defining \"LOW Light\"\n            fl.Sigmoid(\"MEDIUM\", 0.130, 30.000), #Sigmoid Membership Function defining \"MEDIUM light\"\n            fl.Sigmoid(\"HIGH\", 0.830, 30.000), #Sigmoid 
Membership Function defining \"HIGH Light\"\n fl.Triangle(\"BRIGHT\", 0.500, 0.750, 1.000) #Triangular Membership Function defining \"Bright\"\n ]\n )\n]\n\n#Creation of Fuzzy Rule Base\nengine.rule_blocks = [\n fl.RuleBlock(\n name=\"\",\n description=\"\",\n enabled=True,\n conjunction=None,\n disjunction=None,\n implication=None,\n activation=fl.General(),\n rules=[\n fl.Rule.create(\"if Ambient is DARK then Power is HIGH\", engine),\n fl.Rule.create(\"if Ambient is MEDIUM then Power is MEDIUM\", engine),\n fl.Rule.create(\"if Ambient is BRIGHT then Power is LOW\", engine)\n ]\n )\n]\n","sub_path":"DNFS_124.py","file_name":"DNFS_124.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"125926732","text":"# Django\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils.safestring import mark_safe\n\n\nclass TurboStreamRemoveResponse(HttpResponse):\n \"\"\"Sends an empty 'remove' stream\"\"\"\n\n def __init__(self, target, **kwargs):\n super().__init__(\n f'',\n content_type=\"text/html; turbo-stream;\",\n )\n\n\nclass TurboStreamTemplateResponse(TemplateResponse):\n def __init__(self, request, template, context, action, target, **kwargs):\n\n super().__init__(\n request,\n template,\n context,\n content_type=\"text/html; turbo-stream;\",\n **kwargs,\n )\n\n self._target = target\n self._action = action\n\n self.context_data.update(\n {\n \"turbo_stream_target\": target,\n \"turbo_stream_action\": action,\n \"is_turbo_stream\": True,\n }\n )\n\n @property\n def rendered_content(self):\n content = super().rendered_content\n start_tag = mark_safe(\n f'\")\n return start_tag + content + end_tag\n\n\nclass TurboFrameTemplateResponse(TemplateResponse):\n def __init__(self, request, template, context, dom_id, **kwargs):\n\n super().__init__(\n request, template, context, **kwargs,\n )\n\n self._dom_id = dom_id\n self.context_data.update({\"turbo_frame_dom_id\": dom_id, \"is_turbo_frame\": True})\n\n @property\n def rendered_content(self):\n content = super().rendered_content\n start_tag = mark_safe(f'')\n end_tag = mark_safe(\"\")\n return start_tag + content + end_tag\n","sub_path":"radiofeed/common/turbo/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"564797312","text":"# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport argparse\nimport glob\nimport multiprocessing as mp\nimport os\nimport time\nimport cv2\nimport tqdm\n\nfrom detectron2.config import get_cfg\nfrom detectron2.data.detection_utils import read_image\nfrom detectron2.utils.logger import setup_logger\n\nfrom predictor import VisualizationDemo\n\nimport json\n\n# constants\nWINDOW_NAME = \"COCO detections\"\n\ncount = 0\n\ndef setup_cfg(args):\n # load config from file and command-line arguments\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # Set score_threshold for builtin models\n cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold\n cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold\n cfg.freeze()\n return cfg\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"Detectron2 Demo\")\n parser.add_argument(\n \"--config-file\",\n default=\"configs/quick_schedules/e2e_mask_rcnn_R_50_FPN_inference_acc_test.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--webcam\", action=\"store_true\", help=\"Take inputs from webcam.\")\n parser.add_argument(\"--video-input\", help=\"Path to video file.\")\n parser.add_argument(\"--input\", nargs=\"+\", help=\"A list of space separated input images\")\n parser.add_argument(\n \"--output\",\n help=\"A file or directory to save output visualizations. \"\n \"If not given, will show output in an OpenCV window.\",\n )\n\n parser.add_argument(\n \"--confidence-threshold\",\n type=float,\n default=0.5,\n help=\"Minimum score for instance predictions to be shown\",\n )\n parser.add_argument(\n \"--opts\",\n help=\"Modify model config options using the command-line\",\n default=[],\n nargs=argparse.REMAINDER,\n )\n return parser\n\n\ndef create_annotation(image_folder, json_path, confidence_thresh = 0.8):\n json_dict = {\"images\": [], \"type\": \"instances\", \"annotations\": [], \"categories\": []}\n\n mp.set_start_method(\"spawn\", force=True)\n args = get_parser().parse_args()\n logger = setup_logger()\n logger.info(\"Arguments: \" + str(args))\n\n cfg = setup_cfg(args)\n\n demo = VisualizationDemo(cfg)\n\n image_path = {}\n for path, subdirs, files in os.walk(image_folder):\n for name in files:\n print(name)\n if name.endswith('.jpg') or \\\n name.endswith('.png') or \\\n name.endswith('.JPG') or \\\n name.endswith('.PNG') or \\\n name.endswith('.jpeg') or \\\n name.endswith('.JPEG'):\n image_path[name] = os.path.join(path, name)\n\n print(\"length: \", len(image_path.keys()))\n for path in tqdm.tqdm(image_path.keys(), disable=not args.output):\n # use PIL, to be consistent with evaluation\n start_time = time.time()\n try:\n img = read_image(image_path[path], format=\"BGR\")\n # run detector\n predictions, visualized_output, shape = demo.run_on_image(img)\n except:\n print(\"except\")\n continue\n height, width, channel = shape\n\n global count\n ## append image info\n image = {\n \"file_name\": str(path),\n \"height\": str(height),\n \"width\": str(width),\n \"id\": str(count),\n }\n count += 1\n # if count > 10:\n # break\n json_dict[\"images\"].append(image)\n ## append annotation info\n bnd_id = 0\n for i in range(len(predictions[\"instances\"].pred_boxes)):\n if predictions[\"instances\"].scores[i] > confidence_thresh and predictions[\"instances\"].pred_classes[i] in [0, 2, 5, 7]:\n # print(predictions[\"instances\"].pred_boxes[i].tensor)\n x_center, y_center, 
o_width, o_height = predictions[\"instances\"].pred_boxes[i].tensor[0].cpu().detach().numpy()\n score = predictions[\"instances\"].scores[i].cpu().detach().numpy()\n pred_class = predictions[\"instances\"].pred_classes[i].cpu().detach().numpy()\n\n # print(x_center, y_center, o_width, o_height, score)\n ann = {\n \"area\": str(o_width * o_height),\n \"iscrowd\": 0,\n \"image_id\": str(count),\n \"bbox\": [str(int(x_center - o_width / 2)), str(int(y_center - o_height / 2)), str(o_width), str(o_height)],\n \"category_id\": str(pred_class + 1),\n \"id\": str(bnd_id),\n \"ignore\": 0,\n \"segmentation\": [],\n }\n bnd_id += 1\n json_dict[\"annotations\"].append(ann)\n\n # cat = {\"supercategory\": \"none\", \"id\": cid, \"name\": cate}\n # json_dict[\"categories\"].append(cat)\n\n # if args.output:\n # if os.path.isdir(args.output):\n # assert os.path.isdir(args.output), args.output\n # out_filename = os.path.join(args.output, os.path.basename(path))\n # else:\n # assert len(args.input) == 1, \"Please specify a directory with args.output\"\n # out_filename = args.output\n # visualized_output.save(out_filename)\n # print(\"pred_boxes: \", predictions[\"instances\"].pred_boxes)\n # print(\"scores: \", predictions[\"instances\"].scores)\n # print(\"pred_classes: \", predictions[\"instances\"].pred_classes)\n # print(\"shape: \", width, height, channel)\n # logger.info(\n # \"{}: detected {} instances in {:.2f}s\".format(\n # path, len(predictions[\"instances\"]), time.time() - start_time\n # )\n # )\n logger.info((\"progress: {:.0f} / {:.0f}\".format(count, len(image_path.keys()))))\n\n ## append category info\n cat = {\"supercategory\": \"none\", \"id\": str(1), \"name\": \"person\"}\n json_dict[\"categories\"].append(cat)\n cat = {\"supercategory\": \"none\", \"id\": str(3), \"name\": \"car\"}\n json_dict[\"categories\"].append(cat)\n cat = {\"supercategory\": \"none\", \"id\": str(6), \"name\": \"bus\"}\n json_dict[\"categories\"].append(cat)\n cat = {\"supercategory\": \"none\", \"id\": str(8), \"name\": \"truck\"}\n json_dict[\"categories\"].append(cat)\n\n os.makedirs(os.path.dirname(json_path), exist_ok=True)\n json_fp = open(json_path, \"w\")\n json_str = json.dumps(json_dict)\n json_fp.write(json_str)\n json_fp.close()\n\ndef main():\n json_path = \"/work/20191107_coco.json\"\n image_folder = \"/media/fanerror/Elements SE/20191107_maachine_annotation/\"\n create_annotation(image_folder, json_path)\n\nif __name__ == \"__main__\":\n main()","sub_path":"demo/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":6794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"103613894","text":"from __future__ import unicode_literals, print_function\r\n\r\nimport random\r\nimport spacy\r\nfrom spacy.util import minibatch, compounding\r\nfrom ast import literal_eval\r\nimport os.path\r\nimport logging\r\n\r\nlog = logging.getLogger('file')\r\n\r\nclass ScNerAnnotation(object):\r\n # model paths for judgment order and mix of both\r\n def __init__(self, model_dir_judgment, model_dir_order, mix_model_dir, page_text):\r\n self.model_dir_judgment = model_dir_judgment\r\n self.model_dir_order = model_dir_order\r\n self.mix_model_dir = mix_model_dir\r\n self.page_text = page_text\r\n\r\n # model loading \r\n def loading_model(self, model_dir):\r\n try:\r\n if model_dir is not None and os.path.exists(model_dir) is True:\r\n nlp = spacy.load(model_dir)\r\n return nlp\r\n except FileExistsError:\r\n raise FileExistsError\r\n \r\n # entity 
tagging of page for document model segregation\r\n    def pagewise_entity_tags(self, doc):\r\n        pagewise_tags = list()\r\n        for ent in doc.ents:\r\n            pagewise_tags.append(ent.label_)\r\n        return pagewise_tags\r\n\r\n    # condition check to choose model\r\n    def condition_check(self, sub_list_tag, full_list_tag):\r\n        for x in sub_list_tag:\r\n            if x in full_list_tag:\r\n                return True\r\n\r\n    # order document model: load it and tag the data\r\n    def order_tagged_data(self):\r\n        nlp = self.loading_model(self.model_dir_order)\r\n        doc = nlp(self.page_text)\r\n        result_order_ner = list()\r\n        for ent in doc.ents:\r\n            annotation_json={\r\n                \"annotation_tag\" : ent.label_,\r\n                \"tagged_value\" : ent.text\r\n            }\r\n            result_order_ner.append(annotation_json)\r\n        return {\r\n            \"page_ner\" : result_order_ner,\r\n            \"document_type\" : \"order_doc\"\r\n        }\r\n    # judgment document model: load it and tag the data\r\n    def judgment_tagged_data(self):\r\n        nlp = self.loading_model(self.model_dir_judgment)\r\n        doc = nlp(self.page_text)\r\n        result_judgment_ner = list()\r\n        for ent in doc.ents:\r\n            annotation_json={\r\n                \"annotation_tag\" : ent.label_,\r\n                \"tagged_value\" : ent.text\r\n            }\r\n            result_judgment_ner.append(annotation_json)\r\n        return {\r\n            \"page_ner\" : result_judgment_ner,\r\n            \"document_type\" : \"judgment_doc\"\r\n        }\r\n\r\n    def main(self):\r\n        try:\r\n            nlp = self.loading_model(self.mix_model_dir)\r\n            doc = nlp(self.page_text)\r\n            pagewise_tags = self.pagewise_entity_tags(doc)\r\n            first_page_tag = ['O_ITEM_NO', 'O_COURT_NO', 'O_SECTION']\r\n            last_page_tag = ['O_ORDER_OFFICER','O_ORDER_OFFICER_NAME']\r\n            middle_page_tag = ['O_CORAM','O_HEARING_DATE','O_CONDONATION_DELAY_EXEMPTION','O_COURT_COUNSEL_HEARING','O_COUNSEL_NAME']\r\n            if self.condition_check(first_page_tag, pagewise_tags) is True or self.condition_check(middle_page_tag, pagewise_tags) is True or self.condition_check(last_page_tag, pagewise_tags):\r\n                result_ner = self.order_tagged_data()\r\n                log.info(\"NER done!!\")\r\n                return result_ner\r\n            else:\r\n                result_ner = self.judgment_tagged_data()\r\n                log.info(\"NER done!!\")\r\n                return result_ner\r\n        except Exception as e:\r\n            log.error(\"error occurred during ner operation %s\"%e)\r\n","sub_path":"anuvaad-etl/anuvaad-extractor/ner/src/repositories/sc_judgment_header_ner_eval.py","file_name":"sc_judgment_header_ner_eval.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"37572726","text":"# a melodic riff is a series of notes where the next note plays after the duration of the previous note\n# there are exceptions of course, but this is the basic melodic riff\nfrom key import Key\nfrom note import Note\nfrom random import Random\n\nMAX_DURATION = 3\nMIN_DURATION = 1\n\n\nclass Riff:\n\n    key: Key\n    notes: list # of Note objects\n    track: int\n    channel: int\n\n    def __init__(self, key: Key, notes = [], track = 0, channel = 0):\n        self.key = key\n        self.notes = notes\n        self.track = track\n        self.channel = channel\n\n    # Print Helper Functions\n    def print_notes(self, detailed: bool):\n        print('---------- Printing Notes ----------')\n        print('Riff Name: ') # Add the Root Note modded by whatever to find the right note\n        if detailed:\n            for i, note in enumerate(self.notes):\n                Note.print_note_all(self.notes[i])\n        else:\n            for i, note in enumerate(self.notes):\n                Note.print_note_pitch(self.notes[i])\n        print('------------------------------------')\n\n\nclass MelodicRiff(Riff):\n\n    def __init__(self, key: Key, notes = [], track = 0, 
channel = 0):\n Riff.__init__(self, key, notes, track, channel)\n\n def create_riff(self, riff_length: int, _input = ''):\n time = 0\n while time < riff_length:\n duration = Random.randint(Random(), MIN_DURATION, MAX_DURATION)\n # half a beat\n if duration == 3:\n duration = 0.5\n if duration + time > riff_length:\n continue\n # elif duration == 4:\n # duration = 0.25\n new_pitch = self.key.get_random_pitch()\n list.append(self.notes, Note(new_pitch, duration, time))\n time += duration\n\n\nclass HarmonicRiff(Riff):\n\n def __init__(self, key: Key):\n Riff.__init__(self, key)\n","sub_path":"riff.py","file_name":"riff.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"422227093","text":"import random\nimport time\ndef mergeSort(vetor):\n n = len(vetor)\n esquerda = vetor[:n//2]\n direita = vetor[n//2:]\n\n if n > 1:\n meio = n//2\n for i in range(meio):\n esquerda[i] = vetor[i]\n for j in range(meio, n-1):\n direita[j-meio] = vetor [j]\n\n mergeSort(esquerda)\n mergeSort(direita)\n merge(vetor,esquerda,direita)\n return vetor \n\ndef merge(vetor, esquerda, direita):\n nE = len(esquerda)\n nD = len(direita)\n i = 0\n j = 0\n k = 0\n while i< nE and j < nD:\n if esquerda[i] <= direita [j]:\n vetor[k] = esquerda [i]\n i+=1\n else:\n vetor[k] = direita[j]\n j+=1\n k+=1\n while i < nE:\n vetor[k] = esquerda[i]\n i+=1\n k+=1\n while j < nD:\n vetor[k] = direita [j]\n j+=1\n k+=1\n \ndef main():\n escolha = int(input(\"Tamanho do vetor -> \"))\n x = 0\n vetor = []\n while x < escolha:\n vetor.append(random.randint(0,100))\n x+=1\n print(vetor)\n a = mergeSort(vetor)\n print(\"Enviando seu vetor...\")\n time.sleep(2)\n print(\"Contando o número de posições.\")\n time.sleep(2)\n print(\"Organizando...\")\n time.sleep(2)\n print(\"Seu vetor organizado: \")\n time.sleep(1.5)\n print(a)\n\nmain()\n","sub_path":"MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"56772884","text":"\"\"\"\nID: sashrik1\nLANG: PYTHON3\nTASK: whereami\n\"\"\"\n\nnotTheseLengths = []\n\n\ndef isUniqueString(test_str):\n count = 0\n unique_str_bool = True\n unique_times = 0 # should be at least 1 since the test_str will show up once in mailboxes_string\n\n while (count + len(test_str) - 1) < len(mailboxes_string):\n temp_str = mailboxes_string[count:(count + len(test_str))]\n if test_str == temp_str:\n unique_times += 1\n count += 1\n\n if unique_times > 1:\n unique_str_bool = False\n notTheseLengths.append(len(test_str))\n\n return unique_str_bool\n\n\nfin = open('whereami.in', 'r')\nfout = open('whereami.out', 'w')\n\nnum_mailboxes = int(fin.readline())\nmailboxes_string = fin.readline()\n\nmin_len_list = []\nmin_len = len(mailboxes_string)\n\nif len(mailboxes_string) == 0:\n min_len = 0\nelif len(mailboxes_string) == 1:\n min_len = 1\nelse:\n start_pos = 0\n while start_pos < num_mailboxes:\n end_pos = 0\n while end_pos < num_mailboxes:\n curr_string = mailboxes_string[start_pos:(start_pos + end_pos + 1)]\n if isUniqueString(curr_string):\n if len(curr_string) < min_len:\n min_len_list.append(len(curr_string))\n end_pos += 1\n start_pos += 1\n\n # only obtain unique values\n notTheseLengths_final = []\n i = 0\n notTheseLengths_final.append(notTheseLengths[i])\n while i < len(notTheseLengths):\n if notTheseLengths[i] not in notTheseLengths_final:\n 
notTheseLengths_final.append(notTheseLengths[i])\n i += 1\n\n min_len_list_final = []\n j = 0\n notTheseLengths_final.append(notTheseLengths[j])\n while j < len(min_len_list):\n if min_len_list[j] not in min_len_list_final:\n min_len_list_final.append(min_len_list[j])\n j += 1\n\n compare_list = []\n k = 0\n while k < len(min_len_list_final):\n if min_len_list_final[k] not in notTheseLengths:\n compare_list.append(min_len_list_final[k])\n k += 1\n\n min_len = min(compare_list)\n\nfout.write(str(min_len) + '\\n')\nfout.close()\n","sub_path":"dec2019_bronze/whereami.py","file_name":"whereami.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268342284","text":"# -*- coding: utf-8 -*-\n\nimport logging\nfrom launcher import app\n\nfrom PyQt5.QtCore import QObject, pyqtSlot, QMetaType, QUrl\nfrom PyQt5.QtDBus import QDBusConnection, QDBusInterface, QDBusArgument, QDBusMessage\nfrom PyQt5.QtGui import QDesktopServices\nfrom PyQt5.QtMultimedia import QSound\n\nimport os\n\n_DBUS_NOTIFY_SERVICE = \"org.freedesktop.Notifications\"\n_DBUS_NOTIFY_PATH = \"/org/freedesktop/Notifications\"\n_DBUS_NOTIFY_INTERFACE = \"org.freedesktop.Notifications\"\n\n\nclass Notifier(QObject):\n _conn = None\n _interface = None\n _notifications = None # a dict of notifyId: taskDict\n _capabilities = None\n _completedTasksStat = None\n\n def __init__(self, parent):\n super().__init__(parent)\n self._conn = QDBusConnection(\"Xware Desktop\").sessionBus()\n\n self._interface = QDBusInterface(_DBUS_NOTIFY_SERVICE,\n _DBUS_NOTIFY_PATH,\n _DBUS_NOTIFY_INTERFACE,\n self._conn)\n\n self._notifications = {}\n self._completedTasksStat = app.etmpy.completedTasksStat\n self._completedTasksStat.sigTaskCompleted.connect(self.notifyTask)\n\n self._capabilities = self._getCapabilities()\n if \"actions\" in self._capabilities:\n successful = self._conn.connect(_DBUS_NOTIFY_SERVICE,\n _DBUS_NOTIFY_PATH,\n _DBUS_NOTIFY_INTERFACE,\n \"ActionInvoked\", self.slotActionInvoked)\n if not successful:\n logging.error(\"ActionInvoked connect failed.\")\n\n self._qSound_complete = QSound(\":/sound/download-complete.wav\", self)\n\n @property\n def isConnected(self):\n return self._conn.isConnected()\n\n def notifyTask(self, taskId):\n task = self._completedTasksStat.getTask(taskId)\n\n if task.get(\"state\", None) == 11: # see definitions in class TaskStatistic.\n if app.settings.getbool(\"frontend\", \"notifybysound\"):\n self._qSound_complete.play()\n self._dbus_notify(task)\n else:\n # TODO: Also notify if errors occur\n pass\n\n def _getCapabilities(self):\n # get libnotify server caps and remember it.\n qdBusMsg = self._interface.call(\n \"GetCapabilities\"\n )\n if qdBusMsg.errorName():\n logging.error(\"cannot get org.freedesktop.Notifications.GetCapabilities\")\n return []\n else:\n return qdBusMsg.arguments()[0]\n\n def _dbus_notify(self, task):\n if not app.settings.getbool(\"frontend\", \"popnotifications\"):\n return\n\n if \"actions\" in self._capabilities:\n actions = QDBusArgument([\"open\", \"打开\", \"openDir\", \"打开文件夹\"], QMetaType.QStringList)\n else:\n actions = QDBusArgument([], QMetaType.QStringList)\n\n qdBusMsg = self._interface.call(\n \"Notify\",\n QDBusArgument(\"Xware Desktop\", QMetaType.QString), # app_name\n QDBusArgument(0, QMetaType.UInt), # replace_id\n QDBusArgument(\"xware-desktop\", QMetaType.QString), # app_icon\n QDBusArgument(\"下载完成\", QMetaType.QString), # summary\n QDBusArgument(task[\"name\"], 
QMetaType.QString), # body\n actions,\n {\n \"category\": \"transfer.complete\",\n }, # hints\n QDBusArgument(5000, QMetaType.Int), # timeout\n )\n\n if qdBusMsg.errorName():\n logging.error(\"DBus, notifyTask {}: {}\".format(qdBusMsg.errorName(),\n qdBusMsg.errorMessage()))\n else:\n # add it to the dict\n self._notifications[qdBusMsg.arguments()[0]] = task\n\n @pyqtSlot(QDBusMessage)\n def slotActionInvoked(self, msg):\n notifyId, action = msg.arguments()\n task = self._notifications.get(notifyId, None)\n if not task:\n # other applications' notifications\n return\n name = task[\"name\"] # filename\n path = task[\"path\"] # location\n\n if action == \"open\":\n openPath = os.path.join(path, name)\n elif action == \"openDir\":\n openPath = path\n elif action == \"default\": # Unity's notify osd always have a default action.\n return\n else:\n raise Exception(\"Unknown action from slotActionInvoked: {}.\".format(action))\n\n localOpenPath = app.mountsFaker.convertToLocalPath(openPath)\n qUrl = QUrl.fromLocalFile(localOpenPath)\n QDesktopServices().openUrl(qUrl)\n","sub_path":"src/frontend/Notify/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"141050353","text":"GET_BOXES, GET_BOXES_BY_ROOM, GET_ROOMS, GET_BUILDINGS, GET_SETTINGS, BACK = \"GET_BOXES\", \"GET_BOXES_BY_ROOM\",\\\n \"GET_ROOMS\", \"GET_BUILDINGS\",\\\n \"GET_SETTINGS\", \"BACK\"\n\nCHANGE_YEAR, CHANGE_MONTH = \"CHANGE_YEAR\", \"CHANGE_MONTH\"\nPREV_MONTH, NEXT_MONTH = \"PREV_MONTH\", \"NEXT_MONTH\"\nGET_PLOTTING_MENU = \"GET_PLOTTING_MENU\"\nSTART_DATE_INPUT = \"START_DATE_INPUT\"\nEND_DATE_INPUT = \"END_DATE_INPUT\"\nGET_SUPPORT = \"GET_SUPPORT\"\n","sub_path":"actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"307224854","text":"#!/usr/bin/python3\n\"\"\"\nendpoint (route) that will be to return the API status\n\n\"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import Flask\nfrom models import storage\nfrom flask import make_response\nfrom flask import jsonify\nfrom os import getenv\nfrom flask_cors import CORS\n\nif getenv(\"HBNB_API_HOST\") is None:\n HBNB_API_HOST = '0.0.0.0'\nelse:\n HBNB_API_HOST = getenv(\"HBNB_API_HOST\")\nif getenv(\"HBNB_API_PORT\") is None:\n HBNB_API_PORT = '5000'\nelse:\n HBNB_API_PORT = getenv(\"HBNB_API_PORT\")\n\napp = Flask(__name__)\napp.register_blueprint(app_views)\napp.strict_slashes = False\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"0.0.0.0\"}})\n\n\n@app.teardown_appcontext\ndef show_teardown(exception):\n \"\"\"method to handle teardown\"\"\"\n storage.close()\n\n\n@app.errorhandler(404)\ndef not_found(error):\n \"\"\"handler for 404 errors that returns a JSON-formatted 404 response\"\"\"\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\nif __name__ == '__main__':\n app.run(host=HBNB_API_HOST, port=HBNB_API_PORT, threaded=True)\n","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"296883268","text":"# Other creatures and characters\n\nclass Humanoid:\n def __init__(self, name, current_room):\n self.name = name\n self.current_room = current_room\n\n\nclass Enemy(Humanoid):\n def __init__(self, name, current_room, health, weapon, armor):\n 
super().__init__(name, current_room)\n        self.health = health\n        self.weapon = weapon\n        self.armor = armor\n\nclass NPC(Humanoid):\n    def __init__(self, name, current_room, money):\n        super().__init__(name, current_room)\n        self.inventory = []\n        self.money = money\n\n# Testing\n# skeleton = Enemy(\"Skeleton\", \"Clearing\", 20, \"Rusty Sword\", \"None\")\n# print(skeleton.name, skeleton.armor)\n\n# blacksmith = NPC(\"Blacksmith\", \"Blacksmith Shop\",\n#                  [\"Silver Sword\", \"Iron Armor\"], 50)\n\n# print(blacksmith.inventory, blacksmith.money)","sub_path":"characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"35128524","text":"\"\"\"\nEvent: a thread synchronization and mutual exclusion mechanism\n\"\"\"\n\nfrom threading import Thread,Event\n\ns = None\ne = Event() # Event object\n\n# thread function\ndef 杨子荣():\n    print(\"Yang Zirong has come to pay his respects to the mountain stronghold\")\n    global s\n    s = \"The Heavenly King subdues the Earth Tiger\"\n    e.set() # unblock the main thread\n\nt = Thread(target=杨子荣)\nt.start()\n\ne.wait() # block and wait\nif s == \"The Heavenly King subdues the Earth Tiger\":\n    print(\"The Pagoda quells the River Demon\")\n    print(\"I've confirmed it from your eyes, you're the right person\")\nelse:\n    print(\"Beat him to death, show no mercy\")\n\nt.join()\n","sub_path":"thread_event.py","file_name":"thread_event.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"201739133","text":"user1 = input(\"Enter first username: \")\nuser2 = input(\"Enter second username: \")\ni='yes'\nwhile(i=='yes'):\n    user1input = input(user1+\" choose rock/paper/scissors: \").lower()\n    user2input = input(user2+\" choose rock/paper/scissors: \").lower()\n    if ((user1input not in {'rock','paper','scissors'}) or (user2input not in {'rock','paper','scissors'})):\n        print(\"Invalid input\")\n    elif user1input==user2input:\n        print(\"It's a tie!\")\n    elif user1input == 'rock':\n        if user2input == 'paper':\n            print(user2+\" wins!\")\n        else: print(user1+ \" wins!\")\n    elif user1input == 'paper' :\n        if user2input == 'rock':\n            print(user1+\" wins!\")\n        else: print(user2+\" wins!\")\n    elif user1input == 'scissors' :\n        if user2input == 'rock':\n            print(user2+\" wins!\")\n        else: print(user1+\" wins!\")\n    j=input(\"do you want to play another round, choose yes/no: \")\n    i=j","sub_path":"Python/Activities/Activity4.py","file_name":"Activity4.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"364156224","text":"class SubTitleItem:\n\n    '''a class to store a subtitle item'''\n\n    def __init__(self, order, start_time, end_time, contents):\n        self.order = order\n        self.start_time = start_time\n        self.end_time = end_time\n        self.contents = contents\n\n\n    def set_order(self, order):\n        self.order = order\n\n    def generate_writable_contents(self):\n        items = []\n        items.append(repr(self.order) + '\\n')\n        items.append(self.start_time + ' --> ' + self.end_time + '\\n')\n        for c in self.contents:\n            items.append(c + '\\n')\n        items.append('\\n')\n        return items\n\n\nclass SubTitle:\n\n    def __init__(self, path):\n        self.subtitle_items = []\n        self.ordered_subtitle_items = []\n        with open(path, 'r', encoding='utf-8') as srt_file:\n            order = start_time = end_time = None\n            contents = []\n            for line in srt_file:\n                line = line.strip()\n                if not line:\n                    if not contents and not start_time and not order:\n                        continue\n                    else:\n                        item = SubTitleItem(order, start_time, end_time, contents)\n                        self.subtitle_items.append(item)\n                        order = start_time = end_time = None\n                        contents = []\n                elif not order:\n                    order = int(line)\n                elif not start_time and 
not end_time:\n times = line.split(' ')\n start_time = times[0]\n end_time = times[2]\n else:\n contents.append(line)\n\n def order_by_starttime(self):\n self.ordered_subtitle_items = sorted(self.subtitle_items, key=lambda x: x.start_time)\n order = 1\n for item in self.ordered_subtitle_items:\n item.set_order(order)\n order += 1\n\n def generate_ordered_subtitle_file(self, path):\n if not self.ordered_subtitle_items:\n return\n with open(path, 'w', encoding='utf-8') as f:\n for item in self.ordered_subtitle_items:\n writes = item.generate_writable_contents()\n f.writelines(writes)\n\ndef main():\n st = SubTitle('sample.srt')\n st.order_by_starttime()\n st.generate_ordered_subtitle_file('ordered.srt')\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n","sub_path":"srt/subtitle.py","file_name":"subtitle.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"317240372","text":"from logzero import logger\nimport logzero\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\n\nimport requests\n\nimport os, time, datetime\nimport imaplib, email, re, pyotp, pytz\n\n\nclass MoneyForward:\n def __init__(self) -> None:\n self.stock_price_cache: dict[str, float] = dict()\n\n def init(self):\n logger.info(\"selenium initializing...\")\n options = webdriver.ChromeOptions()\n options.add_argument(\"--headless\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--window-size=800x1000\")\n options.add_argument(\"--disable-application-cache\")\n options.add_argument(\"--disable-infobars\")\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--hide-scrollbars\")\n options.add_argument(\"--lang=ja-JP\")\n options.add_argument(\"--ignore-certificate-errors\")\n options.add_argument(\"--blink-settings=imagesEnabled=false\")\n options.add_argument(\n \"--user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36\"\n )\n options.binary_location = \"/usr/bin/chromium-browser\"\n self.driver = webdriver.Chrome(options=options)\n self.wait = WebDriverWait(self.driver, 5)\n self.driver.implicitly_wait(10)\n if not \"ALPHAVANTAGE_API_KEY\" in os.environ:\n raise ValueError(\"env ALPHAVANTAGE_API_KEY is not found.\")\n self.alphavantage_apikey = os.environ[\"ALPHAVANTAGE_API_KEY\"]\n\n def login(self):\n self.driver.execute_script(\"window.open()\")\n if not \"MF_ID\" in os.environ or not \"MF_PASS\" in os.environ:\n raise ValueError(\"env MF_ID and/or MF_PASS are not found.\")\n mf_id = os.environ[\"MF_ID\"]\n mf_pass = os.environ[\"MF_PASS\"]\n\n self.driver.get(\"https://moneyforward.com/sign_in\")\n self.wait.until(ec.presence_of_all_elements_located)\n self.driver.find_element(by=By.XPATH, value='//img[@alt=\"email\"]').click()\n self.wait.until(ec.presence_of_all_elements_located)\n\n login_time = datetime.datetime.now(pytz.timezone(\"Asia/Tokyo\"))\n self.send_to_element('//input[@type=\"email\"]', mf_id)\n self.driver.find_element(by=By.XPATH, value='//button[@id=\"submitto\"]').click()\n self.wait.until(ec.presence_of_all_elements_located)\n time.sleep(3)\n self.send_to_element('//input[@type=\"password\"]', mf_pass)\n self.driver.find_element(by=By.XPATH, value='//button[@id=\"submitto\"]').click()\n 
self.wait.until(ec.presence_of_all_elements_located)\n\n if self.driver.find_elements(by=By.ID, value=\"page-home\"):\n logger.info(\"successfully logged in.\")\n # New type of MoneyForward two step verifications\n elif self.driver.current_url.startswith(\n \"https://id.moneyforward.com/two_factor_auth/totp\"\n ):\n self.confirm_two_step_verification_param()\n if os.environ[\"MF_TWO_STEP_VERIFICATION\"].lower() == \"totp\":\n confirmation_code = self.get_confirmation_code_from_totp()\n else:\n raise ValueError(\n \"unsupported two step verification is found. check your env MF_TWO_STEP_VERIFICATION.\"\n )\n self.send_to_element('//*[@name=\"otp_attempt\"]', confirmation_code)\n self.driver.find_element(\n by=By.XPATH, value='//button[@id=\"submitto\"]'\n ).click()\n self.wait.until(ec.presence_of_all_elements_located)\n if self.driver.find_elements(\n by=By.XPATH, value='//div[contains(@class,\"registerLaterWrapper\")]/a'\n ):\n logger.info(\n \"recognized as unknown devise and selecting register later.\"\n )\n self.driver.find_element(\n by=By.XPATH,\n value='//div[contains(@class,\"registerLaterWrapper\")]/a',\n ).click()\n self.wait.until(ec.presence_of_all_elements_located)\n if self.driver.find_elements(by=By.ID, value=\"home\"):\n logger.info(\"successfully logged in.\")\n else:\n logger.debug(self.driver.current_url)\n raise ValueError(\"failed to log in.\")\n # Old type of MoneyForward two step verifications\n elif self.driver.find_elements(by=By.ID, value=\"page-two-step-verifications\"):\n self.confirm_two_step_verification_param()\n if os.environ[\"MF_TWO_STEP_VERIFICATION\"].lower() == \"gmail\":\n logger.info(\"waiting confirmation code from Gmail...\")\n confirmation_code = self.get_confirmation_code_from_gmail(login_time)\n else:\n raise ValueError(\n \"unsupported two step verification is found. 
check your env MF_TWO_STEP_VERIFICATION.\"\n )\n self.driver.get(\n \"https://moneyforward.com/users/two_step_verifications/verify/{confirmation_code}\".format(\n confirmation_code=confirmation_code\n )\n )\n self.wait.until(ec.presence_of_all_elements_located)\n self.driver.get(\"https://moneyforward.com/users/sign_in\")\n if self.driver.find_elements(by=By.ID, value=\"home\"):\n logger.info(\"successfully logged in.\")\n else:\n raise ValueError(\"failed to log in.\")\n else:\n raise ValueError(\"failed to log in.\")\n\n def portfolio(self):\n usdrate = self.usdrate()\n logger.info(\"USDJPY: \" + str(usdrate))\n self.driver.get(\"https://moneyforward.com/bs/portfolio\")\n self.wait.until(ec.presence_of_all_elements_located)\n elements = self.driver.find_elements(\n by=By.XPATH, value='//*[@id=\"portfolio_det_eq\"]/table/tbody/tr'\n )\n for i in range(len(elements)):\n tds = elements[i].find_elements(by=By.TAG_NAME, value=\"td\")\n name = tds[1].text\n if name[0:1] == \"#\":\n entry = name.split(\"-\")\n stock_price = self.stock_price(entry[1])\n stock_count = int(entry[2])\n logger.info(\n entry[0]\n + \": \"\n + entry[1]\n + \" is \"\n + str(stock_price)\n + \"USD (\"\n + str(int(usdrate * stock_price))\n + \" JPY) x \"\n + str(stock_count)\n )\n img = tds[11].find_element(by=By.TAG_NAME, value=\"img\")\n self.driver.execute_script(\"arguments[0].click();\", img)\n det_value = tds[11].find_element(by=By.ID, value=\"user_asset_det_value\")\n commit = tds[11].find_element(by=By.NAME, value=\"commit\")\n time.sleep(1)\n self.send_to_element_direct(\n det_value, str(int(usdrate * stock_price) * stock_count)\n )\n commit.click()\n time.sleep(1)\n logger.info(entry[0] + \" is updated.\")\n elements = self.driver.find_elements(\n by=By.XPATH, value='//*[@id=\"portfolio_det_eq\"]/table/tbody/tr'\n ) # avoid stale error\n\n def stock_price(self, tick):\n if not tick in self.stock_price_cache:\n for retry in range(3):\n r = requests.get(\n f\"https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={tick}&apikey={self.alphavantage_apikey}\"\n )\n if r.status_code != 200:\n raise ConnectionRefusedError()\n data = r.json()\n if \"Global Quote\" in data:\n self.stock_price_cache[tick] = float(\n data[\"Global Quote\"][\"05. price\"]\n )\n break\n return self.stock_price_cache[tick]\n\n def usdrate(self):\n r = requests.get(\n f\"https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=USD&to_currency=JPY&apikey={self.alphavantage_apikey}\"\n )\n if r.status_code != 200:\n raise ConnectionRefusedError()\n data = r.json()\n return float(data[\"Realtime Currency Exchange Rate\"][\"5. 
Exchange Rate\"])\n\n def close(self):\n try:\n self.driver.close()\n except:\n logger.debug(\"Ignore exception (close)\")\n try:\n self.driver.quit()\n except:\n logger.debug(\"Ignore exception (quit)\")\n\n ################## Two step verification ###################\n\n def confirm_two_step_verification_param(self):\n logger.info(\"two step verification is enabled.\")\n if not \"MF_TWO_STEP_VERIFICATION\" in os.environ:\n raise ValueError(\"env MF_TWO_STEP_VERIFICATION is not found.\")\n\n def get_confirmation_code_from_totp(self):\n if not \"MF_TWO_STEP_VERIFICATION_TOTP_SECRET_KEY\" in os.environ:\n raise ValueError(\n \"env MF_TWO_STEP_VERIFICATION_TOTP_SECRET_KEY are not found.\"\n )\n confirmation_code = pyotp.TOTP(\n os.getenv(\"MF_TWO_STEP_VERIFICATION_TOTP_SECRET_KEY\")\n ).now()\n return confirmation_code\n\n def get_confirmation_code_from_gmail(self, sent_since):\n if (\n not \"MF_TWO_STEP_VERIFICATION_GMAIL_ACCOUNT\" in os.environ\n or not \"MF_TWO_STEP_VERIFICATION_GMAIL_APP_PASS\" in os.environ\n ):\n raise ValueError(\n \"env MF_TWO_STEP_VERIFICATION_GMAIL_ACCOUNT and/or MF_TWO_STEP_VERIFICATION_GMAIL_APP_PASS are not found.\"\n )\n timeout = int(os.getenv(\"MF_TWO_STEP_VERIFICATION_TIMEOUT\", \"180\"))\n interval = int(os.getenv(\"MF_TWO_STEP_VERIFICATION_INTERVAL\", \"5\"))\n deadline = time.time() + timeout\n while time.time() < deadline:\n confirmation_code = self.read_confirmation_code_from_gmail(sent_since)\n if confirmation_code:\n return confirmation_code\n time.sleep(interval)\n\n def read_confirmation_code_from_gmail(self, sent_since):\n gmail_account = os.getenv(\"MF_TWO_STEP_VERIFICATION_GMAIL_ACCOUNT\")\n gmail_app_pass = os.getenv(\"MF_TWO_STEP_VERIFICATION_GMAIL_APP_PASS\")\n gmail = imaplib.IMAP4_SSL(\"imap.gmail.com\", \"993\")\n gmail.login(gmail_account, gmail_app_pass)\n gmail.select()\n search_option = (\n '(FROM \"feedback@moneyforward.com\" SENTSINCE {sent_since})'.format(\n sent_since=sent_since.strftime(\"%d-%b-%Y\")\n )\n )\n head, data = gmail.search(None, search_option)\n\n confirmation_code = \"\"\n for num in data[0].split():\n h, d = gmail.fetch(num, \"(RFC822)\")\n raw_email = d[0][1]\n message = email.message_from_string(raw_email.decode(\"utf-8\"))\n message_encoding = (\n email.header.decode_header(message.get(\"Subject\"))[0][1]\n or \"iso-2022-jp\"\n )\n subject_header = email.header.decode_header(message.get(\"Subject\"))[0][0]\n subject = str(subject_header.decode(message_encoding))\n if subject != \"【マネーフォワード ME】2段階認証メール\":\n continue\n date_header = email.header.decode_header(message.get(\"Date\"))\n message_time = datetime.datetime.strptime(\n date_header[0][0], \"%a, %d %b %Y %H:%M:%S %z\"\n ) # RFC 2822 format\n if sent_since < message_time:\n body = (\n message.get_payload()[0]\n .get_payload(decode=True)\n .decode(encoding=message_encoding)\n )\n m = re.search(\n r\"https://moneyforward.com/users/two_step_verifications/verify/([0-9]+)\",\n body,\n )\n confirmation_code = m.group(1)\n sent_since = message_time\n\n gmail.close()\n gmail.logout()\n return confirmation_code\n\n ############################################################\n\n def print_html(self):\n html = self.driver.execute_script(\n \"return document.getElementsByTagName('html')[0].innerHTML\"\n )\n print(html)\n\n def send_to_element(self, xpath, keys):\n element = self.driver.find_element(by=By.XPATH, value=xpath)\n element.clear()\n logger.debug(\"[send_to_element] \" + xpath)\n element.send_keys(keys)\n\n def send_to_element_direct(self, element, keys):\n 
element.clear()\n logger.debug(\"[send_to_element] \" + element.get_attribute(\"id\"))\n element.send_keys(keys)\n\n\nif __name__ == \"__main__\":\n if \"LOG_LEVEL\" in os.environ:\n logzero.loglevel(int(os.environ[\"LOG_LEVEL\"]))\n mf = MoneyForward()\n try:\n mf.init()\n mf.login()\n mf.portfolio()\n finally:\n mf.close()\n","sub_path":"mf.py","file_name":"mf.py","file_ext":"py","file_size_in_byte":13282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"612502675","text":"\"\"\"\n_InsertComponent_\n\nMySQL implementation of UpdateWorker\n\"\"\"\n\n__all__ = []\n\n\n\nimport time\nfrom WMCore.Database.DBFormatter import DBFormatter\n\nclass UpdateWorker(DBFormatter):\n\n sqlpart1 = \"\"\"UPDATE wm_workers\n SET last_updated = :last_updated\n \"\"\"\n sqlpart2 = \"\"\"WHERE component_id = :component_id\n AND name = :worker_name\"\"\"\n\n def execute(self, componentID, workerName, state = None,\n pid = None, conn = None, transaction = False):\n\n binds = {\"component_id\": componentID,\n \"worker_name\": workerName,\n \"last_updated\": int(time.time())}\n\n if state:\n binds[\"state\"] = state\n self.sqlpart1 += \", state = :state\"\n if pid:\n binds[\"pid\"] = pid\n self.sqlpart1 += \", pid = :pid\"\n\n sql = self.sqlpart1 + \" \" + self.sqlpart2\n\n self.dbi.processData(sql, binds, conn = conn,\n transaction = transaction)\n return\n","sub_path":"src/python/WMCore/Agent/Database/MySQL/UpdateWorker.py","file_name":"UpdateWorker.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"531583539","text":"import csv\n\nyears = []\nf = open(\"../html/tables/csv/cats/drivers.txt\").read()\nfor fileName in f.split():\n\tcsvData = csv.reader(open(fileName))\n\tyears.append(csvData)\n\ndrivers = {}\nfor year in years:\n\tfirst = True\n\tfor row in year:\n\t\tif first:\n\t\t\tfirst = False\n\t\t\tcontinue\n\t\tif len(row) > 1:\n\t\t\tdrivers[row[1]] = 1\n\nprint(drivers)\n\n","sub_path":"python/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"200925305","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('properties', '0006_auto_20151123_1525'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='contactus',\n options={'verbose_name_plural': 'Contact Us'},\n ),\n migrations.AlterField(\n model_name='property',\n name='sex',\n field=models.CharField(max_length=10, choices=[(b'Male', b'Male'), (b'Female', b'Female'), (b'Any', b'Any')]),\n ),\n ]\n","sub_path":"properties/migrations/0007_auto_20151125_2330.py","file_name":"0007_auto_20151125_2330.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"189071586","text":"import sys\n\n\nfrom PyQt5.QtGui import *\nfrom qgis.analysis import QgsNativeAlgorithms\nfrom qgis.core import *\n\n\nfrom qgis.core import *\n\n\n# Tell Python where you will get processing from\nsys.path.append(r'C:\\Program Files\\QGIS 3.0\\apps\\qgis\\python\\plugins')\n# Reference the algorithm you want to run\nfrom plugins.processing.algs.qgis.PointDistance import *\n\n\ndef upload_new_layer(path, name):\n \"\"\"Upload shp layers\"\"\"\n layer_name = \"layer\" + name\n 
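# \"ogr\" is the QGIS vector data provider for file-based formats such as shapefiles.\n    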
provider_name = \"ogr\"\n layer = QgsVectorLayer(\n path,\n layer_name,\n provider_name)\n return layer\n\n\nif __name__ == \"__main__\":\n\n QgsApplication.setPrefixPath(r'C:\\Program Files\\QGIS 3.0\\apps\\qgis', True)\n app = QGuiApplication([])\n QgsApplication.processingRegistry().addProvider(QgsNativeAlgorithms())\n QgsApplication.initQgis()\n feedback = QgsProcessingFeedback()\n\n \"\"\"Upload input data\"\"\"\n work_folder = os.path.dirname(__file__)\n input = os.path.join(os.path.split(os.path.split(work_folder)[0])[0], r'general/intersections.shp')\n input = 'C:/Users/achituv/AppData/Roaming/QGIS/QGIS3/profiles/default/python/plugins/visibilitysyntax/processing/intersections.shp'\n \n INPUT_FIELD = 'vis_id'\n TARGET = 'C:/Users/achituv/AppData/Roaming/QGIS/QGIS3/profiles/default/python/plugins/visibilitysyntax/processing/intersections.shp'\n TARGET_FIELD = 'vis_id'\n OUTPUT = r'C:\\Users\\achituv\\AppData\\Roaming\\QGIS\\QGIS3\\profiles\\default\\python\\plugins\\visibilitysyntax\\test\\mean_close_point/distance_matrix.shp'\n params = {'INPUT': input, 'INPUT_FIELD': INPUT_FIELD, 'TARGET': TARGET, 'TARGET_FIELD': TARGET_FIELD,\n 'OUTPUT': OUTPUT,\n 'MATRIX_TYPE': 0, 'NEAREST_POINTS': 10, 'OUTPUT': OUTPUT}\n \n alg = PointDistance()\n alg.initAlgorithm()\n \n # Some preprocessing for context\n project = QgsProject.instance()\n \n target_crs = QgsCoordinateReferenceSystem()\n layer_1 = upload_new_layer(input, \"test\")\n target_crs.createFromOgcWmsCrs(layer_1.crs().authid())\n project.setCrs(target_crs)\n context = QgsProcessingContext()\n context.setProject(project)\n alg.processAlgorithm(params, context, feedback=feedback)\n\n \"\"\"For standalone application\"\"\"\n # Exit applications\n QgsApplication.exitQgis()\n app.exit()\n","sub_path":"work_folder/mean_close_point/distance_matrix.py","file_name":"distance_matrix.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"197266018","text":"from glob import glob\n\nimport cv2\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser(description='make video')\nparser.add_argument('--mode', type=str, default='VIDEO')\nparser.add_argument('--image_dir', type=str, default='/home/onepredict/Myungkyu/BVMS_turbine/01_Training_Result/03_Output_image/BTT_AE_2019_11_25_15_49_53')\n\nargs = parser.parse_args()\n\ndef make_video(img_dir):\n images = [img for img in sorted(os.listdir(img_dir))]\n for image in sorted(images):\n image_paths = sorted(glob(os.path.join(img_dir, image, '*.png')))\n\n for idx, image_path in enumerate(image_paths):\n frame = cv2.imread(image_path)\n height, width, _ = frame.shape\n\n video = cv2.VideoWriter(img_dir+'/Anomaly score variation.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 30.0, (width, height), True)\n video.write(frame)\n\n print('converting!: {}'.format(idx + 1))\n video.release()\n\nif __name__ == '__main__':\n if args.mode == 'VIDEO':\n make_video(args.image_dir)","sub_path":"Data_Generation/ImageToVideo.py","file_name":"ImageToVideo.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"8827781","text":"from opengever.core.upgrade import SchemaMigration\nfrom sqlalchemy import Column\nfrom sqlalchemy import Integer\nfrom sqlalchemy.sql.expression import column\nfrom sqlalchemy.sql.expression import table\n\n\nclass AddDecisionNrToProposal(SchemaMigration):\n\n profileid = 'opengever.meeting'\n upgradeid = 
4621\n\n def migrate(self):\n self.add_decision_sequence_to_period()\n self.add_decision_number_to_agenda_item()\n\n def add_decision_sequence_to_period(self):\n self.op.add_column(\n 'periods',\n Column('decision_sequence_number', Integer, nullable=True))\n\n periods_table = table(\n 'periods',\n column('id'), column('decision_sequence_number'))\n self.execute(periods_table.update().values(decision_sequence_number=0))\n\n self.op.alter_column('periods', 'decision_sequence_number',\n existing_type=Integer,\n nullable=False)\n\n def add_decision_number_to_agenda_item(self):\n self.op.add_column(\n 'agendaitems', Column('decision_number', Integer))\n","sub_path":"opengever/meeting/upgrades/to4621.py","file_name":"to4621.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"592692683","text":"#!/bin/python3\n\nimport sys\n\n\nt = int(input().strip())\nfor a0 in range(t):\n n,k = input().strip().split(' ')\n n,k = [int(n),int(k)]\n num = input().strip()\n numberList = [int(s) for s in num]\n res = 0\n for i in range(0, n - k + 1):\n t = 1\n for j in range(i, i + k):\n t = t * numberList[j]\n res = max(res, t)\n print(res)\n","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"73963966","text":"import logging\nimport struct\n\nfrom config import *\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\n# https://stackoverflow.com/questions/9940859/fastest-way-to-pack-a-list-of-floats-into-bytes-in-python\n\nclass Page:\n def __init__(self):\n # add modified bit\n self.num_records = 0\n self.records = {}\n self.record_size = None\n self.record_format = None\n self.max_records = None\n self.dirty = False\n\n def has_capacity(self):\n return self.num_records < self.max_records\n\n def get_max(self):\n return self.max_records\n\n def read(self, rid):\n if rid not in self.records:\n raise KeyError('rid not a valid key')\n\n return self.records[rid]\n\n def delete_record(self, rid):\n self.dirty = True\n return self.records.pop(rid, NULL_RID)\n\n def pack(self):\n data = bytearray(PAGE_SIZE)\n\n for i, key in enumerate(self.records.keys()):\n record_data = struct.pack(self.record_format, key, *self.records[key])\n data[i * self.record_size:i * self.record_size + self.record_size] = record_data\n\n data.ljust(4096, b'0')\n\n return data\n\n def unpack(self, data):\n self.dirty = True\n self.records.clear()\n self.num_records = 0\n\n format_size = struct.Struct(self.record_format).size\n\n i = 0\n while i <= 4096 - format_size:\n try:\n record = list(struct.unpack(self.record_format, data[i: i + format_size]))\n except:\n print(\" here\")\n if record[0] == 0:\n break\n self._unpack(record)\n i += format_size\n\n def update_record(self, key, data):\n if not self.records[key]:\n raise KeyError('rid not a valid key')\n\n self.dirty = True\n self.records[key] = data\n\n def __eq__(self, other):\n return (self.num_records == other.num_records and self.records == other.records\n and self.record_size == other.record_size and self.record_format == other.record_format\n and self.max_records == other.max_records and self.dirty == other.dirty)\n\nclass BasePage(Page):\n def __init__(self):\n super().__init__()\n self.record_size = RID_SIZE + VALUE_SIZE + SCHEMA_SIZE\n self.record_format = ENDIAN_FORMAT + RID_FORMAT + VALUES_FORMAT + SCHEMA_FORMAT\n self.max_records = int(PAGE_SIZE / 
self.record_size)\n\n def new_record(self, rid, value, dirty):\n if not self.has_capacity():\n raise MemoryError('No more space in Page')\n\n if dirty not in [0, 1]:\n raise ValueError('Dirty bit not a valid value')\n\n self.dirty = True\n self.records[rid] = [value] + [dirty]\n self.num_records += 1\n return rid\n\n def _unpack(self, values):\n self.new_record(*values)\n\n def get_dirty(self, rid):\n if rid not in self.records:\n raise KeyError('rid not valid key')\n\n return self.records[rid][1]\n\n def set_dirty(self, rid, dirty):\n if rid not in self.records:\n raise KeyError('rid not valid key')\n\n if dirty not in [0,1]:\n raise ValueError('Dirty bit not valid value')\n\n self.dirty = True\n self.records[rid][1] = dirty\n return 1\n\n\nclass TailPage(Page):\n def __init__(self, num_cols):\n super().__init__()\n\n if num_cols <= 0:\n raise ValueError('Number of columns cannot be <= 0')\n\n self.num_cols = num_cols\n self.record_size = RID_SIZE + num_cols * VALUE_SIZE\n self.record_format = ENDIAN_FORMAT + RID_FORMAT + num_cols * VALUES_FORMAT\n self.max_records = int(PAGE_SIZE / self.record_size)\n\n def new_record(self, rid, values):\n if not self.has_capacity():\n raise MemoryError('No more space in Page')\n\n if len(values) != self.num_cols:\n raise ValueError('Number of values not equal to number of columns')\n\n self.dirty = True\n self.records[rid] = values\n self.num_records += 1\n return rid\n\n def _unpack(self, values):\n self.new_record(values[0], values[1:])\n","sub_path":"src/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"569162346","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.homepage, name='homepage'),\n url(r'^createCard/$',views.createCard,name = 'Create Card'),\n url(r'^confirm/$',views.confirm,name = 'confirm'),\n url(r'^cards/$',views.cards,name = 'cards'),\n url(r'^register/$',views.register,name = 'register'),\n url(r'^login/$',views.userlogin,name = 'login'),\n url(r'^logout/$',views.userlogout,name = 'logout'),\n url(r'^community/$',views.community,name = 'community'),\n url(r'^official-collection/$',views.official_collection,name = 'official_collection'),\n\n]","sub_path":"main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"185796895","text":"# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\n\nfrom omegaconf import open_dict\n\nfrom hydra._internal.pathlib import Path\nfrom hydra.plugins.common.utils import (\n configure_log,\n get_overrides_dirname,\n filter_overrides,\n run_job,\n setup_globals,\n HydraConfig,\n)\nfrom hydra.plugins import Launcher\n\nlog = logging.getLogger(__name__)\n\n\nclass BasicLauncher(Launcher):\n def __init__(self):\n self.config = None\n self.config_loader = None\n self.task_function = None\n\n def setup(self, config, config_loader, task_function):\n self.config = config\n self.config_loader = config_loader\n self.task_function = task_function\n\n def launch(self, job_overrides):\n setup_globals()\n configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)\n sweep_dir = self.config.hydra.sweep.dir\n Path(str(sweep_dir)).mkdir(parents=True, exist_ok=True)\n log.info(\"Launching {} jobs locally\".format(len(job_overrides)))\n log.info(\"Sweep output dir : {}\".format(sweep_dir))\n runs = []\n\n for idx, overrides in enumerate(job_overrides):\n log.info(\"\\t#{} : {}\".format(idx, \" \".join(filter_overrides(overrides))))\n sweep_config = self.config_loader.load_sweep_config(\n self.config, list(overrides)\n )\n with open_dict(sweep_config):\n sweep_config.hydra.job.id = idx\n sweep_config.hydra.job.num = idx\n sweep_config.hydra.job.override_dirname = get_overrides_dirname(\n sweep_config.hydra.overrides.task\n )\n HydraConfig().set_config(sweep_config)\n ret = run_job(\n config=sweep_config,\n task_function=self.task_function,\n job_dir_key=\"hydra.sweep.dir\",\n job_subdir_key=\"hydra.sweep.subdir\",\n )\n runs.append(ret)\n configure_log(self.config.hydra.hydra_logging, self.config.hydra.verbose)\n return runs\n","sub_path":"hydra/_internal/core_plugins/basic_launcher.py","file_name":"basic_launcher.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"530230844","text":"import json\nimport psycopg2\nfrom bs4 import BeautifulSoup\nimport requests\nfrom config import *\nimport re\nfrom datetime import datetime\n\n\nCONN = psycopg2.connect(user=toyUSER,\n password=toyPASSWORD,\n host=toyHOST,\n dbname=toyNAME,\n port=5432)\n\n\ndef query(query):\n curs = CONN.cursor()\n try:\n curs.execute(query)\n except:\n print('Query error')\n curs.close()\n CONN.commit()\n\n\nclass EMSC_scraper():\n '''turning the functions above which pass around random data into a single\n class so that it can work as an object'''\n\n def __init__(self):\n # the page and row number to start collecting from\n self.page_num = 0 # first thing that happens is to incriment this\n self.row_num = 0\n # set the url that everything else will reference\n self.url = 'https://www.emsc-csem.org/Earthquake/?view='\n\n def find_yesterday(self):\n '''basic function to advance the object to start at yesterday.\n Needs to be run before get_yesterday'''\n today = True\n while(today):\n self.page_num += 1\n page = requests.get(self.url+str(self.page_num), timeout=5)\n page_soup = BeautifulSoup(page.text, 'html.parser')\n table = page_soup.find('tbody')\n rows = table.find_all('tr')\n for num, row in enumerate(rows):\n if row['class'][0] == 'autour':\n today = False\n self.row_num = num + 3 # get over the day break\n break\n\n def get_yesterday(self):\n '''gets the quakes and saves them to a list'''\n yesterday = True\n self.quakes = []\n while(yesterday):\n page = requests.get(self.url+str(self.page_num), timeout=5)\n page_soup = 
BeautifulSoup(page.text, 'html.parser')\n table = page_soup.find('tbody')\n rows = table.find_all('tr')\n for row in rows[self.row_num:]:\n if row['class'][0] != 'autour':\n cells = row.find_all('td')\n rawTime = row.find(class_=\"tabev6\").find('a').text\n timestring = re.sub('\\xa0\\xa0\\xa0', ' ', rawTime)\n dt = datetime.strptime(timestring, '%Y-%m-%d %H:%M:%S.%f')\n time = dt.timestamp() * 1000\n lat = float(cells[4].text) if cells[5].text.strip(\n '\\xa0') == 'N' else -float(cells[4].text)\n lon = float(cells[6].text) if cells[7].text.strip(\n '\\xa0') == 'E' else -float(cells[6].text)\n mag = float(cells[10].text)\n place = re.sub(\"'\", \"''\", cells[11].text.strip('\\xa0'))\n self.quakes.append({'time': time,\n 'lat': lat,\n 'lon': lon,\n 'mag': mag,\n 'place': place})\n else:\n yesterday = False\n break\n self.page_num += 1\n self.row_num = 0\n\n def construct_query(self):\n self.day_insert = 'INSERT INTO EMSC (place, time, lat, lon, mag) VALUES '\n for quake in self.quakes:\n row_insert = f\"('{quake['place']}', '{quake['time']}', {quake['lat']}, {quake['lon']}, {quake['mag']}), \"\n self.day_insert += row_insert\n self.day_insert = self.day_insert[:-2]+';'\n\n def query_yesterday(self):\n self.find_yesterday()\n self.get_yesterday()\n self.construct_query()\n return self.day_insert\n\n\ndef lambda_handler(event, context):\n scraper = EMSC_scraper()\n query(scraper.query_yesterday())\n return {\n 'statusCode': 200,\n 'body': json.dumps('Hello from Lambda!')\n }\n\n\nif __name__ == '__main__':\n scrapper = EMSC_scraper()\n scrapper.find_yesterday()\n scrapper.get_yesterday()\n print(scrapper.quakes)\n","sub_path":"LAMBDA_LABS/quake-ds/EMSC_Lambda_function/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"75537498","text":"# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\n\nfrom src.devices.ack_device import ACKDevice\nfrom src.devices.bss_device import BSSDevice\nfrom src.devices.wss_device import WSSDevice\nfrom src.devices.zss_device import ZSSDevice\nfrom src.appium_conn import AppiumConn\n\nSLEEP_TIME_BETWEEN_ROUNDS_IN_SECOND = 10\n\n\ndef test_zts(ffs_type, name_of_plug_to_control_dut, name_of_plug_to_control_provisioner, name_of_dut, appium_server_port):\n \"\"\"\n The test method defines the main test flow as below\n 1. Setup Appium connection\n 2. Deregister the DUT and power it off\n 3. Power cycle/reboot the echo device (provisioner)\n 4. 
Power on DUT and check the registration\n\n \"\"\"\n device_type = {\n \"ack\": ACKDevice,\n \"bss\": BSSDevice,\n \"wss\": WSSDevice,\n \"zss\": ZSSDevice\n }\n names = [name_of_plug_to_control_dut, name_of_plug_to_control_provisioner, name_of_dut]\n device = device_type[ffs_type.lower()](names)\n\n AppiumConn.start_appium_server(appium_server_port)\n try:\n device.factory_reset_and_power_off()\n device.power_cycle_provisioner()\n device.power_on_and_check_setup()\n finally:\n AppiumConn.stop_appium_server()\n # Sleep some time to allow previous appium session closed properly for next round of test\n time.sleep(SLEEP_TIME_BETWEEN_ROUNDS_IN_SECOND)\n","sub_path":"tests/test_device_setup_zts.py","file_name":"test_device_setup_zts.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"359483065","text":"# 作业:在控制台输入信息,程序执行不同命令:\n# 控制台输入1:添加学生信息,学生所有信息需要通过控制台输入\n# 控制台输入2:修改学生信息,需要在控制台指明哪个学生的信息需要修改,同时在控制台输入修改的内容\n# 控制台输入3:删除学生信息,需要在控制台指明哪个学生需要被删除\n# 控制台输入4:查询所有学生信息\n# 控制台输入其他数字:退出查询系统\n\n# 方法一\nimport sqlite3\nconnect = sqlite3.connect('xueXinWangDB')\ncursor = connect.cursor()\n\ncursor.execute('create table if not exists xueKong_info (name text , age int , tel int)')\nconnect.commit()\n# 先关光标,再断链接\ncursor.close()\nconnect.close()\n\ndef add_xueKong_info_to_table():\n\n name = input('请输入名字')\n age = int(input('请输入年龄'))\n tel = int(input('请输入联系方式'))\n # 重新连接到数据���\n connect = sqlite3.connect('xueXinWangDB')\n cursor = connect.cursor()\n\n cursor.execute('insert into xueKong_info (name ,age ,tel) VALUES (\"{}\",\"{}\",\"{}\")'.format(name ,age ,tel))\n connect.commit()\n cursor.close()\n connect.close()\n\n# add_xueKong_info_to_table()\n\ndef delete_xueKong_info_to_table():\n name = input('请输入要删除的名字')\n\n connect = sqlite3.connect('xueXinWangDB')\n cursor = connect.cursor()\n\n cursor.execute('delete from xueKong_info WHERE name = \"{}\"'.format(name))\n connect.commit()\n cursor.close()\n connect.close()\n# delete_xueKong_info_to_table()\n\ndef update_xueKong_info_to_table():\n select = input('请输入要执行的命令 0:修改全部内容 1:只修改名字')\n name = input('请输入要修改的名字')\n new_name = input('请输入新的名字')\n connect = sqlite3.connect('xueXinWangDB')\n cursor = connect.cursor()\n if select == '0':\n new_age = int(input('请输入年龄'))\n new_tel = int(input('请输入联系方式'))\n cursor.execute('update xueKong_info set name = \"{}\",age = \"{}\",tel = \"{}\" WHERE name = \"{}\"'.format(new_name,new_age,new_tel))\n elif select =='1' :\n cursor.execute('update xueKong_info set name = \"{}\" WHERE name = \"{}\"'.format(new_name ,name))\n else:\n print('命令无效')\n connect.commit()\n cursor.close()\n connect.close()\n# update_xueKong_info_to_table()\n\ndef get_xueKomn_info_to_table():\n connect = sqlite3.connect('xueXinWangDB')\n cursor = connect.cursor()\n\n cursor.execute('select * from xueKong_info')\n result = cursor.fetchall()\n print(result)\n connect.commit()\n cursor.close()\n connect.close()\n# get_xueKomn_info_to_table()\n\nwhile True:\n value = int(input('请输入要执行的命令'))\n\n if value == 1:\n add_xueKong_info_to_table()\n elif value == 2:\n update_xueKong_info_to_table()\n elif value == 3:\n delete_xueKong_info_to_table()\n elif value == 4:\n get_xueKomn_info_to_table()\n else:\n print('退出系统')\n break\n\n# 方法二、\n\n","sub_path":"python/7/16/8.数据库作业再次回顾.py","file_name":"8.数据库作业再次回顾.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"388010396","text":"from random import choice\n\nwords = [\"take\", \"game\", \"whatever\", \"kid\"]\nbody = [\"\"\"\n______\n| |\n|\n|\n|\n|\n````````\n\"\"\", \"\"\"\n______\n| |\n| O\n|\n|\n|\n````````\n\"\"\", \"\"\"\n______\n| |\n| O\n| |\n|\n|\n````````\n\"\"\", \"\"\"\n______\n| |\n| O\n| /|\n|\n|\n````````\n\"\"\", \"\"\"\n______\n| |\n| O\n| /|\\\\\n|\n|\n````````\n\n\"\"\", \"\"\"\n______\n| |\n| O\n| /|\\\\\n| /\n|\n````````\n\"\"\", \"\"\"\n______\n| |\n| O\n| /|\\\\\n| / \\\\\n|\n````````\n\"\"\"]\n\nrandom_word = choice(words)\nanswer = [\"_\" for l in random_word]\npart = 0\nletter = 0\n\nwhile 1:\n print(body[part])\n for l in answer:\n print(l, end=\" \")\n print(\"\\n\")\n\n if \"_\" not in answer:\n print(\"You win!\")\n break\n elif part == len(body) - 1:\n print(\"You lose!\\nThe answer was: {}\".format(random_word))\n break\n\n guess = input(\"Guess a letter(0 = give up): \")\n if guess == \"0\": break\n\n if guess != random_word[letter]:\n part += 1\n else:\n answer[letter] = guess\n letter += 1\n\n","sub_path":"Hangman_Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"487769364","text":"### This is a program for tracking the secular evolution of a system of planets\n## It tracks eccentricity and the AMD held by each planet over time\n## Author: Matthew M Murphy - undergraduate - Stony Brook University\n## Last updated: Dec. 2019\n######################################################################################\n\nimport rebound # I use the REBOUND package, see its online documentation\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\n# must first input parameters from command line\nsysid = sys.argv[1] # input system number\nmass = float(sys.argv[2]) # planet mass\ninitial_e = float(sys.argv[3]) # initial eccentricity\nalpha = float(sys.argv[4]) # alpha spacing parameter\na1set = float(sys.argv[5]) # semi-major axis\na2set = float(sys.argv[6])\na3set = float(sys.argv[7])\nf1set = float(sys.argv[8]) # mean-longitude\nf2set = float(sys.argv[9])\nf3set = float(sys.argv[10])\n\n# Setting initial system values\n# Currently is a 3-planet system\nm_1 = mass # mass of innermost planet\nr_1 = 0.000477 # radius of innermost planet\nm_2 = m_1 # 2,3 are the other planets moving outward\nr_2 = r_1\nm_3 = m_2\nr_3 = r_2\n\ne_i = initial_e\nf_1 = f1set \nf_2 = f2set \nf_3 = f3set \n\ndef Sim(): # Simulation setup\n sim = rebound.Simulation()\n sim.units = ('yr','AU','Msun')\n sim.add(m=1.,r=0.005) # solar-like host star\n a1 = a1set # separations based on \n a2 = a2set # alpha = a_inner / a_outer\n a3 = a3set\n sim.add(m=m_1,a=a1,e=e_i,f=f_1,r=r_1) # zero inclination orbits\n sim.add(m=m_2,a=a2,e=e_i,f=f_2,r=r_2) \n sim.add(m=m_3,a=a3,e=e_i,f=f_3,r=r_3)\n sim.integrator = 'whfast' # Using the WH integrator\n sim.dt = 1e-3 # Symplectic timestep\n sim.ri_whfast.safe_mode=0\n sim.ri_whfast.corrector = 11 # 11-th order symplectic correction\n sim.move_to_com()\n return sim\n\ndef AMD(m,a,e): # Canonical AMD definition\n Lambda = m*np.sqrt(sim.G*a*(m+1))/(m+1)\n return Lambda*(1-np.sqrt(1-e**2))\n\ndef EncTest(p,N,time): # Check for orbit crossing, close encounter, or collision\n for j,planet in enumerate(p):\n if j>1:\n sep = np.sqrt((p[j].x-p[j-1].x)**2 + (p[j].y-p[j-1].y)**2 + (p[j].z-p[j-1].z)**2)\n Rhillmut = pow((p[j].m+p[j-1].m)/3.,1./3)*((p[j].a + p[j-1].a)/2.)\n if (p[j].a*(1-p[j].e) < p[j-1].a*(1+p[j-1].e)):\n 
return 'true'\n elif abs(sep) < Rhillmut:\n return 'true'\n if N != 3:\n return 'true'\n else:\n return 'none'\n \n# Now, to finally run the simulation\n################################################################\nsim = Sim() # Sets up the system\np_init = sim.particles # Array of initial condition\ninner_period = p_init[1].P\n\ntmax = 2.e8*inner_period # Maximum integration time \n\nNintsteps = int(tmax / (1*inner_period))\nint_times = np.linspace(0,tmax,Nintsteps)\n\n################################################################\n\nsim.collision = 'direct' # Simulation treats any collisions between planets as a direct\nsim.collision_resolve = 'merge' # collision and will merge them, conserving momentum and energy\n\n# Creating a data file for output\ndata_name = str(sysid)+'data'\ndata_title = data_name+'.txt'\ndfile = open(data_title,'w')\n\n\n# Now running the integration\n\nfor i,time in enumerate(int_times):\n\tsim.integrate(time, exact_finish_time=0)\n\tp = sim.particles\n\tcheck = EncTest(p, sim.N - 1, time)\n\tif check == 'true': \n\t\tbreak \n\tA1 = AMD(p[1].m,p[1].a,p[1].e) \n\tA2 = AMD(p[2].m,p[2].a,p[2].e) \n\tA3 = AMD(p[3].m,p[3].a,p[3].e)\n\tif (i%1000 == 0): # Output each 1000 \n\t\two = '{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(time,p[1].e,p[2].e,p[3].e,A1,A2,A3)\n\t\tdfile.write(wo)\n\t\tdfile.write('\\n')\n\ndfile.close()\n","sub_path":"csim.py","file_name":"csim.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"438767821","text":"#coding=gbk\r\n'''\r\nCreated on 2019年2月16日\r\n\r\n@author: G2435\r\n'''\r\n''' 没有返回值的函数 \r\n 1.没有return语句\r\n 2.return语句后面没有跟任何值\r\n'''\r\ndef fun1(flag):\r\n if flag:\r\n print(\"满足条件\")\r\n else:\r\n print(\"不满足条件\")\r\nfun1(1)\r\n\r\ndef fun(flag):\r\n ''' 直接退出了'''\r\n if flag:\r\n \r\n return\r\n print(\"满足条件\")\r\nfun(1)\r\nprint(fun.__doc__)\r\n''' 改变函数参数的值\r\n 值传递(数值、字符串、布尔) 用函数修改参数时并不修改外部的变量值\r\n 引用传递(符合类型、列表、字典、对象)\r\n'''\r\nx = 100\r\ns = 'hello world'\r\ndef test1(x,s):\r\n x = 20\r\n s = \"ww\"\r\ntest1(x,s)\r\nprint(x,s)\r\n\r\n\r\ndi = {}\r\ndi[\"name\"] = []\r\ndi[\"d\"] = {}\r\nusrname = input(\"请输入姓名中间以逗号隔开\")\r\ndd = input(\"请输入字典数据,key,value之间用逗号分隔\")\r\ndi[\"name\"].extend(usrname.split(\",\"))\r\nprint(di)\r\nkeys = dd.split(\",\")[::2]\r\nvalues = dd.split(\",\")[1::2]\r\ndii = dict(zip(keys,values))\r\ndi[\"d\"].update(dii)\r\nprint(di)","sub_path":"Python学习基础知识/python基础/第七节:函数/函数值.py","file_name":"函数值.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"490552387","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nimport cv2\nimport os\nimport time\nimport math\nimport sys\nimport copy\nimport random\nimport matplotlib \nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport argparse\nimport _init_paths\n\nfrom skimage import exposure\nfrom sys import argv\nfrom googlenet import GoogleNet as GoogleNet\nfrom alexnet import AlexNet as AlexNet\nfrom dataset import DataSet\n\ndef load_training_data():\n\tclassList = []\n\timageList = []\n\tperClassCnt = [0] * num_class\n\t# Load the original data\n\tfor root, dirs, files in os.walk(image_path):\n\t\t# write the class names into a txt file\n\t\tif(dirs != []):\n\t\t\tfor dir in dirs:\n\t\t\t\tif(dir != \"ImageSets\"):\n\t\t\t\t\tclassList.append(dir)\n\n\twith 
open(os.path.join(model_path, model_name, 'classes.txt'), 'w') as f:\n\t\tfor i in range(len(classList)):\n\t\t\tf.write(\"{}\\n\".format(classList[i]))\n\n\twith open(os.path.join(image_path, \"ImageSets\", \"train.txt\"), \"r\") as f:\n\t\tdata = f.readlines()\n\t\timageNum = len(data)\n\t\tfor name in data:\n\t\t\timageList.append(name.replace(\"\\n\", \"\"))\n\t\t\tfor i in range(num_class):\n\t\t\t\tif(name.find(classList[i]) > 0):\n\t\t\t\t\tperClassCnt[i] += 1\n\t\t\t\t\tbreak\n\t\n\tperClassCnt = np.array(perClassCnt)\n\ttemp = imageNum / perClassCnt\n\talpha = temp / np.sum(temp)\n\n\t# Shuffle the data\n\timageList = np.array(imageList)\n\tperm = np.arange(len(imageList))\n\tnp.random.shuffle(perm)\n\timageList = imageList[perm]\n\n\tbatchCnt = int(np.ceil(float(imageNum) / float(databatch)))\n\n\treturn batchCnt, classList, imageNum, imageList, alpha\n\ndef get_other_data(path):\n\timageList = []\n\tlabelList = []\n\tnameList = []\n\twith open(path, \"r\") as f:\n\t\tdata = f.readlines()\n\t\t# imageNum = len(data)\n\t\tfor name in data:\n\t\t\tabspath = name.replace(\"\\n\", \"\")\n\t\t\timg = cv2.imread(abspath)\n\t\t\tif(network == \"GoogleNet\"):\n\t\t\t\tresize = cv2.resize(img,(224,224)).astype(\"float32\") #resize到(224,224)尺寸\n\t\t\telif(network == \"AlexNet\"):\n\t\t\t\tresize = cv2.resize(img,(227,227)).astype(\"float32\") #resize到(227,227)尺寸\n\t\t\tfor i in range(num_class):\n\t\t\t\tif(abspath.find(classList[i]) > 0):\n\t\t\t\t\tlab = i\n\t\t\t\t\tbreak\n\t\t\timageList.append(resize)\n\t\t\tlabelList.append(lab)\n\t\t\tnameList.append(abspath)\n\treturn np.array(imageList), np.array(labelList), np.array(nameList)\n\ndef fill_feed_dict(data_set, images_pl, labels_pl):\n\timages_feed, labels_feed ,names_feed= data_set.next_batch(batch_size)\n\tfeed_dict = {\n\t\timages_pl: images_feed,\n\t\tlabels_pl: labels_feed,\n\t}\n\treturn feed_dict, names_feed, labels_feed\n\ndef loss_f(logits, labels):\n\tlabels = tf.to_int64(labels)\n\tcross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='xentropy')\n\tcross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n\treturn cross_entropy_mean\n\ndef focal_loss(logits,labels):\n\toneHotLabel = tf.one_hot(labels, num_class, 1.0, 0.0)\n\tgamma = 2\n\tloss = 0\n\tfor batchNum in range(batch_size):\n\t\tlab = oneHotLabel[batchNum]\n\t\tpre = logits[batchNum]\n\t\tll = alpha * tf.pow(1 - pre + 1e-10, gamma) * lab * tf.log(pre + 1e-10) * num_class\n\t\tloss += -tf.reduce_mean(ll)\n\tloss /= (batch_size * 1.0)\n\treturn loss\n\ndef training(loss, learning_rate):\n\tif(optimizer_flag == \"GD\"):\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\telif(optimizer_flag == \"Adadelta\"):\n\t\toptimizer = tf.train.AdadeltaOptimizer(learning_rate)\n\telif(optimizer_flag == \"Adagrad\"):\n\t\toptimizer = tf.train.AdagradOptimizer(learning_rate)\n\telif(optimizer_flag == \"Momentum\"):\n\t\toptimizer = tf.train.MomentumOptimizer(learning_rate,0.9)\n\telif(optimizer_flag == \"Adam\"):\n\t\toptimizer = tf.train.AdamOptimizer(learning_rate)\n\tglobal_step = tf.Variable(0, name='global_step', trainable=False)\n\ttrain_op = optimizer.minimize(loss, global_step=global_step)\n\treturn train_op\n\ndef evaluation(logits, labels):\n\tcorrect = tf.nn.in_top_k(logits, labels, 1)\n\treturn tf.reduce_sum(tf.cast(correct, tf.int32)),tf.argmax(logits, 1)\n\ndef 
do_eval(sess,\n\t\t\teval_correct,\n\t\t\timages_placeholder,\n\t\t\tlabels_placeholder,\n\t\t\tdata_set,\n\t\t\targmaxx,\n\t\t\tpredictions):\n\ttrue_count = 0\n\tsteps_per_epoch = data_set.num_examples // batch_size\n\tnum_examples = steps_per_epoch * batch_size\n\tfor step in range(steps_per_epoch):\n\t\tfeed_dict,names_feed,labels_feed = fill_feed_dict( data_set,\n\t\t\t\t\t\t\t\t\t\t\t images_placeholder,\n\t\t\t\t\t\t\t\t\t\t\t labels_placeholder)\n\t\ttrue_count += sess.run(eval_correct, feed_dict=feed_dict)\n\tprecision = float(true_count) / num_examples\n\treturn num_examples, true_count, precision\n\ndef rd():\n\treturn random.uniform(0,0.25)\n\ndef resultPlot(loss, train_acc, test_acc):\n\tx = np.arange(0, len(loss), 1)\n\n\tfig, ax1 = plt.subplots(figsize=(7,4))\n\tax2 = ax1.twinx()\n\n\tplt.title(\"Loss & Accuracy\")\n\n\tl1, = ax1.plot(x, loss, color = 'green', linestyle = \"-\", linewidth = 1)\n\tl2, = ax2.plot(x, train_acc, color = 'red', linestyle = \"--\", marker = \"x\", linewidth = 1)\n\tl3, = ax2.plot(x, test_acc, color = 'blue', linestyle = \"-\", marker = \"x\", linewidth = 1)\n\tplt.ylim(0, 1)\n\tplt.legend([l1,l2,l3], ['Loss', 'Train_acc', 'Test_acc'], loc = 0)\n\n\tax1.set_xlabel('Epoch')\n\tax1.set_ylabel('Loss')\n\tax2.set_ylabel('Accuracy')\n\t# plt.show()\n\tplt.savefig(os.path.join(model_path, model_name, \"plotoutput.jpg\"), bbox_inches = 'tight')\n\ndef parse_args():\n\t\"\"\"Parse input arguments.\"\"\"\n\tparser = argparse.ArgumentParser(description='Image Classification -- Training')\n\tparser.add_argument('--batch_size', dest='batch_size', default=16)\n\tparser.add_argument('--image_path', dest='image_path', default=None)\n\tparser.add_argument('--learning_rate', dest='learning_rate', default=0.001)\n\tparser.add_argument('--max_iter', dest='max_iter', default=1000)\n\tparser.add_argument('--num_class', dest='num_class', default=None)\n\tparser.add_argument('--model_name', dest='model_name', default=None)\n\tparser.add_argument('--network', dest='network', default=\"GoogleNet\")\n\tparser.add_argument('--is_pretrain', dest='is_pretrain', default=\"1\")\n\tparser.add_argument('--model_path', dest='model_path', default=None)\n\tparser.add_argument('--databatch', dest='databatch', default=300)\n\tparser.add_argument('--optimizer', dest='optimizer', default=\"GD\")\n\tparser.add_argument('--dataAug', dest='dataAug', default=0)\n\tparser.add_argument('--gpu', dest='gpu', default=\"0\")\n\targs = parser.parse_args()\n\treturn args\n\nif __name__ == '__main__':\n\tstart = time.time()\n\t# use the gpu as much as the code needs\n\ttfconfig = tf.ConfigProto(allow_soft_placement=True)\n\ttfconfig.gpu_options.allow_growth = True\n\t# Variable initiation\n\targs = parse_args()\n\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\tprint(args)\n\tbatch_size = int(args.batch_size)\n\timage_path = args.image_path\n\tlearning_rate = float(args.learning_rate)\n\tmax_iter = int(args.max_iter)\n\tnum_class = int(args.num_class)\n\tmodel_name = args.model_name\n\tnetwork = args.network\n\tis_pretrain = args.is_pretrain\n\tmodel_path = args.model_path\n\tdatabatch = int(args.databatch)\n\tdataAugFlag = int(args.dataAug)\n\toptimizer_flag = args.optimizer\n\tplot_loss_flag = 0\n\n\tbatchCnt, classList, imageNum, imageList, alpha = load_training_data()\n\n\ttrain_eval = [[0]*3 for i in range(batchCnt)]\n\ttest_eval = [[0]*3 for i in range(batchCnt)]\n\tprint(\"batch_num = {}\".format(batchCnt))\n\tprint(\"classList = {}\".format(classList))\n\tprint(\"image_num = 
{}\".format(imageNum))\n\tsys.stdout.flush()\n\n\tdataAugNum = 5\n\n\ttrain_acc = [0]\n\ttest_acc = [0]\n\tplot_loss = []\n\n\tfile_object2 = open('train_info.txt', 'w')\n\n\tif(imageNum == 0):\n\t\tprint(\"No Image\")\n\telif(imageNum > 0):\n\t\tfile_object2.write(\"{}\\n\".format(imageNum))\n\t\twith tf.Graph().as_default():\n\t\t\tif(network == \"GoogleNet\"):\n\t\t\t\timages_placeholder = tf.placeholder(tf.float32, [batch_size, 224, 224, 3])\n\t\t\telif(network == \"AlexNet\"):\n\t\t\t\timages_placeholder = tf.placeholder(tf.float32, [batch_size, 227, 227, 3])\n\t\t\tlabels_placeholder = tf.placeholder(tf.int32, [batch_size])\n\t\t\tif(network == \"GoogleNet\"):\n\t\t\t\tnet = GoogleNet({'data': images_placeholder})\n\t\t\telif(network == \"AlexNet\"):\n\t\t\t\tnet = AlexNet({'data': images_placeholder})\n\n\t\t\tlogits = net.layers['fcoutput']\n\t\t\tpred = tf.nn.softmax(logits)\n\t\t\tloss = focal_loss(pred, labels_placeholder)\n\t\t\ttrain_op = training(loss, learning_rate)\n\t\t\teval_correct,argmaxx = evaluation(pred, labels_placeholder)\n\n\t\t\tsaver = tf.train.Saver(tf.global_variables())\n\t\t\t\n\t\t\twith tf.Session(config=tfconfig) as sess:\n\t\t\t\tsess.run(tf.global_variables_initializer())\n\t\t\t\tif(is_pretrain == \"1\"):\n\t\t\t\t\tlatest = tf.train.latest_checkpoint(model_path + '/' + model_name + '/')\n\t\t\t\t\tif not latest:\n\t\t\t\t\t\tif(network == \"GoogleNet\"):\n\t\t\t\t\t\t\tnet.load('imagenet_weight/googlenet.npy', sess)\n\t\t\t\t\t\telif(network == \"AlexNet\"):\n\t\t\t\t\t\t\tnet.load('imagenet_weight/alexnet.npy', sess)\n\t\t\t\t\t\tprint(\"Model loaded from npy\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tsaver.restore(sess, latest)\n\t\t\t\t\t\tprint(\"Model restored from {}\".format(latest))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\t\n\t\t\t\tepoch = 0\n\n\t\t\t\tfor x in range(batchCnt):\n\t\t\t\t\timages = []\n\t\t\t\t\tlabels = []\n\t\t\t\t\timgname = []\n\t\t\t\t\tprint('Data Augmentation batch %d started' % (x+1))\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\tif(x == (batchCnt - 1)):\n\t\t\t\t\t\timagelen = len(imageList) - x * databatch\n\t\t\t\t\telse:\n\t\t\t\t\t\timagelen = databatch\n\t\t\t\t\taugstart = time.time()\n\t\t\t\t\tfor idx in range(imagelen):\n\t\t\t\t\t\t# 输出数据增强进度信息\n\t\t\t\t\t\tif(idx % (imagelen/10) == 0):\n\t\t\t\t\t\t\taugend = time.time()\n\t\t\t\t\t\t\tprint('Data Augmentation batch %d -- %d%% finished (%.2f sec)' % (x+1,int(float(idx)*100/float(imagelen)),augend-augstart))\n\t\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\t\taugstart = time.time()\n\n\t\t\t\t\t\tabspath = imageList[idx + databatch * x]\n\t\t\t\t\t\timg = cv2.imread(abspath) #opencv读取文件\n\n\t\t\t\t\t\tif(network == \"GoogleNet\"):\n\t\t\t\t\t\t\tresize = cv2.resize(img,(224,224)).astype(\"float32\") #resize到(224,224)尺寸\n\t\t\t\t\t\telif(network == \"AlexNet\"):\n\t\t\t\t\t\t\tresize = cv2.resize(img,(227,227)).astype(\"float32\") #resize到(227,227)尺寸\n\t\t\t\t\t\tfor i in range(num_class):\n\t\t\t\t\t\t\tif(abspath.find(classList[i]) > 0):\n\t\t\t\t\t\t\t\tlab = i\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\timages.append(resize)\n\t\t\t\t\t\tlabels.append(lab)\n\t\t\t\t\t\timgname.append(abspath)\n\n\t\t\t\t\t\tif(dataAugFlag):\n\t\t\t\t\t\t\t# 数据增强 \n\t\t\t\t\t\t\trows,cols,channel = np.shape(resize)\n\t\t\t\t\t\t\t# 旋转90度\n\t\t\t\t\t\t\tM = cv2.getRotationMatrix2D((cols/2,rows/2),90,1)\n\t\t\t\t\t\t\tdst = cv2.warpAffine(resize,M,(cols,rows))\n\t\t\t\t\t\t\timages.append(dst)\n\t\t\t\t\t\t\tlabels.append(lab)\n\t\t\t\t\t\t\timgname.append(abspath + \"90d\")\n\t\t\t\t\t\t\t# 
旋转180度\n\t\t\t\t\t\t\tM = cv2.getRotationMatrix2D((cols/2,rows/2),180,1)\n\t\t\t\t\t\t\tdst = cv2.warpAffine(resize,M,(cols,rows))\n\t\t\t\t\t\t\timages.append(dst)\n\t\t\t\t\t\t\tlabels.append(lab)\n\t\t\t\t\t\t\timgname.append(abspath + \"180d\")\n\t\t\t\t\t\t\t# 旋转270度\n\t\t\t\t\t\t\tM = cv2.getRotationMatrix2D((cols/2,rows/2),270,1)\n\t\t\t\t\t\t\tdst = cv2.warpAffine(resize,M,(cols,rows))\n\t\t\t\t\t\t\timages.append(dst)\n\t\t\t\t\t\t\tlabels.append(lab)\n\t\t\t\t\t\t\timgname.append(abspath + \"270d\")\n\t\t\t\t\t\t\t# 随机仿射变换 5次数据增强\n\t\t\t\t\t\t\tfor j in range(dataAugNum):\n\t\t\t\t\t\t\t\tSrcPoints = np.float32([[0,0],[cols-1,0],[0,rows-1]])\n\t\t\t\t\t\t\t\tCanvasPoints = np.float32([[cols*rd(),rows*rd()],[cols*(1-rd()),rows*rd()],[cols*rd(),rows*(1-rd())]])\n\t\t\t\t\t\t\t\tAffineMatrix = cv2.getAffineTransform(np.array(SrcPoints), np.array(CanvasPoints))\n\t\t\t\t\t\t\t\tAffineImg = cv2.warpAffine(resize, AffineMatrix, (cols,rows))\n\t\t\t\t\t\t\t\timages.append(AffineImg)\n\t\t\t\t\t\t\t\tlabels.append(lab)\n\t\t\t\t\t\t\t\timgname.append(abspath + \"-affine\" + str(j))\n\t\t\t\t\t\t\t# 水平翻转\n\t\t\t\t\t\t\tiLR = copy.deepcopy(resize)\n\t\t\t\t\t\t\tfor l in range(rows):\n\t\t\t\t\t\t\t\tfor j in range(cols):\n\t\t\t\t\t\t\t\t\tiLR[l,j]=resize[l,cols-1-j]\n\t\t\t\t\t\t\timages.append(iLR)\n\t\t\t\t\t\t\tlabels.append(lab)\n\t\t\t\t\t\t\timgname.append(abspath + \"-sym\")\n\t\t\t\t\t\t\t# 水平翻转后随机仿射变换 5次数据增强\n\t\t\t\t\t\t\tfor j in range(dataAugNum):\n\t\t\t\t\t\t\t\tSrcPoints = np.float32([[0,0],[cols-1,0],[0,rows-1]])\n\t\t\t\t\t\t\t\tCanvasPoints = np.float32([[cols*rd(),rows*rd()],[cols*(1-rd()),rows*rd()],[cols*rd(),rows*(1-rd())]])\n\t\t\t\t\t\t\t\tAffineMatrix = cv2.getAffineTransform(np.array(SrcPoints), np.array(CanvasPoints))\n\t\t\t\t\t\t\t\tAffineImg = cv2.warpAffine(iLR, AffineMatrix, (cols,rows))\n\t\t\t\t\t\t\t\timages.append(AffineImg)\n\t\t\t\t\t\t\t\tlabels.append(lab)\n\t\t\t\t\t\t\t\timgname.append(abspath + \"-symaffine\" + str(j))\n\t\t\t\t\t\t\t# 增加亮度\n\t\t\t\t\t\t\tbrighter = exposure.adjust_gamma(resize, 0.5)\n\t\t\t\t\t\t\timages.append(np.array(brighter))\n\t\t\t\t\t\t\tlabels.append(lab)\n\t\t\t\t\t\t\timgname.append(abspath + \"-brighter\")\n\t\t\t\t\t\t\t# 减少亮度\n\t\t\t\t\t\t\tdarker = exposure.adjust_gamma(resize, 1.5)\n\t\t\t\t\t\t\timages.append(darker)\n\t\t\t\t\t\t\tlabels.append(lab)\n\t\t\t\t\t\t\timgname.append(abspath + \"-darker\")\n\t\t\t\t\tprint('Data Augmentation batch %d all finished' % (x+1))\n\t\t\t\t\tsys.stdout.flush()\n\n\t\t\t\t\timages = np.array(images)\n\t\t\t\t\tlabels = np.array(labels)\n\t\t\t\t\timgname = np.array(imgname)\n\n\t\t\t\t\ttrainImages, trainLabels, trainNames = get_other_data(os.path.join(image_path, \"ImageSets\", \"train.txt\"))\n\t\t\t\t\ttestImages, testLabels, testNames = get_other_data(os.path.join(image_path, \"ImageSets\", \"test.txt\"))\n\t\t\t\t\tvalidImages, validLabels, validNames = get_other_data(os.path.join(image_path, \"ImageSets\", \"valid.txt\"))\n\n\t\t\t\t\t# Create the dataset\n\t\t\t\t\toriginalTrainset = DataSet(trainImages, trainLabels, trainNames)\n\t\t\t\t\ttrainset = DataSet(images, labels, imgname)\n\t\t\t\t\ttestset = DataSet(testImages, testLabels, testNames)\n\t\t\t\t\tvalidset = DataSet(validImages, validLabels, validNames)\n\t\t\t\t\tprint('Dataset finished')\n\t\t\t\t\tsys.stdout.flush()\n\n\t\t\t\t\tmax_precision = -9999\n\t\t\t\t\tmin_loss = 9999\n\t\t\t\t\toptical_iter = 0\n\t\t\t\t\tprint(\"--------------------------------------\")\n\t\t\t\t\tprint('Databatch %d start 
training ---' % (x+1))\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\tnum_one_epoch = len(trainset.images)\n\t\t\t\t\tcc = 0\n\t\t\t\t\tfor i in range(max_iter):\n\t\t\t\t\t\tstart_time = time.time()\n\t\t\t\t\t\tif(i == 0):\n\t\t\t\t\t\t\ttemp_time = time.time()\n\n\t\t\t\t\t\tfeed_dict,names_feed,_ = fill_feed_dict(trainset, images_placeholder, labels_placeholder)\n\t\t\t\t\t\tnp_loss, _ = sess.run([loss, train_op], feed_dict=feed_dict)\n\t\t\t\t\t\t# 每隔max_iter/10输出一次loss信息\n\t\t\t\t\t\tif (i + 1) % (max_iter / 10) == 0:\n\t\t\t\t\t\t\tduration = time.time() - temp_time\n\t\t\t\t\t\t\ttemp_time = time.time()\n\t\t\t\t\t\t\tprint('Step %d: loss = %.4f (%.3f sec)' % (i+1, np_loss, duration))\n\t\t\t\t\t\t\tsys.stdout.flush()\n\n\t\t\t\t\t\t# 每个epoch以及最后一次iteration进行evaluation,计算训练集和���试集的accuracy\n\t\t\t\t\t\tcc += batch_size\n\t\t\t\t\t\tif(cc > num_one_epoch or (i + 1) == max_iter):\n\t\t\t\t\t\t\t# print(\"plot_loss = {}\".format(plot_loss))\n\t\t\t\t\t\t\tif(epoch == 0):\n\t\t\t\t\t\t\t\tplot_loss.append(np_loss * 10)\n\t\t\t\t\t\t\tplot_loss.append(np_loss)\n\t\t\t\t\t\t\tprint(\"Iteration = {}, Epoch = {}\".format(i,epoch))\n\t\t\t\t\t\t\tepoch += 1\n\t\t\t\t\t\t\tcc -= num_one_epoch\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\tprint('Train Data Eval: (Iteration = {}, Epoch = {})'.format(i+1, epoch))\n\t\t\t\t\t\t\ttrain_eval[x][0], train_eval[x][1], train_eval[x][2] = do_eval(sess,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\teval_correct,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\timages_placeholder,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlabels_placeholder,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttrainset,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targmaxx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpred)\n\t\t\t\t\t\t\t# if((i + 1) != max_iter):\n\t\t\t\t\t\t\ttrain_acc.append(train_eval[x][2])\n\t\t\t\t\t\t\tprint(' Num examples: {} Num correct: {} Precision @ 1: {:.4f}'.format(train_eval[x][0], train_eval[x][1], train_eval[x][2]))\n\n\n\t\t\t\t\t\t\tprint('Test Data Eval: (Iteration = {}, Epoch = {})'.format(i+1, epoch))\n\t\t\t\t\t\t\ta,b,c = do_eval(sess,\n\t\t\t\t\t\t\t\t\t\t\teval_correct,\n\t\t\t\t\t\t\t\t\t\t\timages_placeholder,\n\t\t\t\t\t\t\t\t\t\t\tlabels_placeholder,\n\t\t\t\t\t\t\t\t\t\t\tvalidset,\n\t\t\t\t\t\t\t\t\t\t\targmaxx,\n\t\t\t\t\t\t\t\t\t\t\tpred)\n\t\t\t\t\t\t\tprint(' Num examples: {} Num correct: {} Precision @ 1: {:.4f}'.format(a, b, c))\n\t\t\t\t\t\t\t# if((i + 1) != max_iter):\n\t\t\t\t\t\t\ttest_acc.append(c)\n\t\t\t\t\t\t\tif(b > max_precision):\t# 保存最佳模型\n\t\t\t\t\t\t\t\ttest_eval[x][0] = a;\n\t\t\t\t\t\t\t\ttest_eval[x][1] = b;\n\t\t\t\t\t\t\t\ttest_eval[x][2] = c;\n\t\t\t\t\t\t\t\tmax_precision = b\n\t\t\t\t\t\t\t\toptical_iter = i + 1\n\t\t\t\t\t\t\t\tcheckpoint_path = model_path + \"/\" + model_name + \"/\" + model_name + \".ckpt\"\n\t\t\t\t\t\t\t\tsaver.save(sess, checkpoint_path)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tprint(np.shape(plot_loss), np.shape(train_acc), np.shape(test_acc))\n\t\t\t\t\t\t\tsys.stdout.flush()\n\n\t\t\t\t\t\t\tresultPlot(plot_loss, train_acc, test_acc)\n\n\t\t\t\ttrain_eval[x][0], train_eval[x][1], train_eval[x][2] = do_eval(sess,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\teval_correct,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\timages_placeholder,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlabels_placeholder,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toriginalTrainset,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targmaxx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpred)\n\t\t\t\ttest_eval[x][0], test_eval[x][1], test_eval[x][2] = 
do_eval(sess,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\teval_correct,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\timages_placeholder,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlabels_placeholder,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttestset,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targmaxx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpred)\n\t\t# 记录训练集和测试集的评估结果\n\t\tfile_object2.write(\"{}\\n{}\\n{}\\n\".format(train_eval[x][0], train_eval[x][1], train_eval[x][2]))\n\t\tfile_object2.write(\"{}\\n{}\\n{}\\n\".format(test_eval[x][0], test_eval[x][1], test_eval[x][2]))\n\tfile_object2.close()\n\n\twholetime = time.time() - start\n\tprint(\"Whole Duration : %.2fs (%.2fmins)\" % (wholetime,wholetime/60))\n\n","sub_path":"build-ML_Platform-Desktop_Qt_5_8_0_GCC_64bit-Release/python/cnn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":16886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"404954384","text":"\"\"\"Extension argument processing code\n\"\"\"\n__all__ = [\n 'Message', 'NamespaceMap', 'no_default', 'registerNamespaceAlias',\n 'OPENID_NS', 'BARE_NS', 'OPENID1_NS', 'OPENID2_NS', 'SREG_URI',\n 'IDENTIFIER_SELECT'\n]\n\nimport copy\nimport warnings\nimport urllib.request\nimport urllib.error\n\nfrom openid import oidutil\nfrom openid import kvform\ntry:\n ElementTree = oidutil.importElementTree()\nexcept ImportError:\n # No elementtree found, so give up, but don't fail to import,\n # since we have fallbacks.\n ElementTree = None\n\n# This doesn't REALLY belong here, but where is better?\nIDENTIFIER_SELECT = 'http://specs.openid.net/auth/2.0/identifier_select'\n\n# URI for Simple Registration extension, the only commonly deployed\n# OpenID 1.x extension, and so a special case\nSREG_URI = 'http://openid.net/sreg/1.0'\n\n# The OpenID 1.X namespace URI\nOPENID1_NS = 'http://openid.net/signon/1.0'\nTHE_OTHER_OPENID1_NS = 'http://openid.net/signon/1.1'\n\nOPENID1_NAMESPACES = OPENID1_NS, THE_OTHER_OPENID1_NS\n\n# The OpenID 2.0 namespace URI\nOPENID2_NS = 'http://specs.openid.net/auth/2.0'\n\n# The namespace consisting of pairs with keys that are prefixed with\n# \"openid.\" but not in another namespace.\nNULL_NAMESPACE = oidutil.Symbol('Null namespace')\n\n# The null namespace, when it is an allowed OpenID namespace\nOPENID_NS = oidutil.Symbol('OpenID namespace')\n\n# The top-level namespace, excluding all pairs with keys that start\n# with \"openid.\"\nBARE_NS = oidutil.Symbol('Bare namespace')\n\n# Limit, in bytes, of identity provider and return_to URLs, including\n# response payload. See OpenID 1.1 specification, Appendix D.\nOPENID1_URL_LIMIT = 2047\n\n# All OpenID protocol fields. 
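Extension namespace aliases are rejected if they collide with one of these names. 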
Used to check namespace aliases.\nOPENID_PROTOCOL_FIELDS = [\n 'ns',\n 'mode',\n 'error',\n 'return_to',\n 'contact',\n 'reference',\n 'signed',\n 'assoc_type',\n 'session_type',\n 'dh_modulus',\n 'dh_gen',\n 'dh_consumer_public',\n 'claimed_id',\n 'identity',\n 'realm',\n 'invalidate_handle',\n 'op_endpoint',\n 'response_nonce',\n 'sig',\n 'assoc_handle',\n 'trust_root',\n 'openid',\n]\n\n\nclass UndefinedOpenIDNamespace(ValueError):\n \"\"\"Raised if the generic OpenID namespace is accessed when there\n is no OpenID namespace set for this message.\"\"\"\n\n\nclass InvalidOpenIDNamespace(ValueError):\n \"\"\"Raised if openid.ns is not a recognized value.\n\n For recognized values, see L{Message.allowed_openid_namespaces}\n \"\"\"\n\n def __str__(self):\n s = \"Invalid OpenID Namespace\"\n if self.args:\n s += \" %r\" % (self.args[0], )\n return s\n\n\n# Sentinel used for Message implementation to indicate that getArg\n# should raise an exception instead of returning a default.\nno_default = object()\n\n# Global namespace / alias registration map. See\n# registerNamespaceAlias.\nregistered_aliases = {}\n\n\nclass NamespaceAliasRegistrationError(Exception):\n \"\"\"\n Raised when an alias or namespace URI has already been registered.\n \"\"\"\n pass\n\n\ndef registerNamespaceAlias(namespace_uri, alias):\n \"\"\"\n Registers a (namespace URI, alias) mapping in a global namespace\n alias map. Raises NamespaceAliasRegistrationError if either the\n namespace URI or alias has already been registered with a\n different value. This function is required if you want to use a\n namespace with an OpenID 1 message.\n \"\"\"\n global registered_aliases\n\n if registered_aliases.get(alias) == namespace_uri:\n return\n\n if namespace_uri in list(registered_aliases.values()):\n raise NamespaceAliasRegistrationError(\n 'Namespace uri %r already registered' % (namespace_uri, ))\n\n if alias in registered_aliases:\n raise NamespaceAliasRegistrationError('Alias %r already registered' %\n (alias, ))\n\n registered_aliases[alias] = namespace_uri\n\n\nclass Message(object):\n \"\"\"\n In the implementation of this object, None represents the global\n namespace as well as a namespace with no key.\n\n @cvar namespaces: A dictionary specifying specific\n namespace-URI to alias mappings that should be used when\n generating namespace aliases.\n\n @ivar ns_args: two-level dictionary of the values in this message,\n grouped by namespace URI. The first level is the namespace\n URI.\n \"\"\"\n\n allowed_openid_namespaces = [OPENID1_NS, THE_OTHER_OPENID1_NS, OPENID2_NS]\n\n def __init__(self, openid_namespace=None):\n \"\"\"Create an empty Message.\n\n @raises InvalidOpenIDNamespace: if openid_namespace is not in\n L{Message.allowed_openid_namespaces}\n \"\"\"\n self.args = {}\n self.namespaces = NamespaceMap()\n if openid_namespace is None:\n self._openid_ns_uri = None\n else:\n implicit = openid_namespace in OPENID1_NAMESPACES\n self.setOpenIDNamespace(openid_namespace, implicit)\n\n @classmethod\n def fromPostArgs(cls, args):\n \"\"\"Construct a Message containing a set of POST arguments.\n\n \"\"\"\n self = cls()\n\n # Partition into \"openid.\" args and bare args\n openid_args = {}\n for key, value in args.items():\n if isinstance(value, list):\n raise TypeError(\"query dict must have one value for each key, \"\n \"not lists of values. 
Query is %r\" % (args, ))\n\n try:\n prefix, rest = key.split('.', 1)\n except ValueError:\n prefix = None\n\n if prefix != 'openid':\n self.args[(BARE_NS, key)] = value\n else:\n openid_args[rest] = value\n\n self._fromOpenIDArgs(openid_args)\n\n return self\n\n @classmethod\n def fromOpenIDArgs(cls, openid_args):\n \"\"\"Construct a Message from a parsed KVForm message.\n\n @raises InvalidOpenIDNamespace: if openid.ns is not in\n L{Message.allowed_openid_namespaces}\n \"\"\"\n self = cls()\n self._fromOpenIDArgs(openid_args)\n return self\n\n def _fromOpenIDArgs(self, openid_args):\n ns_args = []\n\n # Resolve namespaces\n for rest, value in openid_args.items():\n try:\n ns_alias, ns_key = rest.split('.', 1)\n except ValueError:\n ns_alias = NULL_NAMESPACE\n ns_key = rest\n\n if ns_alias == 'ns':\n self.namespaces.addAlias(value, ns_key)\n elif ns_alias == NULL_NAMESPACE and ns_key == 'ns':\n # null namespace\n self.setOpenIDNamespace(value, False)\n else:\n ns_args.append((ns_alias, ns_key, value))\n\n # Implicitly set an OpenID namespace definition (OpenID 1)\n if not self.getOpenIDNamespace():\n self.setOpenIDNamespace(OPENID1_NS, True)\n\n # Actually put the pairs into the appropriate namespaces\n for (ns_alias, ns_key, value) in ns_args:\n ns_uri = self.namespaces.getNamespaceURI(ns_alias)\n if ns_uri is None:\n # we found a namespaced arg without a namespace URI defined\n ns_uri = self._getDefaultNamespace(ns_alias)\n if ns_uri is None:\n ns_uri = self.getOpenIDNamespace()\n ns_key = '%s.%s' % (ns_alias, ns_key)\n else:\n self.namespaces.addAlias(ns_uri, ns_alias, implicit=True)\n\n self.setArg(ns_uri, ns_key, value)\n\n def _getDefaultNamespace(self, mystery_alias):\n \"\"\"OpenID 1 compatibility: look for a default namespace URI to\n use for this alias.\"\"\"\n global registered_aliases\n # Only try to map an alias to a default if it's an\n # OpenID 1.x message.\n if self.isOpenID1():\n return registered_aliases.get(mystery_alias)\n else:\n return None\n\n def setOpenIDNamespace(self, openid_ns_uri, implicit):\n \"\"\"Set the OpenID namespace URI used in this message.\n\n @raises InvalidOpenIDNamespace: if the namespace is not in\n L{Message.allowed_openid_namespaces}\n \"\"\"\n if isinstance(openid_ns_uri, bytes):\n openid_ns_uri = str(openid_ns_uri, encoding=\"utf-8\")\n if openid_ns_uri not in self.allowed_openid_namespaces:\n raise InvalidOpenIDNamespace(openid_ns_uri)\n\n self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit)\n self._openid_ns_uri = openid_ns_uri\n\n def getOpenIDNamespace(self):\n return self._openid_ns_uri\n\n def isOpenID1(self):\n return self.getOpenIDNamespace() in OPENID1_NAMESPACES\n\n def isOpenID2(self):\n return self.getOpenIDNamespace() == OPENID2_NS\n\n def fromKVForm(cls, kvform_string):\n \"\"\"Create a Message from a KVForm string\"\"\"\n return cls.fromOpenIDArgs(kvform.kvToDict(kvform_string))\n\n fromKVForm = classmethod(fromKVForm)\n\n def copy(self):\n return copy.deepcopy(self)\n\n def toPostArgs(self):\n \"\"\"\n Return all arguments with openid. in front of namespaced arguments.\n @return bytes\n \"\"\"\n args = {}\n\n # Add namespace definitions to the output\n for ns_uri, alias in self.namespaces.items():\n if self.namespaces.isImplicit(ns_uri):\n continue\n if alias == NULL_NAMESPACE:\n ns_key = 'openid.ns'\n else:\n ns_key = 'openid.ns.' 
+ alias\n args[ns_key] = oidutil.toUnicode(ns_uri)\n\n for (ns_uri, ns_key), value in self.args.items():\n key = self.getKey(ns_uri, ns_key)\n # Ensure the resulting value is an UTF-8 encoded *bytestring*.\n args[key] = oidutil.toUnicode(value)\n\n return args\n\n def toArgs(self):\n \"\"\"Return all namespaced arguments, failing if any\n non-namespaced arguments exist.\"\"\"\n # FIXME - undocumented exception\n post_args = self.toPostArgs()\n kvargs = {}\n for k, v in post_args.items():\n if not k.startswith('openid.'):\n raise ValueError(\n 'This message can only be encoded as a POST, because it '\n 'contains arguments that are not prefixed with \"openid.\"')\n else:\n kvargs[k[7:]] = v\n\n return kvargs\n\n def toFormMarkup(self,\n action_url,\n form_tag_attrs=None,\n submit_text=\"Continue\"):\n \"\"\"Generate HTML form markup that contains the values in this\n message, to be HTTP POSTed as x-www-form-urlencoded UTF-8.\n\n @param action_url: The URL to which the form will be POSTed\n @type action_url: str\n\n @param form_tag_attrs: Dictionary of attributes to be added to\n the form tag. 'accept-charset' and 'enctype' have defaults\n that can be overridden. If a value is supplied for\n 'action' or 'method', it will be replaced.\n @type form_tag_attrs: {unicode: unicode}\n\n @param submit_text: The text that will appear on the submit\n button for this form.\n @type submit_text: unicode\n\n @returns: A string containing (X)HTML markup for a form that\n encodes the values in this Message object.\n @rtype: str\n \"\"\"\n if ElementTree is None:\n raise RuntimeError('This function requires ElementTree.')\n\n assert action_url is not None\n\n form = ElementTree.Element('form')\n\n if form_tag_attrs:\n for name, attr in form_tag_attrs.items():\n form.attrib[name] = attr\n\n form.attrib['action'] = oidutil.toUnicode(action_url)\n form.attrib['method'] = 'post'\n form.attrib['accept-charset'] = 'UTF-8'\n form.attrib['enctype'] = 'application/x-www-form-urlencoded'\n\n for name, value in self.toPostArgs().items():\n attrs = {\n 'type': 'hidden',\n 'name': oidutil.toUnicode(name),\n 'value': oidutil.toUnicode(value)\n }\n form.append(ElementTree.Element('input', attrs))\n\n submit = ElementTree.Element(\n 'input',\n {'type': 'submit',\n 'value': oidutil.toUnicode(submit_text)})\n form.append(submit)\n\n return str(ElementTree.tostring(form, encoding='utf-8'),\n encoding=\"utf-8\")\n\n def toURL(self, base_url):\n \"\"\"Generate a GET URL with the parameters in this message\n attached as query parameters.\"\"\"\n return oidutil.appendArgs(base_url, self.toPostArgs())\n\n def toKVForm(self):\n \"\"\"Generate a KVForm string that contains the parameters in\n this message. This will fail if the message contains arguments\n outside of the 'openid.' 
prefix.\n \"\"\"\n return kvform.dictToKV(self.toArgs())\n\n def toURLEncoded(self):\n \"\"\"Generate an x-www-urlencoded string\"\"\"\n args = sorted(self.toPostArgs().items())\n return urllib.parse.urlencode(args)\n\n def _fixNS(self, namespace):\n \"\"\"Convert an input value into the internally used values of\n this object\n\n @param namespace: The string or constant to convert\n @type namespace: str or unicode or BARE_NS or OPENID_NS\n \"\"\"\n if isinstance(namespace, bytes):\n namespace = str(namespace, encoding=\"utf-8\")\n\n if namespace == OPENID_NS:\n if self._openid_ns_uri is None:\n raise UndefinedOpenIDNamespace('OpenID namespace not set')\n else:\n namespace = self._openid_ns_uri\n\n if namespace != BARE_NS and not isinstance(namespace, str):\n raise TypeError(\n \"Namespace must be BARE_NS, OPENID_NS or a string. got %r\" %\n (namespace, ))\n\n if namespace != BARE_NS and ':' not in namespace:\n fmt = 'OpenID 2.0 namespace identifiers SHOULD be URIs. Got %r'\n warnings.warn(fmt % (namespace, ), DeprecationWarning)\n\n if namespace == 'sreg':\n fmt = 'Using %r instead of \"sreg\" as namespace'\n warnings.warn(\n fmt % (SREG_URI, ),\n DeprecationWarning, )\n return SREG_URI\n\n return namespace\n\n def hasKey(self, namespace, ns_key):\n namespace = self._fixNS(namespace)\n return (namespace, ns_key) in self.args\n\n def getKey(self, namespace, ns_key):\n \"\"\"Get the key for a particular namespaced argument\"\"\"\n namespace = self._fixNS(namespace)\n if namespace == BARE_NS:\n return ns_key\n\n ns_alias = self.namespaces.getAlias(namespace)\n\n # No alias is defined, so no key can exist\n if ns_alias is None:\n return None\n\n if ns_alias == NULL_NAMESPACE:\n tail = ns_key\n else:\n tail = '%s.%s' % (ns_alias, ns_key)\n\n return 'openid.' + tail\n\n def getArg(self, namespace, key, default=None):\n \"\"\"Get a value for a namespaced key.\n\n @param namespace: The namespace in the message for this key\n @type namespace: str\n\n @param key: The key to get within this namespace\n @type key: str\n\n @param default: The value to use if this key is absent from\n this message. 
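The `no_default` sentinel defined earlier switches `getArg` from default-returning to raising; a minimal sketch:

```python
msg = Message(OPENID2_NS)
msg.setArg(OPENID_NS, 'mode', 'id_res')
print(msg.getArg(OPENID_NS, 'mode'))    # 'id_res'
print(msg.getArg(OPENID_NS, 'absent'))  # None, the ordinary default
try:
    msg.getArg(OPENID_NS, 'absent', no_default)
except KeyError as err:
    print('required argument missing:', err)
```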
Using the special value\n openid.message.no_default will result in this method\n raising a KeyError instead of returning the default.\n\n @rtype: str or the type of default\n @raises KeyError: if default is no_default\n @raises UndefinedOpenIDNamespace: if the message has not yet\n had an OpenID namespace set\n \"\"\"\n namespace = self._fixNS(namespace)\n args_key = (namespace, key)\n try:\n return self.args[args_key]\n except KeyError:\n if default is no_default:\n raise KeyError((namespace, key))\n else:\n return default\n\n def getArgs(self, namespace):\n \"\"\"Get the arguments that are defined for this namespace URI\n\n @returns: mapping from namespaced keys to values\n @returntype: dict of {str:bytes}\n \"\"\"\n namespace = self._fixNS(namespace)\n args = []\n for ((pair_ns, ns_key), value) in self.args.items():\n if pair_ns == namespace:\n if isinstance(ns_key, bytes):\n k = str(ns_key, encoding=\"utf-8\")\n else:\n k = ns_key\n if isinstance(value, bytes):\n v = str(value, encoding=\"utf-8\")\n else:\n v = value\n args.append((k, v))\n return dict(args)\n\n def updateArgs(self, namespace, updates):\n \"\"\"Set multiple key/value pairs in one call\n\n @param updates: The values to set\n @type updates: {unicode:unicode}\n \"\"\"\n namespace = self._fixNS(namespace)\n for k, v in updates.items():\n self.setArg(namespace, k, v)\n\n def setArg(self, namespace, key, value):\n \"\"\"Set a single argument in this namespace\"\"\"\n assert key is not None\n assert value is not None\n namespace = self._fixNS(namespace)\n # try to ensure that internally it's consistent, at least: str -> str\n if isinstance(value, bytes):\n value = str(value, encoding=\"utf-8\")\n self.args[(namespace, key)] = value\n if not (namespace is BARE_NS):\n self.namespaces.add(namespace)\n\n def delArg(self, namespace, key):\n namespace = self._fixNS(namespace)\n del self.args[(namespace, key)]\n\n def __repr__(self):\n return \"<%s.%s %r>\" % (self.__class__.__module__,\n self.__class__.__name__, self.args)\n\n def __eq__(self, other):\n return self.args == other.args\n\n def __ne__(self, other):\n return not (self == other)\n\n def getAliasedArg(self, aliased_key, default=None):\n if aliased_key == 'ns':\n return self.getOpenIDNamespace()\n\n if aliased_key.startswith('ns.'):\n uri = self.namespaces.getNamespaceURI(aliased_key[3:])\n if uri is None:\n if default == no_default:\n raise KeyError\n else:\n return default\n else:\n return uri\n\n try:\n alias, key = aliased_key.split('.', 1)\n except ValueError:\n # need more than x values to unpack\n ns = None\n else:\n ns = self.namespaces.getNamespaceURI(alias)\n\n if ns is None:\n key = aliased_key\n ns = self.getOpenIDNamespace()\n\n return self.getArg(ns, key, default)\n\n\nclass NamespaceMap(object):\n \"\"\"Maintains a bijective map between namespace uris and aliases.\n \"\"\"\n\n def __init__(self):\n self.alias_to_namespace = {}\n self.namespace_to_alias = {}\n self.implicit_namespaces = []\n\n def getAlias(self, namespace_uri):\n return self.namespace_to_alias.get(namespace_uri)\n\n def getNamespaceURI(self, alias):\n return self.alias_to_namespace.get(alias)\n\n def iterNamespaceURIs(self):\n \"\"\"Return an iterator over the namespace URIs\"\"\"\n return iter(self.namespace_to_alias)\n\n def iterAliases(self):\n \"\"\"Return an iterator over the aliases\"\"\"\n return iter(self.alias_to_namespace)\n\n def items(self):\n \"\"\"Iterate over the mapping\n\n @returns: iterator of (namespace_uri, alias)\n \"\"\"\n return self.namespace_to_alias.items()\n\n def 
addAlias(self, namespace_uri, desired_alias, implicit=False):\n \"\"\"Add an alias from this namespace URI to the desired alias\n \"\"\"\n if isinstance(namespace_uri, bytes):\n namespace_uri = str(namespace_uri, encoding=\"utf-8\")\n # Check that desired_alias is not an openid protocol field as\n # per the spec.\n assert desired_alias not in OPENID_PROTOCOL_FIELDS, \\\n \"%r is not an allowed namespace alias\" % (desired_alias,)\n\n # Check that desired_alias does not contain a period as per\n # the spec.\n if isinstance(desired_alias, str):\n assert '.' not in desired_alias, \\\n \"%r must not contain a dot\" % (desired_alias,)\n\n # Check that there is not a namespace already defined for\n # the desired alias\n current_namespace_uri = self.alias_to_namespace.get(desired_alias)\n if (current_namespace_uri is not None and\n current_namespace_uri != namespace_uri):\n\n fmt = ('Cannot map %r to alias %r. '\n '%r is already mapped to alias %r')\n\n msg = fmt % (namespace_uri, desired_alias, current_namespace_uri,\n desired_alias)\n raise KeyError(msg)\n\n # Check that there is not already a (different) alias for\n # this namespace URI\n alias = self.namespace_to_alias.get(namespace_uri)\n if alias is not None and alias != desired_alias:\n fmt = ('Cannot map %r to alias %r. '\n 'It is already mapped to alias %r')\n raise KeyError(fmt % (namespace_uri, desired_alias, alias))\n\n assert (desired_alias == NULL_NAMESPACE or\n type(desired_alias) in [str, str]), repr(desired_alias)\n assert namespace_uri not in self.implicit_namespaces\n self.alias_to_namespace[desired_alias] = namespace_uri\n self.namespace_to_alias[namespace_uri] = desired_alias\n if implicit:\n self.implicit_namespaces.append(namespace_uri)\n return desired_alias\n\n def add(self, namespace_uri):\n \"\"\"Add this namespace URI to the mapping, without caring what\n alias it ends up with\"\"\"\n # See if this namespace is already mapped to an alias\n alias = self.namespace_to_alias.get(namespace_uri)\n if alias is not None:\n return alias\n\n # Fall back to generating a numerical alias\n i = 0\n while True:\n alias = 'ext' + str(i)\n try:\n self.addAlias(namespace_uri, alias)\n except KeyError:\n i += 1\n else:\n return alias\n\n assert False, \"Not reached\"\n\n def isDefined(self, namespace_uri):\n return namespace_uri in self.namespace_to_alias\n\n def __contains__(self, namespace_uri):\n return self.isDefined(namespace_uri)\n\n def isImplicit(self, namespace_uri):\n return namespace_uri in self.implicit_namespaces\n","sub_path":"openid/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":22680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129564786","text":"import unittest\n\n\nclass Project:\n def __init__(self, data):\n self.data = data\n self._dependencies = {}\n self.num_dependencies = 0\n\n def add_dependency(self, dependency):\n if dependency not in self._dependencies:\n self._dependencies[dependency] = dependency\n self.num_dependencies += 1\n\n\nclass Graph:\n def __init__(self):\n self.projects = []\n\n\ndef build_graph(values, edges):\n graph = Graph()\n for data, dependencies in zip(values, edges):\n project = Project(data)\n for dependency in dependencies:\n project.add_dependency(dependency)\n graph.projects.append(project)\n\n return graph\n\n\ndef build_order(graph):\n build = []\n for project in graph.projects:\n if project.num_dependencies == 0:\n build.append(project)\n\n return build\n\n\nclass Test(unittest.TestCase):\n def 
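Note that `build_order` above only seeds the result with the zero-dependency projects and never propagates further; a complete topological ordering (Kahn's algorithm, sketched here as a possible extension, not part of the original file) would keep emitting projects whose dependencies are already satisfied:

```python
def build_order_complete(graph):
    # Repeatedly emit projects whose remaining dependencies were all emitted.
    remaining = {p.data: set(p._dependencies) for p in graph.projects}
    order = []
    while remaining:
        ready = [name for name, deps in remaining.items() if not deps]
        if not ready:
            raise ValueError('dependency cycle detected')
        for name in ready:
            order.append(name)
            del remaining[name]
        for deps in remaining.values():
            deps.difference_update(ready)
    return order
```

On the graph built in the test below, this yields `['e', 'f', 'a', 'b', 'd', 'c']`, matching `expected`.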
test_build_order(self):\n projects = ['a', 'b', 'c', 'd', 'e', 'f']\n dependencies = [['f'], ['f'], ['d'], ['a', 'b'], [], []]\n graph = build_graph(projects, dependencies)\n expected = ['e', 'f', 'a', 'b', 'd', 'c']\n\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"CCI/Python3/TreesAndGraphs/build_order.py","file_name":"build_order.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"82524824","text":"# This file was obtained from\n# wget https://github.com/mopub/greenlet-tornado/raw/master/greenlet_tornado.py\n# It was modified by\n# rodsenra@gmail.com\n# tatiana.alchueyr@gmail.com\n\n# Copyright (c) 2012 The greenlet-tornado Authors.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\n# Author: Simon Radford \n# Derived from this blog article:\n# http://blog.joshhaas.com/2011/06/marrying-boto-to-tornado-greenlets-bring-them-together/\n\n\"\"\"\nThese functions allow you to seamlessly use Greenlet with Tornado.\nThis allows you to write code as if it were synchronous, and not worry about callbacks at all.\nYou also don't have to use any special patterns, such as writing everything as a generator.\n\"\"\"\nimport time\nfrom functools import wraps\n\nimport greenlet\nimport tornado.web\nfrom tornado.escape import json_decode\nfrom tornado.httpclient import AsyncHTTPClient, HTTPError, HTTPRequest\nfrom tornado.ioloop import IOLoop\nfrom tornado.log import app_log\n\n\n# singleton objects\n_io_loop = None\n\n# Use cURL\nAsyncHTTPClient.configure(\"tornado.curl_httpclient.CurlAsyncHTTPClient\")\n\n\nclass GreenletTornadoException(Exception):\n \"\"\"\n Raised if `greenlet_fetch` is used from outside a WebHandler.\n \"\"\"\n pass\n\n\ndef greenlet_set_ioloop(io_loop=None):\n \"\"\"\n Instantiate Tornado IOLoop.\n \"\"\"\n global _io_loop\n if io_loop is None:\n _io_loop = IOLoop.instance()\n else:\n _io_loop = io_loop\n\n\ndef greenlet_fetch(request, **kwargs):\n \"\"\"\n Uses the tornado AsyncHTTPClient to execute a request, but blocks until the request\n is complete, yet still allows the tornado IOLoop to do other things in the meantime.\n\n To use this function, it must be called (either directly or indirectly) from a method\n wrapped by the tgreenlet.asynchronous decorator.\n\n The request arg may be either a string URL or an HTTPRequest object.\n If it is a string, any additional kwargs will be passed directly to AsyncHTTPClient.fetch().\n\n Returns an HTTPResponse 
object, or raises a tornado.httpclient.HTTPError exception\n on error (such as a timeout).\n \"\"\"\n gr = greenlet.getcurrent()\n msg = \"greenlet_fetch() can only be called (possibly indirectly) from a RequestHandler method wrapped by the greenlet_asynchronous decorator.\"\n if gr.parent is None:\n raise GreenletTornadoException(msg)\n\n def callback(response):\n \"\"\"\n Inner function\n \"\"\"\n gr.switch(response)\n\n http_client = tornado.httpclient.AsyncHTTPClient(io_loop=_io_loop)\n http_client.fetch(request, callback, **kwargs)\n\n # Now, yield control back to the master greenlet, and wait for data to be sent to us.\n response = gr.parent.switch()\n\n # Raise the exception, if any.\n response.rethrow()\n return response\n\n\ndef asynchronous(wrapped_method):\n \"\"\"\n Decorator that allows you to make async calls as if they were synchronous, by pausing the callstack and resuming it later.\n\n This decorator is meant to be used on the get() and post() methods of tornado.web.RequestHandler subclasses.\n\n It does not make sense to use the tornado.web.asynchronous decorator as well as this decorator.\n The returned wrapper method will be asynchronous, but the wrapped method will be synchronous.\n The request will be finished automatically when the wrapped method returns.\n \"\"\"\n @tornado.web.asynchronous\n @wraps(wrapped_method)\n def wrapper(self, *args, **kwargs):\n \"\"\"\n Inner function\n \"\"\"\n\n def greenlet_base_func():\n \"\"\"\n Inner function\n \"\"\"\n wrapped_method(self, *args, **kwargs)\n # sometimes the handler method may call finish - and this\n # exception will be raised if we try to finish it twice.\n try:\n self.finish()\n except RuntimeError:\n pass\n\n gr = greenlet.greenlet(greenlet_base_func)\n gr.switch()\n\n return wrapper\n\n\nclass Response(object):\n \"\"\"\n Abstracts HTTP response using same interface as requests.models.Response.\n \"\"\"\n\n def __init__(self, tornado_response=None, status_code=None, body=None):\n self.status_code = tornado_response.code\n self.text = tornado_response.body\n\n def json(self):\n \"\"\"\n Try to convert response body to dict, otherwise rasies ValueError.\n \"\"\"\n response = json_decode(self.text)\n return response\n\ndef fetch(params):\n \"\"\"\n HTTP fetch based on provided params, which must be compatible with\n tornado.httpclient.HTTPRequest constructor.\n \"\"\"\n # TO-DO: test\n try:\n request = HTTPRequest(allow_nonstandard_methods=True, **params)\n i = time.time()\n response = greenlet_fetch(request)\n f = time.time()\n app_log.info(\"{0} took {1}\".format(params[\"url\"], f - i))\n except HTTPError as e:\n response = e.response\n return Response(response)\n\n\ndef get(url, timeout=None, **kargs):\n \"\"\"\n HTTP GET with similar interface to requests.get\n\n Important:\n tornado.webRequestHandler which calls this method must be decorated\n with @tgreenlet.asynchronous\n \"\"\"\n params = kargs\n params[\"url\"] = str(url)\n params[\"method\"] = \"GET\"\n\n # TO-DO: test\n if timeout:\n params[\"connect_timeout\"] = timeout\n params[\"request_timeout\"] = timeout\n return fetch(params)\n\n\ndef post(url, data=None, timeout=None, **kargs):\n \"\"\"\n HTTP POST with similar interface to requests.post\n\n Important:\n tornado.webRequestHandler which calls this method must be decorated\n with @tgreenlet.asynchronous\n \"\"\"\n params = kargs\n params[\"url\"] = str(url)\n params[\"method\"] = \"POST\"\n params[\"body\"] = data\n\n # TO-DO: test\n if timeout:\n params[\"connect_timeout\"] = timeout\n 
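Putting the pieces together, a handler sketch (class name and URL are hypothetical) shows the intended calling pattern: the body reads synchronously, but runs in a child greenlet so the IOLoop stays free:

```python
class ProxyHandler(tornado.web.RequestHandler):
    @asynchronous
    def get(self):
        # Blocks this greenlet only; other requests keep being served.
        response = greenlet_fetch('http://example.com/upstream')
        self.write(response.body)
```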
params[\"request_timeout\"] = timeout\n return fetch(params)\n\n\ndef put(url, data=None, timeout=None, **kargs):\n \"\"\"\n HTTP PUT with similar interface to requests.put\n\n Important:\n tornado.webRequestHandler which calls this method must be decorated\n with @tgreenlet.asynchronous\n \"\"\"\n params = kargs\n params[\"url\"] = str(url)\n params[\"method\"] = \"PUT\"\n params[\"body\"] = data\n\n # TO-DO: test\n if timeout:\n params[\"connect_timeout\"] = timeout\n params[\"request_timeout\"] = timeout\n return fetch(params)\n\n\ndef delete(url, timeout=None, **kargs):\n \"\"\"\n HTTP DELETE with similar interface to requests.delete\n\n Important:\n tornado.webRequestHandler which calls this method must be decorated\n with @tgreenlet.asynchronous\n \"\"\"\n params = kargs\n params[\"url\"] = str(url)\n params[\"method\"] = \"DELETE\"\n\n # TO-DO: test\n if timeout:\n params[\"connect_timeout\"] = timeout\n params[\"request_timeout\"] = timeout\n return fetch(params)\n","sub_path":"greenlet_tornado/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"455451183","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 20 10:48:40 2020\r\n\r\n@author: Hezhuangfa/PCE\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nfrom fbprophet import Prophet\r\n\r\n#数据加载\r\ntrain = pd.read_csv(\"train.csv\",encoding = \"gbk\")\r\nprint(train.head())\r\n\r\n#转换为pandas中的日期格式\r\ntrain[\"Datetime\"] = pd.to_datetime(train.Datetime, format=\"%d-%m-%Y %H:%M\")\r\n#将Datetime作为train的索引\r\ntrain.index = train.Datetime\r\nprint(train.head())\r\n#去掉ID列和重复的Datetime列\r\ntrain = train.drop([\"ID\",\"Datetime\"], axis=1)\r\nprint(train.head())\r\n\r\n#按照天进行采样\r\ndaily_train = train.resample(\"D\").sum()\r\nprint(daily_train.head())\r\ndaily_train[\"ds\"] = daily_train.index\r\ndaily_train[\"y\"] = daily_train.Count #设置为ds、y的保留字\r\nprint(daily_train.head())\r\ndaily_train.drop([\"Count\"], axis=1, inplace=True)\r\nprint(daily_train.head())\r\n\r\n#拟合Prophet模型并做训练\r\nmodel = Prophet(yearly_seasonality=True, daily_seasonality=True, seasonality_prior_scale=0.1)\r\nmodel.fit(daily_train)\r\n#预测未来7个月,213天\r\nfuture = model.make_future_dataframe(periods=213)\r\nforecast = model.predict(future)\r\nprint(forecast)\r\nmodel.plot(forecast)\r\n#查看各个成分\r\nmodel.plot_components(forecast)\r\n\r\n# 对节假日建模\r\n# 将节日看成是一个正态分布,把活动期间当做波峰,lower_window 以及upper_window 的窗口作为扩散\r\nchinese_seasons = pd.DataFrame({\r\n 'holiday': 'chinese_season',\r\n 'ds': pd.to_datetime(['2012-01-01', '2012-05-01', '2012-10-01',\r\n '2013-01-01', '2013-05-01', '2013-10-01',\r\n '2014-01-01', '2014-05-01', '2014-10-01',\r\n '2015-01-01', '2015-05-01', '2015-10-01']),\r\n 'lower_window': 0,\r\n 'upper_window': 1,\r\n})\r\nprint(chinese_seasons)\r\n\r\nmodel = Prophet(holidays=chinese_seasons, daily_seasonality=True)\r\nmodel.fit(daily_train)\r\nfuture = model.make_future_dataframe(periods=213)\r\nforecast = model.predict(future)\r\n#print(forecast.tail())\r\n# 预测的成分分析绘图,展示预测中的趋势、周效应和年度效应,holidays项\r\nmodel.plot_components(forecast)\r\n#print(forecast.columns)\r\n","sub_path":"Lesson6/jetrail_forecast.py","file_name":"jetrail_forecast.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"363661576","text":"# -*- coding: utf-8 -*-\n# __author__: MUSIBII\n# __email__ : shaozuanzuan@gmail.com\n# __file__ : encrypt.py\n# __time__ : 2019-04-26 
14:42\n\n# import hmac\n# import time\n# import json\n# import base64\n# from hashlib import sha1\n# from Crypto.Cipher import AES\n#\n# from src import exception\n# from lib.conf.config import settings\n#\n#\n# def time_to_float(format_data_time):\n# \"\"\"通过格式化时间获得浮点数时间\"\"\"\n# time_tuple = time.strptime(format_data_time, \"%Y-%m-%dT%H:%M:%SZ\")\n# float_time = time.mktime(time_tuple)\n# return float_time\n#\n# def python_to_json(value):\n#\n# return json.dumps(value,ensure_ascii=False)\n#\n# def json_to_python(json_str):\n#\n# return json.loads(json_str,encoding=\"utf-8\")\n#\n# class TimeEncrypt(object):\n#\n# def __init__(self):\n# self.AccessKeySecret = settings.SECRET_KEY\n# if self.AccessKeySecret is None:\n# raise exception.MissKeyError(\"项目缺少唯一密钥\")\n#\n# def param(self, data=None):\n# format_time = self.random_string()\n# unsgin_string = self.generate_unsign(format_time, data)\n# sign = self.sign(unsgin_string.encode(\"utf-8\"))\n#\n# return format_time, sign\n#\n# def sign(self, unsign_string):\n# sign_secret = self.AccessKeySecret.encode(\"utf-8\")\n# # 利用 hmac 对其 按照sha1方式进行加密\n# signature = hmac.new(sign_secret, unsign_string, sha1).digest()\n# # 加密后获得其base64编码的值\n# sign = base64.b64encode(signature)\n# return sign.decode(\"utf-8\")\n#\n# def random_string(self):\n# format_time = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n# return format_time\n#\n# def generate_unsign(self, format_time, data=None):\n# parameter_dict = {\n#\n# }\n# float_time = time_to_float(format_time)\n# parameter_dict[\"float_time\"] = str(float_time + 193415)\n# parameter_dict[\"data_time\"] = format_time\n# if data:\n# data = json.dumps(data)\n# parameter_dict[\"data\"] = data\n# return self.sort_para(parameter_dict)\n#\n# def sort_para(self, par_dict):\n# par_list = [(key, item) for key, item in par_dict.items()]\n# sort_par = sorted(par_list)\n# unsign = \"&\".join([\"%s=%s\" % (key, item) for key, item in sort_par])\n# return unsign\n#\n# def vert_sign(self, sign, format_time, data):\n# \"\"\"验证由服务端发送过来的签是否合法\"\"\"\n# unsgin_string = self.generate_unsign(format_time, data)\n# new_sign = self.sign(unsgin_string.encode(\"utf-8\"))\n# if new_sign == sign:\n# return True\n# return False\n#\n#\n\n# time_encrypt = TimeEncrypt()\nimport base64\n\nfrom lib.conf.config import setting\nfrom lib.commexception.BaseException import MissKeyError\n# import hmac\nimport hashlib\n\nfrom lib.timeformat.basetimeformat import times\n\n\nclass EncryptSignature:\n\n def __init__(self):\n self.time = times.ret_time\n # self.key = setting.SECRET_KEY\n self.key = 'bl)=fo*qmzxvx09*+km%r22%@#454rq1#18*$*v%=7q%qe4k6t'\n if self.key is None:\n raise MissKeyError('SECRET_KEY').error\n\n def sign(self):\n sign_secret = self.key.encode(\"utf-8\")\n # 使用 md5 对 key 进行加密,并将当前时间戳作为 salt\n m = hashlib.md5(sign_secret)\n m.update(self.time.encode())\n m = m.hexdigest()\n # 生成签名字符串\n return m\n\n def joint(self):\n return '%s|%s' % (self.sign(), self.time)\n\n\n# e = EncryptSignature()\n# print(e.joint())\n\n\n\n\n\n\n\n\n\n\n","sub_path":"lib/encrypto/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"202328112","text":"import numpy as np\nimport torch\nimport os\nfrom torch.utils import data as data_utils\nfrom matplotlib import pyplot as plt\n\ndef normalize(Y):\n Y_min = Y.min()\n Y_max = Y.max()\n return (Y - Y_min) / (Y_max - Y_min), Y_min, Y_max\n\ndef backtransform(Y_normed, Y_min, Y_max):\n return 
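The active `EncryptSignature` class signs by hashing the secret key salted with the current timestamp; a matching verifier sketch for the `sign|time` token produced by `joint()` (`verify` is not part of the original module, and md5 only mirrors the original choice — an HMAC with sha256 would be the usual design):

```python
import hashlib

def verify(token, key):
    # Recompute md5(key) updated with the embedded timestamp and compare.
    sign, _, ts = token.partition('|')
    m = hashlib.md5(key.encode('utf-8'))
    m.update(ts.encode())
    return m.hexdigest() == sign
```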
Y_normed * (Y_max - Y_min) + Y_min\n\ndef create_path(path):\n part_path = path.split('/')\n new_path = ''\n for folder in part_path:\n new_path = os.path.join(new_path, folder)\n if not os.path.exists(new_path):\n os.mkdir(new_path)\n\nclass Network(object):\n\n def __init__(self, model):\n self.model = model\n self.eval_path = model.eval_path\n self.comment = model.comment\n self.train_losses = []\n self.test_losses = []\n self.y_min, self.y_max = None, None\n self.train_loader = None\n self.test_loader = None\n self.excluded_data = None\n self.loss_figure = None\n\n","sub_path":"Old/Convolutional/Code/Models/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"64891654","text":"import netaddr\nimport pandas as pd\n\nfrom range_to_cidr import range_to_cidr\n\ndef blacklist(mode):\n\n\t'''Blacklist Loader\n\n\tWHAT: Loads blacklists in to a set of IPs for \n\t\t queries to be made using netaddr\n\n\tHOW: blacklist('bogon')\n\n\tINPUT: lo.blacklist('bogon')\n\n\tOUTPUT: an IPSet object to be with netaddr\n\n\t'''\n\n\t# client9 datacenter IP \n\n\tif mode is 'client9':\n\t\tout = pd.read_csv('https://raw.githubusercontent.com/client9/ipcat/master/datacenters.csv',\n\t\t\t\t\t\t header=None)\n\t\tout = out[[0,1]]\n\t\tout = range_to_cidr(out)\n\n\telse:\n\t\t# full bogon list\n\t\tif mode is 'bogon':\n\t\t\tout = pd.read_csv('https://www.cidr-report.org/bogons/allocspace-prefix.txt',\n\t\t\t\t\t\t\t header=None)\n\n\t\t# Botlab datacenter IP\n\t\telif mode is 'denylist':\n\t\t\tout = pd.read_csv('https://raw.githubusercontent.com/botlabio/deny-hosting-IP/master/cidr.txt',\n\t \t\t\t\t\t\t header=None)\n\n\n\t\t# cymru bogons\n\t\telif mode is 'cymru':\n\t\t\tout = pd.read_csv('http://www.team-cymru.org/Services/Bogons/fullbogons-ipv4.txt',\n\t\t\t\t\t\t header=None)\n\t\t\tout = out[1:]\n\n\t\telif mode is 'firehol1':\n\t\t\tout = pd.read_csv('https://raw.githubusercontent.com/ktsaou/blocklist-ipsets/master/firehol_level1.netset',\n\t\t\t\t\t\t\t header=None,\n\t\t\t\t\t\t\t comment='#')\n\n\t\telif mode is 'firehol2':\n\t\t\tout = pd.read_csv('https://raw.githubusercontent.com/ktsaou/blocklist-ipsets/master/firehol_level2.netset',\n\t\t\t\t\t\t\t header=None,\n\t\t\t\t\t\t\t comment='#')\n\n\t\telif mode is 'firehol3':\n\t\t\tout = pd.read_csv('https://raw.githubusercontent.com/ktsaou/blocklist-ipsets/master/firehol_level3.netset',\n\t\t\t\t\t\t header=None,\n\t\t\t\t\t\t comment='#')\n\n\t\telif mode is 'firehol4':\n\t\t\tout = pd.read_csv('https://raw.githubusercontent.com/ktsaou/blocklist-ipsets/master/firehol_level4.netset',\n\t\t\t\t\t\t\t header=None,\n\t\t\t\t\t\t\t comment='#')\n\n\t\tout = netaddr.IPSet(out[0])\n\n\treturn out","sub_path":"logly/blacklist.py","file_name":"blacklist.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548198398","text":"# coding=utf-8\n\n\"\"\"Sort Colors.\n\n>>> solve = _solve\n\"\"\"\n\nimport collections\n\n\ndef _solve(nums):\n counter = collections.Counter(nums)\n i = [0]\n\n def _process(target):\n for _ in xrange(counter[target]):\n nums[i[0]] = target\n i[0] += 1\n map(_process, [0, 1, 2])\n\n\n# 这种思路的双 index 写法其具体实现其实很多变,可以有各种各样的写法,但是由于 python 没有 `++` `--`,\n# 所以最后的代码看起来也许没那么简洁,具体实现可参考 discuss 的代码\n# https://discuss.leetcode.com/topic/5422/share-my-at-most-two-pass-constant-space-10-line-solution\n# 
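Once a list is loaded, the returned `netaddr.IPSet` gives fast membership checks; a usage sketch against the `'bogon'` list defined above (the address is arbitrary):

```python
import netaddr

bogons = blacklist('bogon')
if netaddr.IPAddress('192.0.2.10') in bogons:
    print('address falls inside a listed prefix')
```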
https://discuss.leetcode.com/topic/6968/four-different-solutions\ndef _solve1(nums):\n i, end0, start2 = 0, 0, len(nums) - 1\n while i <= start2:\n if nums[i] == 0 and i > end0:\n nums[end0], nums[i] = 0, nums[end0]\n end0 += 1\n elif nums[i] == 2 and i < start2:\n nums[start2], nums[i] = 2, nums[start2]\n start2 -= 1\n else:\n i += 1\n\n\n# 思路较为清晰的三 index 写法, 代码参考自\n# https://discuss.leetcode.com/topic/26181/ac-python-in-place-one-pass-solution-o-n-time-o-1-space-no-swap-no-count\ndef _solve2(nums):\n i0 = i1 = 0\n for i2, num in enumerate(nums):\n nums[i2] = 2\n if num < 2:\n nums[i1] = 1\n i1 += 1\n if num == 0:\n nums[i0] = 0\n i0 += 1\n","sub_path":"medium/75.py","file_name":"75.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"230197323","text":"from graphviz import Digraph\n\n\ndef create_graph(instances):\n \"\"\" Creates a graph of the architecture using graphviz software. Requires graphviz to be installed on the system.\n Site: www.graphviz.org\n Args:\n instances (dict): instances is a dict of the form { 'server' : [IPAddress, Port]}.\n \"\"\"\n main = Digraph(node_attr={'shape': 'record'}, edge_attr={'minlen': '1'})\n\n main.node('Individual_User', 'User\\\\n(HTTPS client with APIKEY)')\n main.node('Devices', 'Devices/Apps\\\\n(HTTPS client with APIKEY)')\n main.node('Public', 'Public')\n\n main.edge('Public', 'PublicCatalogue', label=\"read only\")\n main.edge('Individual_User',\n 'Firewall',\n _attributes={'xlabel': \"APIs to search and manage devices/apps\\\\n(secure tunnel)\"})\n main.edge('Devices', 'Firewall', label=\"APIs to produce/consume data\\\\n(secure tunnel)\")\n main.body.append('{rankdir=LR;Public, Individual_User, Devices}')\n\n group = Digraph('cluster_0', graph_attr={'shape': 'record', 'label': '[ Middleware ]', 'color': 'blue'})\n\n group.node('Persistence',\n '{Persistence|Elasticsearch' +\n ' | {0}:{1}'.format(instances[\"elasticsearch\"][0], instances[\"elasticsearch\"][1]) +\n '}')\n group.node('Broker', '{Broker|RabbitMQ' +\n ' | {0}:{1}'.format(instances[\"rabbitmq\"][0], instances[\"rabbitmq\"][1]) +\n '}')\n group.node('LDAP',\n '{Authentication and\\\\nauthorization (AA) server|ldapd' +\n ' | {0}:{1}'.format(instances[\"ldapd\"][0], instances[\"ldapd\"][1]) +\n '}',\n _attributes={'color': 'red'})\n group.node('CA',\n '{Certificate\\\\nAuthority (CA)|OpenSSL' +\n ' | {0}:{1}'.format(instances[\"certificate_authority\"][0], instances[\"certificate_authority\"][1]) +\n '}',\n _attributes={'color': 'red'})\n group.node('PublicCatalogue',\n '{Publicly available\\\\nopen catalogue' +\n ' | {0}:{1}'.format(instances[\"catalogue\"][0], instances[\"catalogue\"][1]) +\n '}')\n group.node('Firewall',\n '{API gateway + Firewall cluster \\\\n (Certificates signed by the CA) | kong + iptables' +\n ' | {0}:{1}'.format(instances[\"kong\"][0], instances[\"kong\"][1]) +\n '}',\n _attributes={'style': 'bold'})\n group.node('HistoryAnalytics',\n '{Historical data \\\\n analytics engine|Apache storm' +\n ' | {0}:{1}'.format(instances[\"apache_storm\"][0], instances[\"apache_storm\"][1]) +\n '}',\n _attributes={'color': 'darkgreen'})\n group.node('StreamAnalytics', '{Stream analytics \\\\n engine|Apache storm}',\n _attributes={'color': 'darkgreen'})\n group.node('apt_repo', '{APT Repository|Aptly' +\n ' | {0}:{1}'.format(instances[\"apt_repo\"][0], instances[\"apt_repo\"][1]) +\n '}',\n _attributes={'color': 'darkgreen'})\n group.node('DNS', '{DNS\\\\n(with security 
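All three variants sort in place; a quick check of the one-pass three-pointer version:

```python
nums = [2, 0, 2, 1, 1, 0]
_solve2(nums)
print(nums)  # [0, 0, 1, 1, 2, 2]
```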
extensions)|BIND' +\n ' | {0}:{1}'.format(instances[\"bind\"][0], instances[\"bind\"][1]) +\n '}',\n _attributes={'color': 'darkgreen'})\n group.node('NTP', '{NTP server|OpenNTPD' +\n ' | {0}:{1}'.format(instances[\"openntpd\"][0], instances[\"openntpd\"][1]) +\n '}',\n _attributes={'color': 'darkgreen'})\n group.node('PolicyEnforcer', '{Security policy\\\\nenforcer and accounting|Custom scripts}',\n _attributes={'color': 'red'})\n group.node('Catalogue', '{Internal Catalogue}')\n group.node('Validation', '{Validation\\\\nserver|Apache storm}')\n group.node('point', _attributes={'shape': 'point'})\n\n group.edge('Broker', 'point', _attributes={'arrowhead': \"none\", 'style': 'dashed'})\n group.edge('Persistence', 'point', _attributes={'arrowhead': \"none\", 'style': 'dashed'})\n group.edge('Catalogue', 'point', _attributes={'arrowhead': \"none\", 'style': 'dashed'})\n group.edge('point', 'LDAP', _attributes={'style': 'dashed'})\n group.edge('Firewall', 'Validation', _attributes={'arrowhead': \"none\"})\n group.edge('Validation', 'Broker')\n group.edge('Broker', 'StreamAnalytics', _attributes={'style': \"dashed\"})\n group.edge('Validation', 'Persistence')\n group.edge('Validation', 'Catalogue')\n group.edge('Persistence', 'HistoryAnalytics', _attributes={'style': \"dashed\"})\n group.edge('CA', 'Firewall', label=\"Certificates\", _attributes={'style': \"dashed\"})\n group.body.append(\"{rank=same; CA;PolicyEnforcer;PublicCatalogue}\")\n group.body.append('{rank=same; DNS;LDAP;NTP}')\n group.body.append('{rank=same;HistoryAnalytics;StreamAnalytics}')\n main.subgraph(group)\n main.render(view=True)\n","sub_path":"ubuntu/modules/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"491113347","text":"\"\"\"\nWe get as input the size of the field in which our miner moves. The field is always a square. After that we will receive\nthe commands which represent the directions in which the miner should move. The miner starts from position - 's'. The\ncommands will be: left, right, up and down. If the miner has reached a side edge of the field and the next command\nindicates that he has to get out of the field, he must remain on his current possition and ignore the current command.\nThe possible characters that may appear on the screen are:\n* - a regular position on the field.\ne - the end of the route.\nc - coal\ns - the place where the miner starts\nEach time when the miner finds a coal, he collects it and replaces it with '*'. Keep track of the count of the collected\ncoals. If the miner collects all of the coals in the field, the program stops and you have to print the following\nmessage: \"You collected all coals! ({rowIndex}, {colIndex})\".\nIf the miner steps at 'e' the game is over (the program stops) and you have to print the following message:\n\"Game over! ({rowIndex}, {colIndex})\".\nIf there are no more commands and none of the above cases had happened, you have to print the following message:\n\"{remainingCoals} coals left. 
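A minimal sketch of the `instances` mapping `create_graph` expects — every service key referenced inside the function must be present, and all addresses here are hypothetical:

```python
instances = {
    name: ['10.0.0.1', 8000 + i]
    for i, name in enumerate([
        'elasticsearch', 'rabbitmq', 'ldapd', 'certificate_authority',
        'catalogue', 'kong', 'apache_storm', 'apt_repo', 'bind', 'openntpd',
    ])
}
create_graph(instances)  # requires the graphviz binaries on the PATH
```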
({rowIndex}, {colIndex})\".\n\n\"\"\"\n\n\ndef read_matrix():\n rows_count = int(input())\n moves = input().split()\n matrix = []\n for i in range(rows_count):\n matrix.append(input().split())\n return (matrix, moves)\n\n\ndef starting_point(matrix):\n coal_count = 0\n row_start = None\n col_start = None\n for row in range(len(matrix)):\n for col in range(len(matrix)):\n if matrix[row][col] == 's':\n row_start, col_start = row, col\n elif matrix[row][col] == 'c':\n coal_count += 1\n return (row_start, col_start, coal_count)\n\n\ndef miner_moves(matrix, moves):\n row, col, coal_count = starting_point(matrix)\n check = False\n for move in moves:\n if move == 'left':\n if col-1 >= 0:\n col = col -1\n elif move == 'right':\n if col+1 < len(matrix):\n col = col + 1\n elif move == 'up':\n if row-1 >= 0:\n row = row -1\n elif move == 'down':\n if row+1 < len(matrix):\n row = row + 1\n\n if matrix[row][col] == 'c':\n coal_count -= 1\n matrix[row][col] = '*'\n elif matrix[row][col] == 'e':\n print(f\"Game over! ({row}, {col})\")\n check = True\n break\n\n if coal_count == 0:\n print(f\"You collected all coals! ({row}, {col})\")\n check = True\n break\n if not check:\n print(f'{coal_count} coals left. ({row}, {col})')\n\n\n\n\n\n(matrix, moves) = read_matrix()\n\nminer_moves(matrix, moves)","sub_path":"Multidimensional Lists/miner.py","file_name":"miner.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"127086104","text":"import sys\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\n\nn = int(input())\n\nans = 10**3\nfor a in range(1,n):\n b = n-a\n tmp = 0\n for j in reversed(range(len(str(a)))):\n tmpa = (a//(10**j))\n a -= tmpa*(10**j)\n tmp += tmpa\n for j in reversed(range(len(str(b)))):\n tmpb = (b//(10**j))\n b -= tmpb*(10**j)\n tmp += tmpb\n\n ans = min(ans,tmp)\n\nprint(ans)","sub_path":"Python_codes/p03331/s740605213.py","file_name":"s740605213.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"357756615","text":"import json\nimport os\nimport subprocess\nimport datetime\nfrom datetime import timedelta\nfrom flask import Flask\nfrom flask import request as flask_req\nimport requests as r\n\napp = Flask(__name__, static_url_path='')\n\nAPI_URL = 'https://http-api.openbloomberg.com/request/blp/refdata/HistoricalData'\nCLIENT_KEY = 'client.key'\nCLIENT_CERT = 'client.crt'\nBLOOMBERG_CERT = 'bloomberg.crt'\nDATA_SET_SIZE = 100\nJSON_FILENAME = 'data_request.json'\n\n@app.route('/')\ndef root():\n print('Hello printline!')\n return app.send_static_file('index.html')\n\n\"\"\"Make sure you're getting a GET request with the url including ?symbol=\"\"\"\n@app.route('/stock/', methods=['GET'])\ndef new_stock_request(symbol=None):\n \"\"\"This endpoint receives the json, makes the request to the Go server, and returns the results\"\"\"\n if(symbol==None):\n return ''\n \n #retrieve data from bloomberg server\n raw_data = retrieve_from_bloomberg(symbol)\n\n #check that stock data is valid\n if not valid_data(raw_data):\n return invalid_response(symbol)\n \n #parse the json to format for go\n processable_data = parse_for_processing(raw_data)\n \n #generate response by sending data for processing\n json_response = send_for_processing(processable_data, symbol)\n \n return json_response\n\ndef valid_data(data):\n \"\"\"determines whether the data set is valid (for our purposes, nonempty)\"\"\"\n 
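The nested loops in the digit-sum snippet above only compute the digit sums of `a` and `n - a`; an equivalent, more direct sketch (not the original code) uses a string-based digit sum:

```python
def min_digit_sum_split(n):
    # For every split n = a + b, minimise digitsum(a) + digitsum(b).
    digit_sum = lambda x: sum(int(c) for c in str(x))
    return min(digit_sum(a) + digit_sum(n - a) for a in range(1, n))

print(min_digit_sum_split(15))  # 6, e.g. via 10 + 5
```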
data_set = data['data'][0]['securityData']['fieldData']\n    return len(data_set) > 0\n\ndef invalid_response(symbol):\n    \"\"\"produces json that signifies that data is invalid\"\"\"\n    response_jso = {}\n    response_jso['isValid'] = False\n    response_jso['symbol'] = symbol\n    return json.dumps(response_jso)\n\ndef retrieve_from_bloomberg(symbol):\n    \"\"\"Retrieves data on the stock symbol from bloomberg.\"\"\"\n    curl_command = build_curl_command()\n    build_json_file(symbol)\n    json_string = subprocess.check_output(curl_command, shell=True)\n    print(json_string)\n    delete_json_file()\n    return json.loads(json_string)\n\ndef build_json_file(symbol):\n    \"\"\"makes a json file and populates it\"\"\"\n    json_file = open(JSON_FILENAME, 'w')\n    json_file.write(json_request(symbol))\n    json_file.close()\n\ndef delete_json_file():\n    \"\"\"deletes the temporary file we created\"\"\"\n    os.remove(JSON_FILENAME)\n\ndef parse_for_processing(raw_data):\n    \"\"\"parses the data to a format for processing\"\"\"\n    daily_data = raw_data['data'][0]['securityData']['fieldData']\n    # use a list (not a lazy map object) so the result is JSON-serializable\n    return [x['PX_LAST'] for x in daily_data]\n\ndef send_for_processing(data, sym):\n    \"\"\"sends data for processing and returns the JSON response\"\"\"\n    resp = r.post('http://localhost:8080', json=data)  # r is the requests module imported above\n    marshalled = resp.json()\n    response_jso = {}\n    response_jso['isValid'] = True\n    response_jso['symbol'] = sym\n    response_jso['current'] = marshalled['current']\n    response_jso['tomorrow'] = marshalled['tomorrow']\n    response_jso['amount_increase'] = marshalled['amount_increase']\n    response_jso['percent_increase'] = marshalled['percent_increase']\n    response_jso['mse'] = marshalled['mse']\n    return json.dumps(response_jso)\n\ndef build_curl_command():\n    \"\"\"builds a curl command from a stock symbol\"\"\"\n    curl_command = 'curl -X POST \"' + API_URL + '\"'\n    curl_command += ' --cacert ' + BLOOMBERG_CERT\n    curl_command += ' --cert ' + CLIENT_CERT\n    curl_command += ' --key ' + CLIENT_KEY\n    curl_command += ' --data @\"' + JSON_FILENAME + '\"'\n    return curl_command\n\ndef json_request(symbol):\n    \"\"\"builds a json request string\"\"\"\n    request = {}\n    request['securities'] = [symbol + ' US Equity']\n    request['fields'] = ['PX_LAST']\n    request['startDate'] = date_minus(DATA_SET_SIZE)\n    request['endDate'] = date_minus(0)\n    request['periodicitySelection'] = \"DAILY\"\n\n    return json.dumps(request)\n\ndef date_minus(days):\n    \"\"\"gets the string representing the date minus the given number of days\"\"\"\n    current_date = datetime.datetime.now()\n    past_date = current_date + timedelta(days=(-days))\n    return past_date.strftime('%Y%m%d')\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', debug=False)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"14450004","text":"import bird\r\n\r\nclass SongBird(bird.Bird):\r\n    def __init__(self):\r\n        super(SongBird, self).__init__()\r\n        self.sound = \"ABCDEFG...XYZ\"\r\n\r\n    def sing(self):\r\n        if not self.hanger:\r\n            print(self.sound)\r\n            self.hanger = True\r\n        else:\r\n            print(\"Too hungry to sing the song\")\r\n\r\nif __name__ == \"__main__\":\r\n    b1 = SongBird()\r\n    b1.sing()\r\n    b1.eat()\r\n    b1.sing()\r\n    b1.eat()\r\n
    b1.fly()\r\n\r\n","sub_path":"9_topic/9_2_3_songbird_super.py","file_name":"9_2_3_songbird_super.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"160500604","text":"from flask import Flask, render_template, request, redirect, send_from_directory\nimport os\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\nos.makedirs(os.path.join('static'), exist_ok=True)\n\n\n@app.route('/')\ndef hello_world():\n    return render_template('/upload_video.html')\n\n@app.route('/upload-video', methods=[\"GET\", \"POST\"])\ndef upload_video():\n    if request.method == \"POST\":\n        if request.files:\n            # bind the uploaded file before using it; the original referenced\n            # an undefined name `video`\n            video = next(iter(request.files.values()))\n            video.filename = \"UC\"\n            #video.save(\"C:/Users/Saumya Agnihotri/PycharmProjects/VOB/static\")\n            video.save(os.path.join('static', secure_filename(video.filename)))\n            print(video.filename)\n            temp = video.filename\n            return render_template('/complete.html', value=temp)\n    return render_template('/upload_video.html')\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"228268393","text":"import requests\r\n\r\nresponse = requests.get(\"https://api.weather.gov/gridpoints/PQR/107,97/forecast/hourly\")\r\ndata = response.json()\r\noutF = open(\"output.json\", \"w\")\r\noutF.write(response.text)\r\n\r\nflag = 0\r\nmFlag = 0 #this does not need to be reset ever because we only get 1 weeks worth of data\r\ncurrDate = 0\r\nfor i in data['properties']['periods']: #parse the JSON\r\n    strDate = i['startTime'][8] + i['startTime'][9]\r\n    intDate = int(strDate)\r\n    strHr = i['startTime'][11:13]\r\n    intHr = int(strHr)\r\n    if(intDate == 1 and mFlag == 0):\r\n        currDate = 0\r\n        mFlag = 1\r\n\r\n    if(intDate > currDate):\r\n        currDate = intDate\r\n        flag = 0\r\n\r\n    if(i['temperature'] >= 80 and flag == 0):\r\n        print(\"On:\", i['startTime'][0:10], \"Stop process at:\", i['startTime'][11:16])\r\n        flag = 1\r\n\r\n    if(intHr == 23 and flag != 1):\r\n        print(\"On:\", i['startTime'][0:10], \"temperature will not reach 80 degrees F\")\r\n\r\n\r\ninput(\"Press Enter to exit...\")","sub_path":"natWeather.py","file_name":"natWeather.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"291799500","text":"from __future__ import print_function\nimport argparse\nimport os,sys\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\n# import torchvision.datasets as dset\n# import torchvision.transforms as transforms\n# import torchvision.utils as vutils\nfrom torch.autograd import Variable\nfrom datasets import PartDataset\nimport torch.nn.functional as F\nfrom pointnet6DV1Multiple import PointNetDenseCls\n\ndef func_miou(num_classes, target, pred_choice):\n    part_ious = list()\n    segl = target.detach().cpu().numpy()\n    segp = pred_choice.detach().cpu().numpy()\n    for l in range(num_classes):\n        if l not in segl:\n            continue\n        else:\n            if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0): # part is not present, no prediction as well\n                part_ious.append(1.0)\n            else:\n                part_ious.append( np.sum((segl==l) & (segp==l)) / float(np.sum((segl==l) | (segp==l))))\n    return part_ious\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batchSize',
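The upload endpoint can be exercised without a browser through Flask's built-in test client; a sketch (the field name `video` is arbitrary, since the handler takes the first uploaded file):

```python
import io

client = app.test_client()
resp = client.post(
    '/upload-video',
    data={'video': (io.BytesIO(b'fake bytes'), 'clip.mp4')},
    content_type='multipart/form-data',
)
print(resp.status_code)
```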
type=int, default=16, help='input batch size')\nparser.add_argument('--num_points', type=int, default=2048, help='input batch size')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\nparser.add_argument('--nepoch', type=int, default=200, help='number of epochs to train for')\nparser.add_argument('--outf', type=str, default='./seg/V1M', help='output folder')\nparser.add_argument('--model', type=str, default = '', help='model path')\nparser.add_argument('--n_views', type=int, default = 13, help='view numbers')\nparser.add_argument('--lr', type=float, default = 0.001, help='learning rate')\nparser.add_argument('--momentum', type=float, default = 0.9, help='momentum')\nparser.add_argument('--classType', type=str, default = 'Bag', help='class')\nparser.add_argument('--devices',type=list,default=[0],help='multiple devices supported')\nopt = parser.parse_args()\nopt.devices=[int(i) for i in opt.devices]\nprint (opt)\n\nopt.manualSeed = random.randint(1, 10000) # fix seed\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\nclasses = ['Bag','Chair','Car','Mug','Table','Airplane','Cap','Earphone','Guitar','Knife','Lamp','Laptop','Motorbike','Pistol','Rocket','Skateboard']\ndataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = False,class_choice = classes, npoints = opt.num_points)\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=int(opt.workers))\n\ntest_dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = False, train = False, class_choice = classes,npoints = opt.num_points)\ntestdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=int(opt.workers))\n\nprint(len(dataset), len(test_dataset))\nnum_classes = dataset.num_seg_classes\nprint('classes', num_classes)\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n\nblue = lambda x:'\\033[94m' + x + '\\033[0m'\n\nclassifier = PointNetDenseCls(k = num_classes,views=opt.n_views)\n\nif opt.model != '':\n print(\"Finish Loading\")\n classifier.load_state_dict(torch.load(opt.model))\n\nclassifier=torch.nn.DataParallel(classifier, device_ids=opt.devices)\ncudnn.benchmark=True\n\nif opt.model != '':\n print(\"Finish Loading\")\n classifier.load_state_dict(torch.load(opt.model))\n\noptimizer = optim.SGD(classifier.parameters(), lr=opt.lr, momentum=opt.momentum)\nclassifier.cuda()\n\nnum_batch = len(dataset)/opt.batchSize\nmiou_list=list()\nfor epoch in range(opt.nepoch):\n for i, data in enumerate(dataloader, 0):\n points, target = data\n points, target = Variable(points), Variable(target)\n points = points.transpose(2,1) \n points, target = points.cuda(), target.cuda() \n optimizer.zero_grad()\n classifier = classifier.train()\n pred= classifier(points)\n pred = pred.view(-1, num_classes)\n target = target.view(-1,1)[:,0] - 1\n #print(pred.size(), target.size())\n loss = F.nll_loss(pred, target)\n loss.backward()\n optimizer.step()\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(target.data).cpu().sum()\n print('[%d: %d/%d] train loss: %f accuracy: %f' %(epoch, i, num_batch, loss.item(), correct.item()/float(opt.batchSize*opt.num_points)))\n \n if i % 100 == 0:\n\n j, data = next(enumerate(testdataloader, 0))\n points, target = data\n points, target = Variable(points), Variable(target)\n points = points.transpose(2,1) \n points, target = 
points.cuda(), target.cuda()\n classifier = classifier.eval()\n pred= classifier(points)\n pred = pred.view(-1, num_classes)\n target = target.view(-1,1)[:,0] - 1\n\n loss = F.nll_loss(pred, target)\n pred_choice = pred.data.max(1)[1]\n correct = pred_choice.eq(target.data).cpu().sum()\n\n ioumax=func_miou(num_classes,target,pred_choice)\n iou=sum(ioumax)/len(ioumax)\n miou_list.append(iou)\n miou=np.mean(miou_list)\n a=('V1_multiple [%d: %d/%d] %s loss: %f accuracy: %f IOU: %f mIOU %f' %(epoch, i, num_batch, blue('test'), loss.item(), correct.item()/float(opt.batchSize*opt.num_points),iou,miou))\n f = open(opt.outf+\"/log.txt\", \"a\")\n f.write(a)\n f.close\n print(a)\n \n torch.save(classifier.state_dict(), '%s/seg_model_%d.pth' % (opt.outf, epoch))","sub_path":"train_seg_1M.py","file_name":"train_seg_1M.py","file_ext":"py","file_size_in_byte":5801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"201952680","text":"class Customer:\n\n def __init__(self, name, balance):\n self.name = name\n\n self.balance = balance\n\n def buy(self, product, productAmount, seller):\n #Buy method increases Seller's balance and decreases Customer's balance\n if self._validate_buy_parameters(product, productAmount, seller):\n seller.balance += product.productPrice * productAmount # как установилась связь с продавцом?\n self.balance -= product.productPrice * productAmount # как установилась связь с продуктом?\n\n print('Customer',self.name, 'has bought:', product.productName)\n print('Number of books:', productAmount)\n print('Customers balance is:', self.balance)\n print('Seller is:', seller.name)\n print('Seller balance is:', seller.balance)\n if product.productCurrency == 'dollar':\n exchangeCoefficient = 1.2\n self.balance -= product.productPrice * exchangeCoefficient * productAmount\n print('If currency is Dollar, the balance is:', self.balance)\n else:\n return False\n\n def _validate_buy_parameters(self, *args):\n return isinstance(args[0], Product) \\\n and isinstance(args[1], int) \\\n and isinstance(args[2], Seller)\n\n\nclass Seller:\n\n def __init__(self, name, balance, product):\n self.name = name\n self.balance = balance\n self.product = product\n\n\nclass Product:\n\n def __init__(self, productName, productPrice, productCurrency):\n self.productName = productName\n self.productPrice = productPrice\n self.productCurrency = productCurrency\n\nclass Currency:\n\n def __init_(self, dollar, euro):\n self.dollar = dollar\n self.euro = euro\n\nclass Market:\n\n def __init__(self, customer, seller, product):\n self.customer = customer\n self.seller = seller\n self.product = product\n\n def deal(self):\n self.customer.buy(self.product, 7, self.seller)\n\nif __name__ == '__main__':\n customer = Customer('Nick', 100)\n\n # Create two Products:\n productBookPython = Product('Python for beginners', 10.99, 'dollar')\n # productBookLionKing = Product('The Lion King', 5.75, 'euro')\n # productBookOOPForDummies = Product('OOP For Dummies', 15.65, 'dollar')\n\n # Create two Sellers\n sellerAmazon = Seller('Amazon', 5.00, productBookPython)\n # sellerEbay = Seller('Ebay', 3.00, productBookLionKing)\n market_deal_1 = Market(customer, sellerAmazon, productBookPython)\n market_deal_1.deal()\n\n # Call Customer's buy method\n # customer.buy(productBookPython, 1, 
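A toy check of `func_miou` on two tiny label tensors (three classes; class 0 matches perfectly, classes 1 and 2 each overlap half their union):

```python
import torch

target = torch.tensor([0, 0, 1, 1, 2])
pred = torch.tensor([0, 0, 1, 2, 2])
print(func_miou(3, target, pred))  # [1.0, 0.5, 0.5]
```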
sellerAmazon)\n","sub_path":"Konstantin/HomeTask_8/HomeTask_8.py","file_name":"HomeTask_8.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"248982613","text":"from django.urls import path,re_path\nfrom .views import *\n\napp_name = 'students'\nurlpatterns = [\n path('home/', student_home_view, name=\"home\"),\n path('courses/', courses_redirect_view, name=\"courses_redirect_view\"),\n path('courses/mt/', no_course_view, name=\"no_course_view\"),\n re_path(r'^courses/(?P[0-9]{10})/$', student_courses_view, name=\"courses_view\"),\n re_path(r'^courses/(?P[0-9]{10})/assignments/$', all_assignments_view, name=\"all_assignments\"),\n re_path(r'^assignments/(?P[0-9]{10})/(?P[0-9]{10})/$', assignment_details_view, name=\"assignment-details\"),\n # path('results/',student_results_view, name=\"results\"),\n path('attendance/', attendance_redirect_view, name=\"attendance_redirect_view\"),\n re_path(r'^attendance/(?P[0-9]{10})/$',student_attendance_view, name=\"attendance\"),\n path('profile/',student_profile_view, name=\"profile\"),\n path('edit-profile/',student_profile_edit,name='profile-edit'),\n path('results/', courses_redirect_view, name=\"courses_redirect_view\"),\n path('results/mt/', no_course_view, name=\"no_course_view\"),\n \n re_path(r'^results/(?P[0-9]{10})/$', student_results_view, name=\"courses_view\"),\n path('logout/',logout_view, name='logout'),\n\n\n re_path(r'^courses/(?P[0-9]{10})/doubts$', student_courses_doubt, name=\"doubt\"),\n\n\n]","sub_path":"students/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"550522512","text":"import pickle\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport yaml\nfrom dcegm.consumption_retirement_model import compute_expected_value\nfrom dcegm.consumption_retirement_model import compute_next_period_marginal_utility\nfrom dcegm.consumption_retirement_model import inverse_marginal_utility_crra\nfrom dcegm.consumption_retirement_model import utility_func_crra\nfrom dcegm.solve import solve_dcegm\nfrom numpy.testing import assert_array_almost_equal as aaae\n\n# Obtain the test directory of the package.\nTEST_DIR = Path(__file__).parent\n\n# Directory with additional resources for the testing harness\nTEST_RESOURCES_DIR = TEST_DIR / \"resources\"\n\n\ndef get_example_model(model):\n \"\"\"Return parameters and options of an example model.\"\"\"\n params = pd.read_csv(\n TEST_RESOURCES_DIR / f\"{model}.csv\", index_col=[\"category\", \"name\"]\n )\n options = yaml.safe_load((TEST_RESOURCES_DIR / f\"{model}.yaml\").read_text())\n return params, options\n\n\n@pytest.fixture()\ndef utility_functions():\n \"\"\"Return dict with utility functions.\"\"\"\n return {\n \"utility\": utility_func_crra,\n \"inverse_marginal_utility\": inverse_marginal_utility_crra,\n \"next_period_marginal_utility\": compute_next_period_marginal_utility,\n }\n\n\n@pytest.mark.parametrize(\n \"model, choice_range\",\n [\n (\"deaton\", [0]),\n (\"retirement_taste_shocks\", [1, 0]),\n (\"retirement_no_taste_shocks\", [1, 0]),\n ],\n)\ndef test_benchmark_models(model, choice_range, utility_functions):\n params, options = get_example_model(f\"{model}\")\n\n policy_calculated, value_calculated = solve_dcegm(\n params,\n options,\n utility_functions,\n compute_expected_value,\n )\n\n policy_expected = pickle.load(\n 
open(TEST_RESOURCES_DIR / f\"policy_{model}.pkl\", \"rb\")\n )\n value_expected = pickle.load(open(TEST_RESOURCES_DIR / f\"value_{model}.pkl\", \"rb\"))\n\n for period in range(23, -1, -1):\n for choice in choice_range:\n if model == \"deaton\":\n policy_expec = policy_expected[period, choice]\n value_expec = value_expected[period, choice]\n else:\n policy_expec = policy_expected[period][choice].T\n value_expec = value_expected[period][choice].T\n\n aaae(\n policy_calculated[period, choice, :][\n :,\n ~np.isnan(policy_calculated[period, choice, :]).any(axis=0),\n ],\n policy_expec,\n )\n aaae(\n value_calculated[period, choice, :][\n :,\n ~np.isnan(value_calculated[period, choice, :]).any(axis=0),\n ],\n value_expec,\n )\n","sub_path":"tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"93459504","text":"import torch\nimport torch.nn as nn\n\n\nclass CrossEntropyLossWithUncertainty(nn.Module):\n \"\"\"Cross-entropy loss modified to also include uncertainty outputs.\"\"\"\n def __init__(self, size_average=True, reduce=True):\n super(CrossEntropyLossWithUncertainty, self).__init__()\n self.ce_loss = nn.CrossEntropyLoss(reduce=False)\n self.size_average = size_average\n self.reduce = reduce\n\n def forward(self, logits, labels):\n \"\"\"\n Args:\n logits: Un-normalized outputs of shape (batch_size, num_tasks, 3)\n labels: Labels of shape (batch_size, num_tasks) where -1 is uncertain, 0 is negative, 1 is positive.\n \"\"\"\n batch_size, last_dim = logits.size()\n if last_dim % 3:\n raise ValueError('Last dim should be divisible by 3, got last dim of {}'.format(last_dim))\n num_tasks = last_dim // 3\n\n logits = logits.view(batch_size * num_tasks, 3) # Fuse batch and task dimensions\n labels = (labels + 1).type(torch.int64) # Shift labels into range [0, 2]\n labels = labels.view(-1) # Flatten\n\n loss = self.ce_loss(logits, labels) # Output shape (batch_size * num_tasks,)\n loss = loss.view(batch_size, num_tasks) # Reshape and take average over batch dim\n\n if self.size_average:\n loss = loss.mean(1)\n if self.reduce:\n loss = loss.mean(0)\n\n return loss\n","sub_path":"eval/loss/cross_entropy_with_uncertainty.py","file_name":"cross_entropy_with_uncertainty.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"1842912","text":"\"\"\"\nThe game over screen\n\"\"\"\n# Import the needed modules\n\nimport arcade\nimport arcade.gui\nfrom arcade.gui import UIManager, UIInputBox\nfrom buttons.my_flat_button import MyFlatButton\nimport level_select\nfrom buttons.my_input_box import MyInputBox\nfrom constants import SCREEN_WIDTH, SCREEN_HEIGHT\nfrom highscore_api import put_high_score, HighScore\n\n\nclass GameOver(arcade.View):\n def __init__(self, game_view, score: int):\n super().__init__()\n self.initials_input_box: MyInputBox = None\n self.high_score_button: MyFlatButton = None\n self.submitted = False\n self.ui_manager = UIManager()\n self.game_view = game_view\n self.score = score\n\n def on_hide_view(self):\n self.ui_manager.unregister_handlers()\n\n def on_show(self):\n arcade.set_background_color(arcade.csscolor.BLACK)\n\n arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)\n\n def on_show_view(self):\n \"\"\" Called once when view is activated. 
\"\"\"\n        y_slot = self.window.height // 4\n        left_column_x = self.window.width // 4\n        right_column_x = 3 * self.window.width // 4\n        print(\"Setup\")\n        self.ui_manager.purge_ui_elements()\n\n        # Button for restarting the level\n        button = MyFlatButton(\n            app=self,\n            text='Restart Level',\n            center_x=375,\n            center_y=y_slot * 1,\n            width=250,\n            height=100\n        )\n        button.set_style_attrs(\n            font_color=arcade.color.WHITE,\n            bg_color=(169, 169, 169),\n            bg_color_hover=arcade.color.ORANGE,\n            bg_color_press=arcade.color.WHITE,\n            border_color=arcade.color.BLACK,\n        )\n        button.add_event_listener(self.restart)\n        self.ui_manager.add_ui_element(button)\n\n        # Button for returning to the main menu\n        button = MyFlatButton(\n            app=self,\n            text='Main Menu',\n            center_x=625,\n            center_y=y_slot * 1,\n            width=250,\n            height=100\n        )\n        button.set_style_attrs(\n            font_color=arcade.color.WHITE,\n            bg_color=(169, 169, 169),\n            bg_color_hover=arcade.color.BLUE,\n            bg_color_press=arcade.color.WHITE,\n            border_color=arcade.color.BLACK,\n        )\n        button.add_event_listener(self.level_select)\n        self.ui_manager.add_ui_element(button)\n\n        # Button for submitting a high score\n        self.high_score_button = MyFlatButton(\n            app=self,\n            text='Submit High Score',\n            center_x=SCREEN_WIDTH // 2 + 200,\n            center_y=SCREEN_HEIGHT - 150,\n            width=250,\n            height=50,\n        )\n        self.high_score_button.set_style_attrs(\n            font_color=arcade.color.BLACK,\n            bg_color=arcade.color.GREEN,\n            bg_color_hover=arcade.color.GO_GREEN,\n            bg_color_press=arcade.color.WHITE,\n            border_color=arcade.color.WHITE,\n        )\n        self.high_score_button.add_event_listener(self.submit_high_score)\n        self.ui_manager.add_ui_element(self.high_score_button)\n\n        self.initials_input_box = MyInputBox(\n            text='initials',\n            center_x=SCREEN_WIDTH // 2 - 200,\n            center_y=SCREEN_HEIGHT - 150,\n            width=250,\n            height=50,\n        )\n        self.initials_input_box.add_event_listener(self.submit_high_score)\n        self.ui_manager.add_ui_element(self.initials_input_box)\n\n    def on_draw(self):\n        arcade.start_render()\n        arcade.draw_text(\"Game Over\", 350, 350, arcade.color.RED_DEVIL, font_size=50)\n        arcade.draw_text(\"Your Score: %i\" % self.score, SCREEN_WIDTH // 2, SCREEN_HEIGHT - 100, arcade.color.WHITE, font_size=20, anchor_x=\"center\")\n        if self.submitted:\n            arcade.draw_text(\"Your score has been submitted!\", SCREEN_WIDTH // 2, SCREEN_HEIGHT - 150, arcade.color.GO_GREEN, font_size=20, anchor_x=\"center\", anchor_y=\"center\")\n\n    def restart(self):\n        self.game_view.setup()\n        self.window.show_view(self.game_view)\n\n    def level_select(self):\n        new_view = level_select.LevelSelect(self.game_view)\n        self.window.show_view(new_view)\n\n    def submit_high_score(self):\n        if not self.submitted:\n            if put_high_score(HighScore(self.initials_input_box.text, self.score)):\n                self.submitted = True\n                self.high_score_button.scale = 0\n                self.initials_input_box.scale = 0\n","sub_path":"game_over.py","file_name":"game_over.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"303933951","text":"# from __future__ import print_function, absolute_import, division\nimport logging\nimport multiprocessing\nimport shutil\nimport types\nfrom pathlib import Path\nimport traceback\nimport lightkurve\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport yaml\nfrom matplotlib.colorbar import Colorbar\nfrom matplotlib import patches\nfrom astropy.visualization.mpl_normalize import ImageNormalize\nfrom astropy.table import Table\nfrom astropy.io import 
ascii\nimport astropy.visualization as stretching\nfrom argparse import ArgumentParser\nfrom sherlockpipe import tpfplotter\nimport six\nimport sys\nimport sherlockpipe.LATTE\nsys.modules['astropy.extern.six'] = six\nsys.modules['LATTE'] = sherlockpipe.LATTE\n\nmatplotlib.use('Agg')\nimport pandas as pd\nimport os\nfrom os.path import exists\nimport ast\nimport csv\nfrom sherlockpipe.LATTE import LATTEutils, LATTEbrew\nfrom os import path\nfrom math import ceil\n\n\n# get the system path\nsyspath = str(os.path.abspath(LATTEutils.__file__))[0:-14]\n# ---------\n\n# --- IMPORTANT TO SET THIS ----\nout = 'pipeline' # or TALK or 'pipeline'\nttran = 0.1\nresources_dir = path.join(path.dirname(__file__))\n\n\nclass Vetter:\n \"\"\"\n Provides transiting candidate vetting information like centroids and spaceship motion, momentum dumps, neighbours\n curves inspection and more to give a deeper insight on the quality of the candidate signal.\n \"\"\"\n def __init__(self, object_dir, validate):\n self.args = types.SimpleNamespace()\n self.args.noshow = True\n self.args.north = False\n self.args.o = True\n self.args.mpi = False\n self.args.auto = True\n self.args.save = True\n self.args.nickname = \"\" # TODO do we set the sherlock id?\n self.args.FFI = False # TODO get this from input\n self.args.targetlist = \"best_signal_latte_input.csv\"\n self.args.new_path = \"\" # TODO check what to do with this\n self.object_dir = os.getcwd() if object_dir is None else object_dir\n self.latte_dir = str(Path.home()) + \"/.sherlockpipe/latte/\"\n if not os.path.exists(self.latte_dir):\n os.mkdir(self.latte_dir)\n self.data_dir = self.object_dir\n self.validation_runs = 5\n self.validate = validate\n\n def update(self):\n \"\"\"\n Updates the LATTE metadata to be up to date with the latest TESS information.\n \"\"\"\n indir = self.latte_dir\n if os.path.exists(indir) and os.path.isdir(indir):\n shutil.rmtree(indir, ignore_errors=True)\n os.makedirs(indir)\n with open(\"{}/_config.txt\".format(indir), 'w') as f:\n f.write(str(indir))\n logging.info(\"Download the text files required ... \")\n logging.info(\"Only the manifest text files (~325 M) will be downloaded and no TESS data.\")\n logging.info(\"This step may take a while but luckily it only has to run once... 
\n\")\n        if not os.path.exists(\"{}\".format(indir)):\n            os.makedirs(indir)\n        if not os.path.exists(\"{}/data\".format(indir)):\n            os.makedirs(\"{}/data\".format(indir))\n        outF = open(indir + \"/data/temp.txt\", \"w\")\n        outF.write(\"#all LC file links\")\n        outF.close()\n        outF = open(indir + \"/data/temp_tp.txt\", \"w+\")\n        outF.write(\"#all LC file links\")\n        outF.close()\n        LATTEutils.data_files(indir)\n        LATTEutils.tp_files(indir)\n        LATTEutils.TOI_TCE_files(indir)\n        LATTEutils.momentum_dumps_info(indir)\n\n    def __prepare(self, candidate_df):\n        \"\"\"\n        Downloads and fills the files used by the LATTE analysis.\n        @return: the LATTE data directory, the candidate dataframe, the list of selected TICs and the FFI flag\n        \"\"\"\n        # check whether a path already exists\n        indir = self.latte_dir\n        # SAVE the new output path\n        if not os.path.exists(\"{}/_config.txt\".format(indir)):\n            self.update()\n        candidate_df['TICID'] = candidate_df['TICID'].str.replace(\"TIC \", \"\")\n        TIC_wanted = list(set(candidate_df['TICID']))\n        nlc = len(TIC_wanted)\n        logging.info(\"nlc length: {}\".format(nlc))\n        logging.info('{}/manifest.csv'.format(str(indir)))\n        if exists('{}/manifest.csv'.format(str(indir))):\n            logging.info(\"Existing manifest file found, will skip previously processed LCs and append to end of manifest file\")\n        else:\n            logging.info(\"Creating new manifest file\")\n            metadata_header = ['TICID', 'Marked Transits', 'Sectors', 'RA', 'DEC', 'Solar Rad', 'TMag', 'Teff', 'thissector', 'TOI', 'TCE', 'TCE link', 'EB', 'Systematics', 'Background Flux', 'Centroids Positions', 'Momentum Dumps', 'Aperture Size', 'In/out Flux', 'Keep', 'Comment', 'starttime']\n            with open('{}/manifest.csv'.format(str(indir)), 'w') as f:  # save in the photometry folder\n                writer = csv.writer(f, delimiter=',')\n                writer.writerow(metadata_header)\n        return indir, candidate_df, TIC_wanted, candidate_df.iloc[0][\"ffi\"]\n\n    def __process(self, indir, tic, sectors_in, transit_list, t0, period, ffi):\n        \"\"\"\n        Performs the LATTE analysis to generate PNGs and also the TPFPlotter analysis to get the field of view\n        information.\n        @param indir: the vetting source and resources directory\n        @param tic: the TIC to be processed\n        @param sectors_in: the sectors to be used for the given TIC\n        @param transit_list: the list of transits for the given TIC\n        @param t0: the candidate signal first epoch\n        @param period: the candidate signal period\n        @param ffi: whether the candidate came from FFI data\n        @return: the metadata of the processed TIC\n        \"\"\"\n        logging.info(\"Running TESS Point\")\n        sectors_all, ra, dec = LATTEutils.tess_point(indir, tic)\n        try:\n            sectors = list(set(sectors_in) & set(sectors_all))\n            if len(sectors) == 0:\n                logging.info(\"The target was not observed in the sector(s) you stated ({}). 
\\\n                Therefore take all sectors that it was observed in: {}\".format(sectors, sectors_all))\n                sectors = sectors_all\n        except Exception:\n            sectors = sectors_all\n        logging.info(\"Downloading LATTE data\")\n        sectors = np.sort(sectors)\n        if not ffi:\n            alltime, allflux, allflux_err, all_md, alltimebinned, allfluxbinned, allx1, allx2, ally1, ally2, alltime12, allfbkg, start_sec, end_sec, in_sec, tessmag, teff, srad = LATTEutils.download_data(\n                indir, sectors, tic)\n        else:\n            alltime_list, allflux, allflux_small, allflux_flat, all_md, allfbkg, allfbkg_t, start_sec, end_sec, in_sec, X1_list, X4_list, apmask_list, arrshape_list, tpf_filt_list, t_list, bkg_list, tpf_list = LATTEutils.download_data_FFI(indir, sectors, syspath, sectors_all, tic, True)\n            srad = \"-\"\n            tessmag = \"-\"\n            teff = \"-\"\n            alltime = alltime_list\n        simple = False\n        BLS = False\n        model = False\n        save = True\n        DV = True\n        # TODO decide whether to use transit_list or period\n        transit_list = []\n        last_time = alltime[len(alltime) - 1]\n        num_of_transits = int(ceil(((last_time - t0) / period)))\n        transit_lists = t0 + period * np.arange(0, num_of_transits)\n        time_as_array = np.array(alltime)\n        transits_in_data = [time_as_array[(transit > time_as_array - 0.5) & (transit < time_as_array + 0.5)] for transit in transit_lists]\n        transit_lists = transit_lists[[len(transits_in_data_set) > 0 for transits_in_data_set in transits_in_data]]\n        transit_lists = [transit_lists[x:x + 3] for x in range(0, len(transit_lists), 3)]\n        for index, transit_list in enumerate(transit_lists):\n            transit_results_dir = self.data_dir + \"/\" + str(index)\n            logging.info(\"Brewing LATTE data for transits at T0s: %s\", str(transit_list))\n            try:\n                if not ffi:\n                    LATTEbrew.brew_LATTE(tic, indir, syspath, transit_list, simple, BLS, model, save, DV, sectors, sectors_all, alltime, allflux, allflux_err, all_md, alltimebinned, allfluxbinned, allx1, allx2, ally1, ally2, alltime12, allfbkg, start_sec, end_sec, in_sec, tessmag, teff, srad, ra, dec, self.args)\n                else:\n                    LATTEbrew.brew_LATTE_FFI(tic, indir, syspath, transit_list, simple, BLS, model, save, DV, sectors, sectors_all, alltime, allflux_flat, allflux_small, allflux, all_md, allfbkg, allfbkg_t, start_sec, end_sec, in_sec, X1_list, X4_list, apmask_list, arrshape_list, tpf_filt_list, t_list, bkg_list, tpf_list, ra, dec, self.args)\n                # LATTE_DV.LATTE_DV(tic, indir, syspath, transit_list, sectors_all, simple, BLS, model, save, DV, sectors,\n                #                   sectors_all, alltime, allflux, allflux_err, all_md, alltimebinned, allfluxbinned,\n                #                   allx1, allx2, ally1, ally2, alltime12, allfbkg, start_sec, end_sec, in_sec, tessmag,\n                #                   teff, srad, ra, dec, self.args)\n                tp_downloaded = True\n                shutil.move(vetter.latte_dir + \"/\" + tic, transit_results_dir)\n            except Exception:\n                traceback.print_exc()\n                # see if it made any plots - often it just fails on the TPs as they are very large\n                if exists(\"{}/{}/{}_fullLC_md.png\".format(indir, tic, tic)):\n                    logging.warning(\"couldn't download TP but continue anyway\")\n                    tp_downloaded = False\n                    shutil.move(vetter.latte_dir + \"/\" + tic, transit_results_dir)\n                else:\n                    continue\n            # check again whether the TPs downloaded - depending on where the code failed it might still have worked.\n            if exists(\"{}/{}/{}_aperture_size.png\".format(indir, tic, tic)):\n                tp_downloaded = True\n            else:\n                tp_downloaded = False\n                logging.warning(\"code ran but no TP -- continue anyway\")\n        # -------------\n        # check whether it's a TCE or a TOI\n\n        # TCE -----\n        lc_dv = 
np.genfromtxt('{}/data/tesscurl_sector_all_dv.sh'.format(indir), dtype=str)\n TCE_links = []\n for i in lc_dv:\n if str(tic) in str(i[6]):\n TCE_links.append(i[6])\n if len(TCE_links) == 0:\n TCE = \" - \"\n TCE = False\n else:\n TCE_links = np.sort(TCE_links)\n TCE_link = TCE_links[0] # this link should allow you to acess the MAST DV report\n TCE = True\n # TOI -----\n TOI_planets = pd.read_csv('{}/data/TOI_list.txt'.format(indir), comment=\"#\")\n TOIpl = TOI_planets.loc[TOI_planets['TIC'] == float(tic)]\n TOI = False\n # TODO check why TOI is useful\n # else:\n # TOI = True\n # TOI_name = (float(TOIpl[\"Full TOI ID\"]))\n # -------------\n # return the tic so that it can be stored in the manifest to keep track of which files have already been produced\n # and to be able to skip the ones that have already been processed if the code has to be restarted.\n mnd = {}\n mnd['TICID'] = tic\n mnd['MarkedTransits'] = transit_list\n mnd['Sectors'] = sectors_all\n mnd['RA'] = ra\n mnd['DEC'] = dec\n mnd['SolarRad'] = srad\n mnd['TMag'] = tessmag\n mnd['Teff'] = teff\n mnd['thissector'] = sectors\n # make empty fields for the test to be checked\n if TOI == True:\n mnd['TOI'] = TOI_name\n else:\n mnd['TOI'] = \" \"\n if TCE == True:\n mnd['TCE'] = \"Yes\"\n mnd['TCE_link'] = TCE_link\n else:\n mnd['TCE'] = \" \"\n mnd['TCE_link'] = \" \"\n mnd['EB'] = \" \"\n mnd['Systematics'] = \" \"\n mnd['TransitShape'] = \" \"\n mnd['BackgroundFlux'] = \" \"\n mnd['CentroidsPositions'] = \" \"\n mnd['MomentumDumps'] = \" \"\n mnd['ApertureSize'] = \" \"\n mnd['InoutFlux'] = \" \"\n mnd['Keep'] = \" \"\n mnd['Comment'] = \" \"\n mnd['starttime'] = np.nanmin(alltime) if not isinstance(alltime, str) else \"-\"\n return mnd\n\n def vetting(self, candidate, cpus):\n \"\"\"\n Performs the LATTE vetting procedures\n @param candidate: the candidate dataframe containing TICID, period, t0, transits and sectors data.\n @param cpus: the number of cpus to be used. 
This parameter is of no use yet.\n \"\"\"\n indir, df, TIC_wanted, ffi = self.__prepare(candidate)\n for tic in TIC_wanted:\n # check the existing manifest to see if I've processed this file!\n manifest_table = pd.read_csv('{}/manifest.csv'.format(str(indir)))\n # get a list of the current URLs that exist in the manifest\n urls_exist = manifest_table['TICID']\n # get the transit time list\n period = df.loc[df['TICID'] == tic]['period'].iloc[0]\n t0 = df.loc[df['TICID'] == tic]['t0'].iloc[0]\n transit_list = ast.literal_eval(((df.loc[df['TICID'] == tic]['transits']).values)[0])\n candidate_row = candidate.iloc[0]\n try:\n sectors_in = ast.literal_eval(str(((df.loc[df['TICID'] == tic]['sectors']).values)[0]))\n if (type(sectors_in) == int) or (type(sectors_in) == float):\n sectors = [sectors_in]\n else:\n sectors = list(sectors_in)\n except:\n sectors = [0]\n index = 0\n vetting_dir = self.data_dir + \"/vetting_\" + str(index)\n while os.path.exists(vetting_dir) or os.path.isdir(vetting_dir):\n vetting_dir = self.data_dir + \"/vetting_\" + str(index)\n index = index + 1\n os.mkdir(vetting_dir)\n self.data_dir = vetting_dir\n ra = None\n dec = None\n try:\n res = self.__process(indir, tic, sectors, transit_list, t0, period, ffi)\n ra = res['RA']\n dec = res['DEC']\n if res['TICID'] == -99:\n logging.error('something went wrong with the LATTE results')\n except Exception as e:\n traceback.print_exc()\n try:\n sectors_all, ra, dec = LATTEutils.tess_point(indir, tic)\n except Exception as e1:\n traceback.print_exc()\n if ra is not None and dec is not None:\n result_dir = self.vetting_field_of_view(indir, tic, ra, dec, sectors)\n shutil.move(result_dir, vetting_dir + \"/tpfplot\")\n else:\n logging.info(\"Can't generate tpfplot because RA and DEC are missing.\")\n # TODO improve this condition to check whether tic, sectors and transits exist\n # if not np.isin(tic, urls_exist):\n # # make sure the file is opened as append only\n # with open('{}/manifest.csv'.format(str(indir)), 'a') as tic: # save in the photometry folder\n # writer = csv.writer(tic, delimiter=',')\n # metadata_data = [res['TICID']]\n # metadata_data.append(res['MarkedTransits'])\n # metadata_data.append(res['Sectors'])\n # metadata_data.append(res['RA'])\n # metadata_data.append(res['DEC'])\n # metadata_data.append(res['SolarRad'])\n # metadata_data.append(res['TMag'])\n # metadata_data.append(res['Teff'])\n # metadata_data.append(res['thissector'])\n # metadata_data.append(res['TOI'])\n # metadata_data.append(res['TCE'])\n # metadata_data.append(res['TCE_link'])\n # metadata_data.append(res['EB'])\n # metadata_data.append(res['Systematics'])\n # metadata_data.append(res['BackgroundFlux'])\n # metadata_data.append(res['CentroidsPositions'])\n # metadata_data.append(res['MomentumDumps'])\n # metadata_data.append(res['ApertureSize'])\n # metadata_data.append(res['InoutFlux'])\n # metadata_data.append(res['Keep'])\n # metadata_data.append(res['Comment'])\n # metadata_data.append(res['starttime'])\n # writer.writerow(metadata_data)\n # return TIC_wanted\n\n def vetting_field_of_view(self, indir, tic, ra, dec, sectors):\n \"\"\"\n Runs TPFPlotter to get field of view data.\n @param indir: the data source directory\n @param tic: the target id\n @param ra: the right ascension of the target\n @param dec: the declination of the target\n @param sectors: the sectors where the target was observed\n @return: the directory where resulting data is stored\n \"\"\"\n maglim = 6\n sectors_search = None if sectors is not None and len(sectors) == 0 
else sectors\n        logging.info(\"Preparing target pixel files for field of view plots\")\n        tpf_source = lightkurve.search_targetpixelfile(\"TIC \" + str(tic), sector=sectors, mission='TESS')\n        if tpf_source is None or len(tpf_source) == 0:\n            ra_str = str(ra)\n            dec_str = \"+\" + str(dec) if dec >= 0 else str(dec)\n            tpf_source = lightkurve.search_tesscut(ra_str + \" \" + dec_str, sector=sectors_search)\n        for idx in range(0, len(tpf_source)):\n            tpf = tpf_source[idx].download(cutout_size=(12, 12))\n            pipeline = True\n            plt.close()\n            fig = plt.figure(figsize=(6.93, 5.5))\n            gs = gridspec.GridSpec(1, 3, height_ratios=[1], width_ratios=[1, 0.05, 0.01])\n            gs.update(left=0.05, right=0.95, bottom=0.12, top=0.95, wspace=0.01, hspace=0.03)\n            ax1 = plt.subplot(gs[0, 0])\n            # TPF plot\n            mean_tpf = np.mean(tpf.flux.value, axis=0)\n            nx, ny = np.shape(mean_tpf)\n            norm = ImageNormalize(stretch=stretching.LogStretch())\n            division = int(np.log10(np.nanmax(tpf.flux.value)))\n            splot = plt.imshow(np.nanmean(tpf.flux, axis=0) / 10 ** division, norm=norm, cmap=\"viridis\", extent=[tpf.column, tpf.column + ny, tpf.row, tpf.row + nx], origin='lower', zorder=0)\n            # Pipeline aperture\n            if pipeline:\n                aperture_mask = tpf.pipeline_mask\n                aperture = tpf._parse_aperture_mask(aperture_mask)\n                maskcolor = 'lightgray'\n                logging.info(\"  --> Using pipeline aperture for sector %s...\", tpf.sector)\n            else:\n                aperture_mask = tpf.create_threshold_mask(threshold=10, reference_pixel='center')\n                aperture = tpf._parse_aperture_mask(aperture_mask)\n                maskcolor = 'lightgray'\n                logging.info(\"  --> Using threshold aperture for target %s...\", tpf.sector)\n\n            for i in range(aperture.shape[0]):\n                for j in range(aperture.shape[1]):\n                    if aperture_mask[i, j]:\n                        ax1.add_patch(patches.Rectangle((j + tpf.column, i + tpf.row), 1, 1, color=maskcolor, fill=True, alpha=0.4))\n                        ax1.add_patch(patches.Rectangle((j + tpf.column, i + tpf.row), 1, 1, color=maskcolor, fill=False, alpha=1, lw=2))\n            # Gaia sources\n            gaia_id, mag = tpfplotter.get_gaia_data_from_tic(tic)\n            r, res = tpfplotter.add_gaia_figure_elements(tpf, magnitude_limit=mag + float(maglim), targ_mag=mag)\n            x, y, gaiamags = r\n            x, y, gaiamags = np.array(x) + 0.5, np.array(y) + 0.5, np.array(gaiamags)\n            size = 128.0 / 2 ** (gaiamags - mag)\n            plt.scatter(x, y, s=size, c='red', alpha=0.6, edgecolor=None, zorder=10)\n            # Gaia source for the target\n            this = np.where(np.array(res['Source']) == int(gaia_id))[0]\n            plt.scatter(x[this], y[this], marker='x', c='white', s=32, zorder=11)\n            # Legend\n            add = 0\n            if int(maglim) % 2 != 0:\n                add = 1\n            maxmag = int(maglim) + add\n            legend_mags = np.linspace(-2, maxmag, int((maxmag + 2) / 2 + 1))\n            fake_sizes = mag + legend_mags  # np.array([mag-2,mag,mag+2,mag+5, mag+8])\n            for f in fake_sizes:\n                size = 128.0 / 2 ** (f - mag)\n                plt.scatter(0, 0, s=size, c='red', alpha=0.6, edgecolor=None, zorder=10, label=r'$\\Delta m=$ ' + str(int(f - mag)))\n            ax1.legend(fancybox=True, framealpha=0.7)\n            # Source labels\n            dist = np.sqrt((x - x[this]) ** 2 + (y - y[this]) ** 2)\n            dsort = np.argsort(dist)\n            for d, elem in enumerate(dsort):\n                if dist[elem] < 6:\n                    plt.text(x[elem] + 0.1, y[elem] + 0.1, str(d + 1), color='white', zorder=100)\n            # Orientation arrows\n            tpfplotter.plot_orientation(tpf)\n            # Labels and titles\n            plt.xlim(tpf.column, tpf.column + ny)\n            plt.ylim(tpf.row, tpf.row + nx)\n            plt.xlabel('Pixel Column Number', fontsize=16)\n            plt.ylabel('Pixel Row Number', fontsize=16)\n            plt.title('Coordinates ' + tic + ' - Sector ' + str(tpf.sector), fontsize=16) 
# + ' - Camera '+str(tpf.camera)) #\n # Colorbar\n cbax = plt.subplot(gs[0, 1]) # Place it where it should be.\n pos1 = cbax.get_position() # get the original position\n pos2 = [pos1.x0 - 0.05, pos1.y0, pos1.width, pos1.height]\n cbax.set_position(pos2) # set a new position\n cb = Colorbar(ax=cbax, cmap=\"viridis\", mappable=splot, orientation='vertical', ticklocation='right')\n plt.xticks(fontsize=14)\n exponent = r'$\\times 10^' + str(division) + '$'\n cb.set_label(r'Flux ' + exponent + r' (e$^-$)', labelpad=10, fontsize=16)\n save_dir = indir + \"/tpfplot\"\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n plt.savefig(save_dir + '/TPF_Gaia_TIC' + tic + '_S' + str(tpf.sector) + '.pdf')\n # Save Gaia sources info\n dist = np.sqrt((x - x[this]) ** 2 + (y - y[this]) ** 2)\n GaiaID = np.array(res['Source'])\n srt = np.argsort(dist)\n x, y, gaiamags, dist, GaiaID = x[srt], y[srt], gaiamags[srt], dist[srt], GaiaID[srt]\n IDs = np.arange(len(x)) + 1\n inside = np.zeros(len(x))\n for i in range(aperture.shape[0]):\n for j in range(aperture.shape[1]):\n if aperture_mask[i, j]:\n xtpf, ytpf = j + tpf.column, i + tpf.row\n _inside = np.where((x > xtpf) & (x < xtpf + 1) &\n (y > ytpf) & (y < ytpf + 1))[0]\n inside[_inside] = 1\n data = Table([IDs, GaiaID, x, y, dist, dist * 21., gaiamags, inside.astype('int')],\n names=['# ID', 'GaiaID', 'x', 'y', 'Dist_pix', 'Dist_arcsec', 'Gmag', 'InAper'])\n ascii.write(data, save_dir + '/Gaia_TIC' + tic + '_S' + str(tpf.sector) + '.dat', overwrite=True)\n return save_dir\n\n\nif __name__ == '__main__':\n ap = ArgumentParser(description='Vetting of Sherlock objects of interest')\n ap.add_argument('--object_dir', help=\"If the object directory is not your current one you need to provide the \"\n \"ABSOLUTE path\", required=False)\n ap.add_argument('--candidate', type=int, default=None, help=\"The candidate signal to be used.\", required=False)\n ap.add_argument('--properties', help=\"The YAML file to be used as input.\", required=False)\n ap.add_argument('--cpus', type=int, default=None, help=\"The number of CPU cores to be used.\", required=False)\n ap.add_argument('--no_validate', dest='validate', action='store_false',\n help=\"Whether to avoid running statistical validation\")\n args = ap.parse_args()\n vetter = Vetter(args.object_dir, args.validate)\n file_dir = vetter.object_dir + \"/vetting.log\"\n if os.path.exists(file_dir):\n os.remove(file_dir)\n formatter = logging.Formatter('%(message)s')\n logger = logging.getLogger()\n while len(logger.handlers) > 0:\n logger.handlers.pop()\n logger.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.INFO)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n handler = logging.FileHandler(file_dir)\n handler.setLevel(logging.INFO)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logging.info(\"Starting vetting\")\n if args.candidate is None:\n user_properties = yaml.load(open(args.properties), yaml.SafeLoader)\n candidate = pd.DataFrame(columns=['id', 'transits', 'sectors', 'FFI'])\n candidate = candidate.append(user_properties, ignore_index=True)\n candidate = candidate.rename(columns={'id': 'TICID'})\n candidate['TICID'] = candidate[\"TICID\"].apply(str)\n cpus = user_properties[\"settings\"][\"cpus\"]\n else:\n candidate_selection = int(args.candidate)\n candidates = pd.read_csv(vetter.object_dir + \"/candidates.csv\")\n if candidate_selection < 1 or candidate_selection > len(candidates.index):\n raise SystemExit(\"User selected a 
wrong candidate number.\")\n        candidates = candidates.rename(columns={'Object Id': 'TICID'})\n        candidate = candidates.iloc[[candidate_selection - 1]]\n        candidate['number'] = [candidate_selection]\n        vetter.data_dir = vetter.object_dir\n        logging.info(\"Selected signal number \" + str(candidate_selection))\n    if args.cpus is None:\n        cpus = multiprocessing.cpu_count() - 1\n    else:\n        cpus = args.cpus\n    vetter.vetting(candidate, cpus)\n","sub_path":"sherlockpipe/vet.py","file_name":"vet.py","file_ext":"py","file_size_in_byte":26718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"164057817","text":"from django.shortcuts import render\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import redirect\nfrom django.db.models import Q\nfrom .forms import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import EmailMessage\nfrom datetime import date\n\n\n# Create your views here.\n# @login_required(login_url='/login/')\ndef home(request):\n    todaydate = date.today()\n    print(\"today's date:\")\n    print(todaydate)\n    consultants = Consultant.objects.all()\n    content = {\n        \"consultants\": consultants,\n        \"todaydate\": todaydate\n    }\n    return render(request, 'base.html', content)\n\n# @login_required(login_url='/login/')\ndef login(request):\n    return render(request, 'registration/login.html')\n\n# @login_required(login_url='/login/')\ndef consultant_liste(request):\n    consultants = Consultant.objects.all()\n    content = {\n        \"consultants\": consultants\n    }\n    return render(request, 'consultant_liste.html', content)\n\n# @login_required(login_url='/login/')\ndef consultant_detail(request, idConsultant=None):\n    consultant = Consultant.objects.filter(id=idConsultant).first()\n    content = {\n        \"consultant\": consultant,\n    }\n    return render(request, 'consultant_detail.html', content)\n\n# @login_required(login_url='/login/')\ndef consultant_modifier(request, idConsultant=None):\n    consultant = Consultant.objects.filter(id=idConsultant).first()\n    form = ConsultantForm(\n        request.POST or None,\n        request.FILES or None,\n        instance=consultant,\n        initial={\n            'businessManager': consultant.businessManager,\n            'chargeDeRecrutement': consultant.chargeDeRecrutement,\n            'assistantDAgence': consultant.assistantDAgence,\n        })\n    if request.POST:\n        if form.is_valid():\n            instance = form.save(commit=False)\n            instance.save()\n            return redirect('consultant-liste')\n    content = {\n        \"form\": form\n    }\n    return render(request, 'consultant_modifier.html', content)\n\n\ndef send_email(request, idConsultant=None):\n    consultant = Consultant.objects.filter(id=idConsultant).first()\n    form = ConsultantForm(\n        request.POST or None,\n        instance=consultant,\n    )\n    if request.POST:\n        if form.is_valid():\n            instance = form.save(commit=False)\n            instance.save()\n            return redirect('consultant-liste')\n    content = {\n        \"form\": form\n    }\n    return render(request, 'send_email.html', content)\n\n# @login_required(login_url='/login/')\n#def consultant_creer(request):\n#    form = ConsultantForm(request.POST or None, request.FILES)\n#    if request.POST:\n#        if form.is_valid():\n#            instance = form.save(commit=False)\n#            instance.save()\n#            return redirect('consultant-liste')\n#    content = {\n#        \"form\": form\n#    }\n#    return render(request, 'consultant_creer.html', content)\ndef consultant_creer(request):\n    #consultant = Consultant.objects.filter(id=idConsultant).first()\n    form = ConsultantForm(\n        request.POST or None,\n        request.FILES or None,\n        initial={\n            'businessManager': \"\",\n            
'chargeDeRecrutement':\"\",\n 'assistantDAgence':\"\",\n })\n if request.POST:\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n return redirect('consultant-liste')\n content = {\n \"form\": form\n }\n return render(request, 'consultant_creer.html', content)\n\n\n\n\n# @login_required(login_url='/login/')\ndef utilisateur_liste(request):\n users = Utilisateur.objects.all()\n for user in users:\n user.get_superieur()\n content = {\n \"users\": users\n }\n return render(request, 'utilisateur_liste.html', content)\n\n# @login_required(login_url='/login/')\ndef utilisateur_detail(request, idUser=None):\n utilisateur = Utilisateur.objects.filter(id=idUser).first()\n utilisateur.get_superieur()\n consultants = Consultant.objects.filter(\n Q(businessManager__user=utilisateur.user)|\n Q(chargeDeRecrutement__user=utilisateur.user)|\n Q(assistantDAgence__user=utilisateur.user)\n )\n content = {\n \"utilisateur\": utilisateur,\n \"consultants\": consultants\n }\n return render(request, 'utilisateur_detail.html', content)\n\ndef user_detail(request, user_id):\n user = User.objects.get(id=user_id)\n content = {\n \"user\": user,\n }\n return render(request, 'user_detail.html', content)\n\n# @login_required(login_url='/login/')\ndef utilisateur_modifier(request, idUser=None):\n \n utilisateur = Utilisateur.objects.filter(id=idUser).first()\n utilisateur.get_superieur()\n form = UtilisateurForm(\n request.POST or None,\n request.FILES or None,\n instance=utilisateur,\n )\n if request.POST:\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n return redirect('utilisateur-liste')\n content = {\n \"form\": form\n }\n return render(request, 'utilisateur_modifier.html', content)\n \n \n\n# @login_required(login_url='/login/')\ndef utilisateur_creer(request):\n form = UtilisateurForm()\n if request.POST:\n form = UtilisateurForm(request.POST)\n if form.is_valid():\n instance = form.save(commit=False)\n username = instance.prenom[0] + instance.nom\n string = username.split(' ')\n username = '-'.join(string)\n user = User.objects.create_user(\n username=username,\n password=\"12345678\",\n email=\"hello@la.com\",\n first_name=instance.nom,\n last_name=instance.prenom\n )\n user.is_staff = True\n user.save()\n\n if instance.type == \"Chargé de recrutement\":\n cr = ChargeDeRecrutement()\n cr.nom = instance.nom\n if instance.superieur:\n superieur = ChargeDeRecrutement.objects.filter(id=instance.superieur).first()\n if superieur:\n cr.parent = superieur\n cr.prenom = instance.prenom\n cr.type = instance.type\n cr.mdp = instance.mdp\n cr.superieur = \"---\"\n cr.user = user\n cr.save()\n if instance.type == \"Business Manager\":\n bm = BusinessManager()\n if instance.superieur:\n superieur = BusinessManager.objects.filter(id=int(instance.superieur)).first()\n if superieur:\n bm.parent = superieur\n bm.nom = instance.nom\n bm.prenom = instance.prenom\n bm.type = instance.type\n bm.mdp = instance.mdp\n bm.user = user\n bm.superieur = \"---\"\n bm.save()\n if instance.type == \"Assistant d'agence\":\n aa = AssistantAgence()\n aa.nom = instance.nom\n aa.prenom = instance.prenom\n aa.type = instance.type\n aa.mdp = instance.mdp\n aa.user = user\n aa.save()\n if instance.type == \"Responsable des ressources humaines\":\n rrh = ResponsableRessourceHumaine()\n rrh.nom = instance.nom\n rrh.prenom = instance.prenom\n rrh.type = instance.type\n rrh.mdp = instance.mdp\n rrh.user = user\n rrh.save()\n if instance.type == \"Contrôleur de gestion\":\n cg = 
ControleurDeGestion()\n                cg.nom = instance.nom\n                cg.prenom = instance.prenom\n                cg.type = instance.type\n                cg.mdp = instance.mdp\n                cg.user = user\n                cg.save()\n            if instance.type == \"Administrateur\":\n                admin = Administrateur()\n                admin.nom = instance.nom\n                admin.prenom = instance.prenom\n                admin.type = instance.type\n                admin.mdp = instance.mdp\n                admin.user = user\n                admin.save()\n            return redirect('utilisateur-liste')\n    content = {\n        \"form\": form,\n    }\n    return render(request, 'utilisateur_creer.html', content)\n\ndef utilisateur_supprimer(request, idUser=None):\n    utilisateur = Utilisateur.objects.filter(id=idUser).first()\n    utilisateur.delete()\n    return render(request, 'utilisateur_supprimer.html')\n","sub_path":"Akkannuaire/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"293392042","text":"# -*- coding: utf-8 -*-\nfrom openprocurement.api.utils import (\n    context_unpack,\n    json_view,\n)\n\nfrom openprocurement.tender.core.utils import save_tender, optendersresource\n\nfrom openprocurement.tender.openua.validation import (\n    validate_complaint_post_data,\n    validate_complaint_post_complaint_status,\n    validate_complaint_post,\n    validate_complaint_post_review_date,\n    validate_complaint_post_complaint_type,\n)\nfrom openprocurement.api.views.base import BaseResource\n\n\n@optendersresource(\n    name=\"aboveThresholdUA:Tender Complaint Posts\",\n    collection_path=\"/tenders/{tender_id}/complaints/{complaint_id}/posts\",\n    path=\"/tenders/{tender_id}/complaints/{complaint_id}/posts/{post_id}\",\n    procurementMethodType=\"aboveThresholdUA\",\n    description=\"Tender complaint posts\",\n)\nclass TenderComplaintPostResource(BaseResource):\n    @json_view(\n        content_type=\"application/json\",\n        validators=(\n            validate_complaint_post_complaint_type,\n            validate_complaint_post_data,\n            validate_complaint_post,\n            validate_complaint_post_complaint_status,\n            validate_complaint_post_review_date,\n        ),\n        permission=\"edit_complaint\",\n    )\n    def collection_post(self):\n        \"\"\"\n        Create a complaint post\n        \"\"\"\n        complaint = self.context\n        tender = self.request.validated[\"tender\"]\n        post = self.request.validated[\"post\"]\n        post.author = self.request.authenticated_role\n        for document in post.documents or []:\n            document.author = self.request.authenticated_role\n        complaint.posts.append(post)\n        if save_tender(self.request, validate=True):\n            self.LOGGER.info(\n                \"Created post {}\".format(post.id),\n                extra=context_unpack(\n                    self.request,\n                    {\"MESSAGE_ID\": \"tender_complaint_post_create\"},\n                    {\"post_id\": post.id}\n                ),\n            )\n            self.request.response.status = 201\n            self.request.response.headers[\"Location\"] = self.generate_location_url()\n            return {\"data\": post.serialize(\"view\")}\n\n    @json_view(permission=\"view_tender\")\n    def collection_get(self):\n        \"\"\"\n        List complaint posts\n        \"\"\"\n        return {\"data\": [i.serialize(\"view\") for i in self.context.get(\"posts\", [])]}\n\n    @json_view(permission=\"view_tender\")\n    def get(self):\n        \"\"\"\n        Retrieve a complaint post\n        \"\"\"\n        return {\"data\": self.context.serialize(\"view\")}\n\n    def generate_location_url(self):\n        return self.request.route_url(\n            \"{}:Tender Complaint Posts\".format(self.request.validated[\"tender\"].procurementMethodType),\n            tender_id=self.request.validated[\"tender_id\"],\n            complaint_id=self.request.validated[\"complaint_id\"],\n            post_id=self.request.validated[\"post\"][\"id\"],\n            
)\n","sub_path":"src/openprocurement/tender/openua/views/complaint_post.py","file_name":"complaint_post.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"330551255","text":"import nltk\nfrom nltk.tag import pos_tag, map_tag\nfrom nltk.sentiment import SentimentIntensityAnalyzer\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\nclass HarassBlock:\n url=''\n \n def analyze(self, url):\n req = requests.get(url) # Get the page\n soup = BeautifulSoup(req.content, 'html.parser') # Parse Text from HTML\n for script in soup([\"script\",\"style\"]):\n script.extract()\n text = soup.find('body').get_text()\n text = re.sub(r'\\n\\s*\\n', r'\\n', text.strip(), flags=re.M) \n \n textLines = text.split('\\n') # Split text into lines array\n \n sid = SentimentIntensityAnalyzer()\n i=0\n lineScoreMap = dict()\n for line in textLines:\n # print(textLines[i])\n scores = sid.polarity_scores(line)\n # print('line' + str(i) + ' - {0}: {1}, '.format('compound', scores['compound']), end='') \n lineScoreMap[textLines[i]] = scores['compound']\n i+=1\n \n harassmentScores = []\n uniqueBadWordsDetected = set([])\n for c in range(len(textLines)):\n # print('line ' + str(c) + ' - ' + textLines[c])\n \n\n\n\n\n # print(textLine[0])\n tokenizedText = nltk.word_tokenize(textLines[c]) # Tokenize the line into words\n posTagged = pos_tag(tokenizedText) # Tag pronouns... ect\n simplifiedTags = [(word, map_tag('en-ptb', 'universal', tag)) for word, tag in posTagged]\n \n badWords = self.readBadWords() # Read list of badwords\n # print(badWords)\n \n # print(simplifiedTags)\n\n for i in range(len(simplifiedTags) - 1): # loop through word,tag array\n if simplifiedTags[i][1] == 'PRON' and simplifiedTags[i+1][0] in badWords:\n print('Harassment Detected (PRONOUN BADWORD) - ' + simplifiedTags[i+1][0])\n uniqueBadWordsDetected.add(simplifiedTags[i+1][0])\n harassmentScores.append(lineScoreMap[textLines[c]])\n # print(lineScoreMap[textLines[c]])\n elif simplifiedTags[i][0] in badWords and simplifiedTags[i+1][1] == 'PRON':\n print('Harassment Detected (BADWORD PRONOUN) - ' + simplifiedTags[i][0])\n uniqueBadWordsDetected.add(simplifiedTags[i][0])\n harassmentScores.append(lineScoreMap[textLines[c]])\n elif simplifiedTags[i][1] == 'PRON':\n print (simplifiedTags[i+2:])\n for j in simplifiedTags[i+2:]:\n if j[0] in badWords:\n print('Harassment Detected (PRONOUN ... 
BADWORD) - ' + j[0])\n uniqueBadWordsDetected.add(j[0])\n harassmentScores.append(lineScoreMap[textLines[c]])\n print(harassmentScores)\n \n totalHarassmentRating = sum(harassmentScores)\n print(totalHarassmentRating)\n return totalHarassmentRating\n\n def readBadWords(self):\n with open('google_badlist.txt', 'r') as badWords:\n return [badWord.rstrip() for badWord in badWords.readlines()]\n\n\n\ndef main():\n instance = HarassBlock()\n # instance.analyze('http://www.urbandictionary.com/define.php?term=fuck%20you')\n instance.analyze('https://ghostbin.com/paste/7yhn5')\n\nif __name__ == '__main__':\n main()\n","sub_path":"harass-block.py","file_name":"harass-block.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"507733555","text":"from odoo import api, fields, models, _, tools\nfrom datetime import datetime, timedelta\nfrom odoo.exceptions import UserError, ValidationError\n\nclass Manufacturing(models.Model):\n _inherit = 'mrp.production'\n\n project = fields.Many2one('project.project',\"Project\")\n\n @api.onchange('project')\n def set_mo_in_project(self):\n if self.project:\n self.project.manufacturing_order = self.id.origin\n\n\nclass WorkOrders(models.Model):\n _inherit = 'mrp.workorder'\n\n project_task = fields.Many2one('project.task',\"Project Task\")\n\n @api.onchange('project_task')\n def set_wo_in_tasks(self):\n if self.project_task:\n self.project_task.work_order = self.id.origin\n\n @api.depends('production_id.project')\n def set_tasks_domain(self):\n if self.production_id.project:\n res = {}\n self.tasks_domain = True\n self.project_id = self.production_id.project.id\n res['domain'] = {'project_task': [('project_id.id', '=', self.production_id.project.id)]}\n return res\n else:\n self.tasks_domain = False\n\n project_id = fields.Integer(store=True,string=\"Project \")\n tasks_domain = fields.Boolean(compute='set_tasks_domain')\n\n def button_finish(self):\n for workorder in self:\n if workorder.project_task:\n if (workorder.workcenter_id.is_subcontracted and workorder.project_task.kanban_state == 'done') or (not workorder.workcenter_id.is_subcontracted):\n end_date = datetime.now()\n if workorder.state in ('done', 'cancel'):\n continue\n workorder.end_all()\n vals = {\n 'state': 'done',\n 'date_finished': end_date,\n 'date_planned_finished': end_date\n }\n if not workorder.project_task.kanban_state == 'done':\n workorder.project_task.kanban_state = 'done'\n if not workorder.date_start:\n vals['date_start'] = end_date\n if not workorder.date_planned_start or end_date < workorder.date_planned_start:\n vals['date_planned_start'] = end_date\n workorder.write(vals)\n\n workorder._start_nextworkorder()\n return True\n else:\n raise ValidationError(_(\"Please finish the corresponding task first !\"))\n elif workorder.workcenter_id.is_subcontracted and not workorder.project_task:\n raise ValidationError(_(\"Please select a task !\"))\n\n elif not workorder.workcenter_id.is_subcontracted and not workorder.project_task:\n end_date = datetime.now()\n if workorder.state in ('done', 'cancel'):\n continue\n workorder.end_all()\n vals = {\n 'state': 'done',\n 'date_finished': end_date,\n 'date_planned_finished': end_date\n }\n if not workorder.date_start:\n vals['date_start'] = end_date\n if not workorder.date_planned_start or end_date < workorder.date_planned_start:\n vals['date_planned_start'] = end_date\n workorder.write(vals)\n\n workorder._start_nextworkorder()\n return 
True\n\n\n\nclass MrpWorkcenter(models.Model):\n _inherit = 'mrp.workcenter'\n\n is_subcontracted = fields.Boolean(\"Is Subcontracted\")","sub_path":"alm_custom/models/manufacturing.py","file_name":"manufacturing.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"459407460","text":"#!/bin/python3\n\nimport os\nimport sys\n\n#\n# Complete the swapNodes function below.\n#\ndef swapNodes(indexes, queries):\n #\n # Write your code here.\n #\n sys.setrecursionlimit(1500)\n\n max_node_value = max([max(idx) for idx in indexes])\n heights = [0 for i in range(0,max_node_value+1)]\n left = [0 for i in range(0,max_node_value+1)]\n right = [0 for i in range(0,max_node_value+1)]\n\n cur_node = max_node_value\n for idx in reversed(indexes):\n left[cur_node] = idx[0]\n right[cur_node] = idx[1]\n cur_node -= 1\n\n nodeHeights(left, right, 1, heights, 1)\n idxHeights = indexHeights(indexes, heights)\n\n output = []\n for query in queries:\n multiples = set(range(0, max(idxHeights)+1, query)[1:])\n if 1 in multiples:\n tmp = left[1]\n left[1] = right[1]\n right[1] = tmp\n for i, idx in enumerate(indexes):\n if idxHeights[i] in multiples:\n #swap left's children\n tmp = left[indexes[i][0]]\n left[indexes[i][0]] = right[indexes[i][0]]\n right[indexes[i][0]] = tmp\n #swap right's children\n tmp = left[indexes[i][1]]\n left[indexes[i][1]] = right[indexes[i][1]]\n right[indexes[i][1]] = tmp\n\n output.append(inOrderArr(left, right, 1))\n\n return output\n\n\ndef indexHeights(indexes, nodeHeights):\n\n idxHeights = [0 for idx in indexes]\n for i,idx in enumerate(indexes):\n if idx[0]==-1 and idx[1]==-1:\n idxHeights[i] = -1\n else:\n idxHeights[i] = nodeHeights[max(idx)]\n return idxHeights\n\ndef inOrderArr(left, right, cur_node):\n if (cur_node == -1):\n return []\n else:\n return inOrderArr(left, right, left[cur_node]) + [cur_node] + \\\n inOrderArr(left, right, right[cur_node])\n\ndef nodeHeights(left, right, cur_node, heights, height):\n if (cur_node == -1):\n pass\n else:\n nodeHeights(left, right, left[cur_node], heights, height+1)\n heights[cur_node] = height\n nodeHeights(left, right, right[cur_node], heights, height+1)\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n indexes = []\n\n for _ in range(n):\n indexes.append(list(map(int, input().rstrip().split())))\n\n queries_count = int(input())\n\n queries = []\n\n for _ in range(queries_count):\n queries_item = int(input())\n queries.append(queries_item)\n\n result = swapNodes(indexes, queries)\n\n fptr.write('\\n'.join([' '.join(map(str, x)) for x in result]))\n #fptr.write('\\n')\n\n fptr.close()\n","sub_path":"iprep/search/swap_nodes_algo.py","file_name":"swap_nodes_algo.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"336657402","text":"# This file includes some methods to analyze the structure\n# of atoms contained in a cfg_class object\n\nfrom read_cfg import *\n\ndef pair_correlation(cfg, low, high, nbins):\n atoms = cfg.atoms\n natoms = cfg.natoms\n hist = np.zeros(nbins)\n for i in range(natoms):\n atomi = atoms[i]\n neigh_dist = [neighii.dist for neighii in atomi.neigh]\n ihist, edges = np.histogram(neigh_dist, bins=nbins, range=(low, high))\n hist = [hist[ii] + ihist[ii] for ii in range(len(hist))]\n\n center = [(edges[ii] + edges[ii + 1]) / 2 for ii in range(0, len(edges) - 1)]\n deltar = 
center[1] - center[0]\n\n g = [hist[ii] / (4 * math.pi * (center[ii]) ** 2 * deltar * natoms ** 1) \\\n for ii in range(len(hist))]\n\n return [center, g]\n\n# bond orientational order diagram\ndef bood(cfg, min_neigh, phi_bin=50, theta_bin=50):\n atoms = cfg.atoms\n hist = np.zeros( (phi_bin+1, theta_bin+1) )\n theta_incre = math.pi / (theta_bin)\n phi_incre = 2*math.pi / (phi_bin)\n\n for atomi in atoms:\n neigh_num = atomi.neigh_num()\n if neigh_num < min_neigh:\n continue\n ri = atomi.r\n for neighj in atomi.neigh:\n rj = neighj.r\n theta_ij = int( math.floor(theta(ri, rj)/theta_incre) )\n phi_ij = int( math.floor(phi(ri, rj)/phi_incre) )\n hist[phi_ij, theta_ij] += 1\n\n return hist","sub_path":"structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"440608248","text":"import re\nfrom decimal import Decimal\nimport datetime\n\nfrom django.db import models\nfrom django.db.models import aggregates\nfrom django.utils import six\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom psycopg2._range import Range, DateRange, DateTimeTZRange, NumericRange\n\nfrom postgres.forms import range_fields\n\n\nRANGE_RE = re.compile(\n r'^\\W*(?P[\\[\\(])'\n r'(?P.+)?'\n r','\n r'(?P.+)?'\n r'(?P[\\]\\)])\\W*$'\n)\n\nNUMBER_RE = re.compile(\n r'^\\d*\\.?\\d*$'\n)\nDATE_RE = re.compile(\n r'^(?P\\d\\d\\d\\d)-(?P(0\\d)|(1[012]))-(?P([012]\\d)|(3[01]))$'\n)\nDATETIME_RE = re.compile(\n r'^(?P\\d\\d\\d\\d)-(?P(0\\d)|(1[012]))-(?P([012]\\d)|(3[01]))'\n r' (?P\\d\\d):(?P\\d\\d):(?P\\d\\d)$'\n)\n\n\ndef cast(value):\n if not value:\n return None\n\n if NUMBER_RE.match(value):\n if '.' in value:\n return Decimal(value)\n return int(value)\n\n if DATE_RE.match(value):\n return datetime.date(**dict(\n (key, int(value)) for key, value in DATE_RE.match(value).groupdict()\n ))\n\n if DATETIME_RE.match(value):\n return datetime.datetime(**dict(\n (key, int(value)) for key, value in DATETIME_RE.match(value).groupdict()\n ))\n\n return None\n\n\ndef range_from_string(cls, value):\n match = RANGE_RE.match(value)\n if not match:\n raise forms.ValidationError(_('Invalid range string'))\n\n data = match.groupdict()\n data['bounds'] = '%s%s' % (\n data.pop('lower_bound'), data.pop('upper_bound')\n )\n\n data['lower'] = cast(data['lower'])\n data['upper'] = cast(data['upper'])\n\n return cls(**data)\n\n\ndef is_range(value):\n if isinstance(value, six.string_types):\n return RANGE_RE.match(value)\n\n if isinstance(value, Range):\n return True\n\n # Does it quack like a range?\n return all([hasattr(value, x) for x in ['upper', 'lower', 'upper_inc', 'lower_inc']])\n\n\nclass RangeField(models.Field):\n range_type = Range\n\n def __init__(self, *args, **kwargs):\n self.empty = kwargs.pop('empty', True)\n super(RangeField, self).__init__(*args, **kwargs)\n\n def formfield(self, **kwargs):\n defaults = {\n 'form_class': self.formfield_class,\n 'range_type': self.range_type\n }\n defaults.update(kwargs)\n return super(RangeField, self).formfield(**defaults)\n\n def deconstruct(self):\n name, path, args, kwargs = super(RangeField, self).deconstruct()\n path = 'postgres.fields.range_fields.{}'.format(self.__class__.__name__)\n return name, path, args, kwargs\n\n\nclass NumericRangeField(RangeField):\n range_type = NumericRange\n formfield_class = range_fields.NumericRangeField\n\n\nclass Int4RangeField(NumericRangeField):\n def db_type(self, connection):\n return 
'int4range'\n\n def get_internal_type(self):\n return 'Int4RangeField'\n\n\nclass Int8RangeField(NumericRangeField):\n def db_type(self, connection):\n return 'int8range'\n\n def get_internal_type(self):\n return 'Int8RangeField'\n\n\nclass DateRangeField(RangeField):\n range_type = DateRange\n formfield_class = range_fields.DateRangeField\n\n def db_type(self, connection):\n return 'daterange'\n\n def get_internal_type(self):\n return 'DateRangeField'\n\n\nclass DateTimeRangeField(RangeField):\n range_type = DateTimeTZRange\n formfield_class = range_fields.DateTimeRangeField\n\n def db_type(self, connection):\n return 'tstzrange'\n\n\nclass RangeLookup(models.Lookup):\n def __init__(self, lhs, rhs):\n self.lhs, self.rhs = lhs, rhs\n # We need to cast a string that looks like a range\n # to a range of the correct type, so psycopg2 will\n # adapt it correctly.\n if isinstance(rhs, six.string_types) and RANGE_RE.match(rhs):\n self.rhs = range_from_string(self.lhs.output_field.range_type, rhs)\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = '%s', [self.rhs]\n params = lhs_params + rhs_params\n return '%s %s %s' % (lhs, self.operator, rhs), params\n\n\n@RangeField.register_lookup\nclass RangeOverlapsLookup(RangeLookup):\n lookup_name = 'overlaps'\n operator = '&&'\n\n\n@RangeField.register_lookup\nclass RangeContainsLookup(RangeLookup):\n lookup_name = 'contains'\n operator = '@>'\n\n\n@RangeField.register_lookup\nclass RangeInLookup(RangeLookup):\n lookup_name = 'in'\n operator = '<@'\n\n\n@RangeField.register_lookup\nclass RangeLeftOfLookup(RangeLookup):\n lookup_name = 'left_of'\n operator = '<<'\n\n\n@RangeField.register_lookup\nclass RangeRightOfLookup(RangeLookup):\n lookup_name = 'right_of'\n operator = '>>'\n\n\n@RangeField.register_lookup\nclass RangeNotExtendsRightOfLookup(RangeLookup):\n lookup_name = 'not_extends_right_of'\n operator = '&<'\n\n\n@RangeField.register_lookup\nclass RangeNotExtendsLeftOfLookup(RangeLookup):\n lookup_name = 'not_extends_left_of'\n operator = '&>'\n\n\n@RangeField.register_lookup\nclass RangeAdjacentTo(RangeLookup):\n lookup_name = 'adjacent_to'\n operator = '-|-'\n\n\ndef InRangeFactory(RangeType, range_cast=None, column_cast=None):\n if not range_cast:\n range_cast = RangeType.__name__.lower()\n if not column_cast:\n column_cast = RangeType.__name__.lower().replace('range', '')\n\n class InRange(models.lookups.BuiltinLookup):\n lookup_name = 'inrange'\n\n def __init__(self, lhs, rhs):\n self.lhs, self.rhs = lhs, rhs\n if not is_range(self.rhs):\n self.rhs = self.get_prep_lookup()\n\n def as_sql(self, qn, connection):\n if is_range(self.rhs):\n return self.in_range_sql(qn, connection)\n return super(InRange, self).as_sql(qn, connection)\n\n def in_range_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = '%s', [self.rhs]\n params = lhs_params + rhs_params\n\n return '%s::%s <@ %s::%s' % (\n lhs, column_cast,\n rhs, range_cast\n ), params\n\n return InRange\n\nmodels.DateField.register_lookup(InRangeFactory(DateRange))\nmodels.DateTimeField.register_lookup(InRangeFactory(DateTimeTZRange))\nmodels.IntegerField.register_lookup(InRangeFactory(NumericRange, range_cast='int4range', column_cast='integer'))\n\n\nclass NormalizeSQL(aggregates.Aggregate):\n sql_template = \"normalize(array_agg(%(field)s))\"\n sql_function = None\n\naggregates.Normalize = NormalizeSQL\n\n\nclass Normalize(models.aggregates.Aggregate):\n name = 'Normalize'\n template = 
'normalize(array_agg(%(expression)s))'\n\n\n\nclass MissingSQL(aggregates.Aggregate):\n sql_template = 'missing_ranges(array_agg(%(field)s))'\n sql_function = None\n\naggregates.Missing = MissingSQL\n\n\nclass Missing(models.aggregates.Aggregate):\n name = 'Missing'\n template = 'missing_ranges(array_agg(%(expression)s))'\n","sub_path":"postgres/fields/range_fields.py","file_name":"range_fields.py","file_ext":"py","file_size_in_byte":7051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"653125190","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-x86_64/egg/cachesweeper/tests.py\n# Compiled at: 2011-01-26 07:35:04\nfrom django.test import TestCase, Client\nfrom django.core.cache import cache\nfrom django.core.urlresolvers import reverse\nfrom django.utils.http import urlquote\nfrom django.utils.hashcompat import md5_constructor\nfrom django.core.management import call_command\nfrom cachesweeper.utils import cache_token_key_for_record, generate_fragment_cache_key_for_record\nfrom cachesweeper.test_models import Comment, Article, TestMixinModel, TestAttributeModel\n\nclass FragmentCacheInvalidation(TestCase):\n fixtures = [\n 'test_auth_data', 'test_cachesweeper_data']\n\n def __init__(self, *args, **kwargs):\n call_command('syncdb')\n super(FragmentCacheInvalidation, self).__init__(*args, **kwargs)\n\n def setUp(self):\n cache.clear()\n\n def tearDown(self):\n pass\n\n def test_version_at_creation(self):\n comment = Comment.objects.latest()\n comment.like_it()\n version_cache_key = cache_token_key_for_record(comment)\n self.assertEquals(cache.get(version_cache_key), 0)\n\n def test_version_after_save(self):\n comment = Comment.objects.latest()\n version_cache_key = cache_token_key_for_record(comment)\n original_version = cache.get(version_cache_key, None)\n comment.like_it()\n new_version = cache.get(version_cache_key)\n self.assertNotEquals(original_version, new_version)\n return\n\n def test_fragment_cache_miss(self):\n comment = Comment.objects.latest()\n from django.template import Context, Template\n template = Template('\\n {% load cachesweeper_tags %}\\n {% cachesweeper comment 500 \"comment.xml\" %}\\n
\\n            {{comment.user}} said at {{comment.created_at}}:\\n            {{comment.content}}\\n\\n
\\n {% endcachesweeper %}\\n ')\n template.render(Context({'comment': comment}))\n cache_key = generate_fragment_cache_key_for_record(comment, 'comment.xml')\n self.assertTrue(cache.get(cache_key))\n comment.like_it()\n new_cache_key = generate_fragment_cache_key_for_record(comment, 'comment.xml')\n self.assertNotEquals(cache_key, new_cache_key)\n self.assertFalse(cache.get(new_cache_key))\n\n def test_modelsweeper_mixin(self):\n tmm = TestMixinModel(text='testing text')\n tmm.save()\n self.assertEquals(tmm.cachesweeper_version_key, 'cachesweeper.test_models:TestMixinModel:%s' % tmm.pk)\n self.assertEquals(tmm.cachesweeper_version, 0)\n tmm.save()\n self.assertEquals(tmm.cachesweeper_version, 1)\n\n def test_default_version_zero(self):\n tmm = TestMixinModel(text='testing text')\n tmm.save()\n cache.delete(tmm.cachesweeper_version_key)\n self.assertEquals(tmm.cachesweeper_version, 0)\n tmm.save()\n self.assertEquals(tmm.cachesweeper_version, 1)","sub_path":"pycfiles/django_cache_sweeper-0.1.2-py2.6/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"84215031","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 7 15:10:52 2018\n\n@author: Filippo Broggini (ETH Zürich) - filippo.broggini@erdw.ethz.ch\n\"\"\"\n\nimport math\nimport numpy as np\n\nimport serendipyty.seismic.vis.vis as vis\n\nDTYPE = np.float64\n\n__all__ = ['BaseModel', 'AcousticModel']\n\n_sqrt2 = math.sqrt(2.0)\n\n\nclass BaseModel(object):\n r\"\"\"Base class for velocity and density models.\n\n This is implemented as a function object, so the magic happens in the\n `__call__` member function.\n\n Attributes\n ----------\n n : list of int\n Dimensions in (x, y, z)\n ndim : int\n Number of dimensions\n\n Methods\n -------\n __call__(self, t=None, nu=None, **kwargs)\n\n \"\"\"\n\n def __init__(self, modeltype, dx, dy, dz):\n\n self.type = modeltype\n\n self.n = [-1, -1, -1]\n\n self.ndim = 0\n\n # Discretization\n self.dx = dx\n if dy is None:\n self.dy = self.dx\n else:\n self.dy = dy\n if dz is None:\n self.dz = self.dx\n else:\n self.dz = dz\n #raise NotImplementedError('')\n\n def plot(self, style=None, **kwargs):\n r\"\"\"Plot the model parameters.\n \"\"\"\n vis.plot(self.model, style=self.type, **kwargs)\n\n\nclass AcousticModel(BaseModel):\n r\"\"\"Acoustic model.\n\n Velocity and density models for an acoustic medium.\n\n Parameters\n ----------\n dx : float\n Spatial discretization in the x direction.\n dy : float, optional\n Spatial discretization in the y direction.\n dz : float, optional\n Spatial discretization in the z direction.\n vp : float, ndarray\n Velocity model\n rho : float, ndarray\n Density model\n \"\"\"\n\n def __init__(self, dx, vp, rho, dy=None, dz=None):\n\n super().__init__('Acoustic', dx, dy, dz)\n\n # 2D or 3D\n if vp.ndim == 3:\n self.is3d = True\n self.ndim = 3\n self.n = vp.shape\n else:\n self.is3d = False\n self.ndim = 2\n self.n[0], self.n[2] = vp.shape\n self.n[1] = 1\n\n # Model parameters\n self.model = np.zeros((*vp.shape, 2))\n self.model[..., 0] = vp\n self.model[..., 1] = rho\n\n # Vis parameters\n","sub_path":"serendipyty/seismic/model/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"26797006","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 3 13:26:56 2018\n\n@author: srikant 
nayak\n\"\"\"\nimport numpy as np\n\n\ndef rms_value(x_test, y_test, w_train, test):\n    yp = np.dot(x_test, w_train)\n    error = np.subtract(yp, y_test)\n    # RMS error: square each residual first, then average and take the root\n    rms = np.sqrt(np.sum(error ** 2) / test)\n    return rms\n","sub_path":"rms.py","file_name":"rms.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"653270725","text":"#!/usr/bin/env python3\n\nfrom collections import Counter\n\ndef isValid(s):\n    # count how many characters share each occurrence frequency\n    freq_counts = Counter(Counter(s).values())\n    # every character already occurs the same number of times\n    if len(freq_counts) == 1:\n        return 'YES'\n    # removing one character can only help when exactly two frequencies occur:\n    # a single character must occur exactly once, or a single character must\n    # occur exactly once more often than all the others\n    if len(freq_counts) == 2:\n        (f1, n1), (f2, n2) = freq_counts.items()\n        if n1 == 1 and (f1 == 1 or f1 == f2 + 1):\n            return 'YES'\n        if n2 == 1 and (f2 == 1 or f2 == f1 + 1):\n            return 'YES'\n    return 'NO'\n\nif __name__ == '__main__':\n    input_str = input()\n    print(isValid(input_str))\n","sub_path":"hacker_rank/algorithm/03.strings/23.Sherlock_and_the_Valid_String.py","file_name":"23.Sherlock_and_the_Valid_String.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"558069310","text":"from collections import defaultdict\n\nneg = '~'\n\n\nclass dir_graph:\n    def __init__(self):\n        # create an empty directed graph, represented by a dictionary\n        # The dictionary consists of keys and corresponding sets\n        # Key = node u , Set = nodes v such that (u,v) is an edge\n        self.graph = defaultdict(set)\n        self.nodes = set()\n\n    # Function that adds an edge (u,v) to the graph\n    # It finds the dictionary entry for node u and adds node v to its set\n    # performance: O(1)\n    def addEdge(self, u, v):\n        self.graph[u].add(v)\n        self.nodes.add(u)\n        self.nodes.add(v)\n\n    # Function that returns the edges of all nodes in the graph\n    # collects all (u,v) in the set of edges of the graph\n    # performance: O(m+n) m = #edges , n = #nodes\n    def print(self):\n        edges = []\n        # for each node in graph\n        for node in self.graph:\n            # for each neighbour node of a single node\n            for neighbour in self.graph[node]:\n                # if edge exists then append\n                edges.append((node, neighbour))\n        return edges\n\n\nclass two_cnf:\n    def __init__(self):\n        self.con = []\n\n    # adds a clause to the CNF\n    # performance O(1)\n    def add_clause(self, clause):\n        if len(clause) <= 2:\n            self.con.append(clause)\n        else:\n            print(\"error: clause contains > 2 literals\")\n\n    # returns a set of all the variables in the CNF formula\n    def get_variables(self):\n        vars = set()\n        for clause in self.con:\n            for literal in clause:\n                vars.add(literal)\n        return vars\n\n    def print(self):\n        print(self.con)\n\n\n# helper function that applies the double negation rule to a formula\n# the function removes all occurrences of ~~ from the formula\ndef double_neg(formula1):\n    return formula1.replace((neg + neg), '')\n\n\n# Function that performs Depth First Search on a directed graph\n# O(|V|+|E|)\ndef DFS(dir_graph1, visited, stack, scc):\n    for node in dir_graph1.nodes:\n        if node not in visited:\n            explore(dir_graph1, visited, node, stack, scc)\n\n\n# DFS helper function that 'explores' as far as possible from a node\ndef explore(dir_graph2, visited, node, stack, scc):\n    if node not in visited:\n        
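# mark the node on first visit, then recurse into each neighbour; nodes are\n        # appended to the stack in finishing order - the first DFS pass of Kosaraju's algorithm\n        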
visited.append(node)\n        for neighbour in dir_graph2.graph[node]:\n            explore(dir_graph2, visited, neighbour, stack, scc)\n        stack.append(node)\n        scc.append(node)\n    return visited\n\n\n# Function that generates the transpose of a given directed graph\n# Performance O(|V|+|E|)\ndef transpose_graph(d_graph):\n    t_graph = dir_graph()\n    # for each node in graph\n    for node in d_graph.graph:\n        # for each neighbour node of a single node\n        for neighbour in d_graph.graph[node]:\n            t_graph.addEdge(neighbour, node)\n    return t_graph\n\n\n# Function that finds all the strongly connected components in a given graph\n# Implementation of Kosaraju’s algorithm\n# Performance O(|V|+|E|) for a directed graph G=(V,E)\n# IN : directed graph, G\n# OUT: list of lists containing the strongly connected components of G\ndef strongly_connected_components(dir_graph1):\n    stack = []\n    sccs = []\n    DFS(dir_graph1, [], stack, [])\n    t_g = transpose_graph(dir_graph1)\n    visited = []\n    while stack:\n        node = stack.pop()\n        if node not in visited:\n            scc = []\n            scc.append(node)\n            explore(t_g, visited, node, [], scc)\n            sccs.append(scc)\n    return sccs\n\n\n# returns True when some SCC contains both a literal and its negation\ndef find_contradiction(sccs):\n    for component in sccs:\n        for literal in component:\n            for other_literal in component[component.index(literal):]:\n                if other_literal == double_neg(neg + literal):\n                    return True\n    return False\n\n\n# Function that determines if a given 2-CNF is Satisfiable or not\ndef two_sat_solver(two_cnf_formula):\n    print(\"Checking if the following 2-CNF is Satisfiable in linear time\")\n    two_cnf_formula.print()\n    graph = dir_graph()\n    for clause in two_cnf_formula.con:\n        if len(clause) == 2:\n            u = clause[0]\n            v = clause[1]\n            graph.addEdge(double_neg(neg + u), v)\n            graph.addEdge(double_neg(neg + v), u)\n        else:\n            graph.addEdge(double_neg(neg + clause[0]), clause[0])\n    if not find_contradiction(strongly_connected_components(graph)):\n        print(\"2-CNF Satisfiable\")\n    else:\n        print(\"2-CNF not Satisfiable\")\n\n\n# ======= 2-CNF setup =======\n# every clause may hold at most two literals; (a|b)(~a|b)(a|~b)(~a|~b)\n# forces both b and ~b, so the solver reports this formula unsatisfiable\nformula = two_cnf()\n\nformula.add_clause(['a', 'b'])\nformula.add_clause(['~a', 'b'])\nformula.add_clause(['a', '~b'])\nformula.add_clause(['~a', '~b'])\ntwo_sat_solver(formula)\n","sub_path":"solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"492444191","text":"##### Import of the libraries #####\r\n\r\nimport numpy as np, pandas as pd, matplotlib.pyplot as plt\r\nimport os\r\nimport librosa as lr, audioread, ffmpeg\r\nfrom librosa import display #!#\r\nfrom glob import glob\r\n\r\n##### Setup #####\r\n\r\nos.chdir('C:\\\\Users\\\\Marek\\\\Desktop\\\\Beaty\\\\MP3\\\\Nowe')\r\nimport webbrowser\r\nwebbrowser.open_new_tab('https://librosa.github.io/librosa/tutorial.html')\r\naudio_files = glob('./*.mp3')\r\n\r\n##### Load first audio file #####\r\n\r\naudio, sfreq = lr.load(audio_files[0])\r\ny_harm, y_perc = lr.effects.hpss(audio)\r\n\r\n##### Separate harmonic and percussive part #####\r\nplt.subplot(3, 1, 3)\r\nlr.display.waveplot(y_harm, sr=sfreq, alpha=0.25)\r\nlr.display.waveplot(y_perc, sr=sfreq, color='r', alpha=0.5)\r\nplt.title('Harmonic + Percussive')\r\nplt.tight_layout()\r\n\r\n##### Display amplitude of a file #####\r\n\r\nlr.display.waveplot(audio, sr = sfreq)\r\n\r\n##### Detect tempo and beat events #####\r\nlr.beat.tempo(audio)\r\nlr.beat.beat_track(audio)\r\n\r\n##### Pitchshift audio by 7 semitones (perfect fifth) and write to output #####\r\n
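# a perfect fifth is seven semitones, a frequency ratio of roughly 3:2;\r\n# pitch_shift changes the pitch while keeping the duration unchanged\r\n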
audio_pfifth = lr.effects.pitch_shift(audio, sr = sfreq, n_steps = 7)\r\naudio_pfifth = audio_pfifth * -0.5\r\nlr.output.write_wav(path = 'Ds_Python.mp3', y = audio_pfifth, sr = sfreq)\r\n\r\n#beat_frames = lr.beat.beat_track(y=audio, sr=sfreq, hop_length=512)\r\n#beat_samples = lr.frames_to_samples(beat_frames)\r\n\r\n##### Vocal separation #####\r\n\r\naudio_voc, sfreq = lr.load(audio_files[0])\r\nS_full, phase = lr.magphase(lr.stft(audio_voc))\r\n\r\n#!!# - check the 0 - 40 Hz range\r\n\r\n##### Plot 5 seconds ##### #!# check the rumble <64 Hz\r\nidx = slice(*lr.time_to_frames([30, 35], sr=sfreq))\r\nplt.figure(figsize=(12, 4))\r\nlr.display.specshow(lr.amplitude_to_db(S_full[:, idx], ref=np.max),\r\n                    y_axis='log', x_axis='time', sr=sfreq)\r\nplt.colorbar()\r\nplt.tight_layout()\r\n\r\n##### Decompose vocal and instrumentation #####\r\n\r\nS_filter = lr.decompose.nn_filter(S_full,\r\n                                  aggregate=np.median,\r\n                                  metric='cosine',\r\n                                  width=int(lr.time_to_frames(2, sr=sfreq)))\r\n\r\nS_filter = np.minimum(S_full, S_filter)\r\n\r\nmargin_i, margin_v = 2, 10\r\npower = 2\r\n\r\nmask_i = lr.util.softmask(S_filter,\r\n                          margin_i * (S_full - S_filter),\r\n                          power=power)\r\n\r\nmask_v = lr.util.softmask(S_full - S_filter,\r\n                          margin_v * S_filter,\r\n                          power=power)\r\n\r\nS_foreground = mask_v * S_full\r\nS_background = mask_i * S_full\r\n\r\n##### Plot full, background and foreground spectra #####\r\n\r\nplt.figure(figsize=(12, 8))\r\nplt.subplot(3, 1, 1)\r\nlr.display.specshow(lr.amplitude_to_db(S_full[:, idx], ref=np.max),\r\n                    y_axis='log', sr=sfreq)\r\nplt.title('Full spectrum')\r\nplt.colorbar()\r\n\r\nplt.subplot(3, 1, 2)\r\nlr.display.specshow(lr.amplitude_to_db(S_background[:, idx], ref=np.max),\r\n                    y_axis='log', sr=sfreq)\r\nplt.title('Background')\r\nplt.colorbar()\r\nplt.subplot(3, 1, 3)\r\nlr.display.specshow(lr.amplitude_to_db(S_foreground[:, idx], ref=np.max),\r\n                    y_axis='log', x_axis='time', sr=sfreq)\r\nplt.title('Foreground')\r\nplt.colorbar()\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# reconstruct the accompaniment from the masked magnitude and the original phase\r\nbackground_audio = lr.istft(S_background * phase)\r\nlr.output.write_wav(path = 'Bushido_WO_Vocal.wav', y = background_audio, sr = sfreq)\r\n","sub_path":"Librosa.py","file_name":"Librosa.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"184579442","text":"import torch\r\nimport torch.nn as nn\r\n\r\nfrom maskgan.maskgan_networks.base import LSTMEncoder, Attention, LSTMDecoder\r\n\r\n\r\nclass MaskGANCritic(nn.Module):\r\n    def __init__(self, args, task):\r\n        super(MaskGANCritic, self).__init__()\r\n\r\n        self.args = args\r\n\r\n        if task.source_dictionary != task.target_dictionary:\r\n            raise ValueError\r\n\r\n        if args.enc_emb_dim != args.dec_emb_dim:\r\n            raise ValueError\r\n\r\n        if args.share_dec_input_output_emb and (\r\n                args.dec_emb_dim != args.dec_out_emb_dim):\r\n            raise ValueError\r\n\r\n        pretrained_enc_emb = nn.Embedding(\r\n            len(task.source_dictionary), args.enc_emb_dim, task.source_dictionary[''])\r\n\r\n        pretrained_dec_emb = None\r\n\r\n        self.encoder = LSTMEncoder(\r\n            args=args,\r\n            dictionary=task.source_dictionary,\r\n            bidirectional=args.bidirectional,\r\n            pretrained_emb=pretrained_enc_emb)\r\n\r\n        self.decoder = LSTMDecoder(\r\n            args=args,\r\n            dictionary=task.target_dictionary,\r\n            encoder_output=self.encoder.rnn_output_dim,\r\n            attention_module=Attention,\r\n            pretrained_emb=pretrained_dec_emb)\r\n\r\n        # the optional projection layer lives on the decoder, so check the\r\n        # decoder (not self) and read nn.Linear's out_features attribute\r\n        out_embed_dim = self.decoder.additional_fc.out_features \\\r\n            if hasattr(self.decoder, 'additional_fc') else self.decoder.dec_hid_dim\r\n\r\n        self.decoder.fc_out = nn.Linear(out_embed_dim, 
1)\r\n\r\n assert isinstance(self.encoder, nn.Module)\r\n assert isinstance(self.decoder, nn.Module)\r\n\r\n def forward(self, srcs, lengths, samples):\r\n self.encoder.rnn.flatten_parameters()\r\n\r\n encoder_output = self.encoder(srcs, lengths)\r\n x, attn_scores = self.decoder(samples, encoder_out=encoder_output)\r\n\r\n return x, attn_scores\r\n","sub_path":"maskgan/maskgan_networks/critic.py","file_name":"critic.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"497046557","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nMatt Fleetwood\r\n10 - 20 - 2017\r\nPortland, OR\r\n\r\nModified Pozyx file to cite the read data using pd.read_csv from an online file pathway.\r\nSciPy's Wiener filter function is also used to test how effective it might be for this example.\r\n\r\nThe following is taken from SciPy's API for the Wiener filter\r\n\r\ndocs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.wiener.html \r\n\r\nParameters:\t\r\n\r\n im : ndarray\r\n An N-dimensional array.\r\n \r\n mysize : int or arraylike, optional\r\n A scalar or an N-length list giving the size of the Wiener filter window in each dimension. Elements of mysize should be odd. If mysize is a scalar, then this scalar is used as the size in each dimension.\r\n \r\n noise : float, optional\r\n The noise-power to use. If None, then noise is estimated as the average of the local variance of the input.\r\n\r\nReturns:\t\r\n out : ndarray\r\n Wiener filtered result with the same shape as im.\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndf=pd.read_csv('https://github.com/etcyl/SciPozyx/blob/master/Data/walking_varying_speed_multi_test_ch2.csv', delimiter=' ', usecols=[2, 13, 14, 15], names=['Time','xpos','ypos','zpos'])\r\ndf1=pd.read_csv('https://github.com/etcyl/SciPozyx/blob/master/Data/walking_varying_speed_multi_test_ch2.csv', delimiter=' ', usecols=[2, 13, 14, 15], names=['Time1','xpos1','ypos1','zpos1'])\r\n\r\n\r\nx=df['Time']\r\ny1=df['xpos']\r\ny2=df['ypos']\r\ny3=df['zpos']\r\nplt.subplot(2,1,1)\r\nplt.tick_params(labelsize=6)\r\nplt.plot(x,y1)\r\nplt.plot(x,y2)\r\nplt.plot(x,y3)\r\nplt.xlabel('Time', fontsize=9)\r\nplt.ylabel('Position', fontsize=9)\r\nplt.title('4 Anchors', fontsize=10, weight='bold')\r\nplt.legend( loc=2, prop={'size': 4})\r\nplt.tight_layout()\r\n\r\nx1=df1['Time1']\r\ny4=df1['xpos1']\r\ny5=df1['ypos1']\r\ny6=df1['zpos1']\r\nplt.subplot(2,1,2)\r\nplt.tick_params(labelsize=6)\r\nplt.plot(x1,y4)\r\nplt.plot(x1,y5)\r\nplt.plot(x1,y6)\r\nplt.xlabel('Time', fontsize=9)\r\nplt.ylabel('Position', fontsize=9)\r\nplt.title('6 Anchors', fontsize=10, weight='bold')\r\nplt.legend( loc=2, prop={'size': 4})\r\nplt.tight_layout()\r\n\r\n\r\n#x=df['Time']\r\n#y3=df['Linear-Acceleration-Y']\r\n#y4=df['Acceleration-Y']\r\n#plt.subplot(3,1,2)\r\n#plt.plot(x,y3)\r\n#plt.plot(x,y4)\r\n#plt.xlabel('Time')\r\n#plt.legend( loc=2, prop={'size': 4})\r\n\r\n\r\n\r\n#x=df['Time']\r\n#y5=df['Linear-Acceleration-Z']\r\n#y6=df['Acceleration-Z']\r\n#plt.subplot(3,1,3)\r\n#plt.plot(x,y5)\r\n#plt.plot(x,y6)\r\n#plt.xlabel('Time')\r\n#plt.legend( loc=2, prop={'size': 4})\r\n\r\n\r\nplt.show()\r\n\r\n\r\nax = df.plot.scatter(x='Time', y='Linear-Acceleration-X', s=1, title='Linear-Acceleration-X')\r\nax.set_xlabel(\"Time\")\r\nax.set_ylabel(\"Acceleration-X\")\r\nax.plot()\r\nplt.show()\r\n\r\ndf=pd.read_csv('/Users/CoraJune/Documents/GitHub/Pozyx/Data/pressure_test_srtc_2.txt', delimiter=' 
', usecols=['Time','Pressure'])\r\n\r\nprint(df.columns)\r\nax1 = df.plot.line(x='Time', y='Pressure', linewidth=1, title='Pressure')\r\nax1.set_xlabel(\"Time\")\r\nax1.set_ylabel(\"Pressure\")\r\nax1.plot()\r\nplt.show()\r\n\r\ndf=pd.read_csv('/Users/CoraJune/Documents/GitHub/Pozyx/Data/acceleration_test_still.txt', delimiter=' ', usecols=['Time','Linear-Acceleration-Z'])\r\n\r\n\r\nax2 = df.plot.line(x='Time', y='Linear-Acceleration-Z', s=1, title='Linear-Acceleration-Z')\r\nax2.set_xlabel(\"Time\")\r\nax2.set_ylabel(\"Acceleration-Z\")\r\nax2.plot()\r\nplt.show()\r\n\r\ndf.plot()\r\n\r\n#df1=df.diff(1,0)['Angular Velocity']\r\n#ax1=df1.plot.line(x=2, y='Angular Velocity', linewidth=1, title='Angular Acceleration')\r\n\r\n#ax1.set_xlabel(\"Time\")\r\n#ax1.set_ylabel(\"Angular Acceleration\")\r\n#ax1.plot()\r\n#plt.show()\r\n","sub_path":"SciPozyx/Code/pozyx_wiener.py","file_name":"pozyx_wiener.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"600610115","text":"#! /usr/bin/env python3\n\"\"\"\nThe purpose of this problem is to verify whether the method you are using to read input data is sufficiently fast to handle problems branded with the enormous Input/Output warning. You are expected to be able to process at least 2.5MB of input data per second at runtime.\nInput\n\nThe input begins with two positive integers n k (n, k<=107). The next n lines of input contain one positive integer ti, not greater than 109, each.\nOutput\n\nWrite a single integer to output, denoting how many integers ti are divisible by k.\nExample\n\nInput:\n7 3\n1\n51\n966369\n7\n9\n999996\n11\n\nOutput:\n4\n\"\"\"\n'''\na = input().split()\na[0], a[1] = int(a[0]), int(a[1])\ntotal = 0\nfor i in range(a[0]):\n b = int(input())\n if b % a[1] == 0:\n total += 1\nprint(total)\n'''\nimport sys\nn,k,*a = map(int,sys.stdin.buffer.read().split())\ntotal = 0\nfor i in a:\n if not i % k:\n total += 1\nprint(total)\n\n","sub_path":"codechef/enormous.py","file_name":"enormous.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"1641916","text":"\"\"\"\nAvoid using built-in functions to solve this challenge. \nImplement them yourself, since this is what you would \nbe asked to do during a real interview.\n\nImplement a function that takes two strings, s and x, \nas arguments and finds the first occurrence of the string \nx in s. The function should return an integer indicating \nthe index in s of the first occurrence of x. 
If there are \nno occurrences of x in s, return -1.\n\nExample\n\n For s = \"CodefightsIsAwesome\" and x = \"IA\", \n the output should be\tstrstr(s, x) = -1;\n For s = \"CodefightsIsAwesome\" and x = \"IsA\", \n the output should be\tstrstr(s, x) = 10.\n\"\"\"\n\ndef findFirstSubstringOccurrence(s, x):\n if x in s:\n for i in range(len(s)):\n if s[i]==x[0]:\n if s[i:i+len(x)]==x:\n return i\n else:\n return -1","sub_path":"interviewPractice/findFirstSubstringOccurence.py","file_name":"findFirstSubstringOccurence.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"394802458","text":"#!/bin/usr/env python3\n# -*- coding:utf-8 -*-\n'''\n模块研究\n提取模块信息,提示用户输入一个模块名。然后使用dir()和其他内建函数提取模块的属性,显示他们的名字、类型、值\n'''\n__author__ = 'Jackie Qiang'\n\ndef show_module(name):\n obj = __import__(name)\n for item in dir(obj):\n print('name:%s' % item)\n print('type:%s' % type(getattr(obj,item)))\n print('value:%s' % getattr(obj,item))\n return True\n\nif __name__ == '__main__':\n name = input('请输入一个模块名:')\n show_module(name)\n","sub_path":"9_8.py","file_name":"9_8.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"627417282","text":"\"\"\"\nTrain the model.\n\"\"\"\n#type the following commad line to train\n#python3 framework/train.py --config train_config.yaml\nfrom pathlib import Path\nimport datetime\nimport argparse\nimport yaml\nimport random\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom time import time\n\nfrom dataset import LandCoverData as LCD\nfrom dataset import parse_image, load_image_train, load_image_test\nfrom model import UNet\nfrom tensorflow_utils import plot_predictions\nfrom utils import YamlNamespace\nfrom sklearn.model_selection import GridSearchCV\nfrom keras.wrappers.scikit_learn import KerasClassifier\nimport os\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"]=\"3\"\n\n\n\n\n#Custom metric calculant la Kullback-Leibler Divergence entre la proportion des classes prédites \n# et la vraie proportion des classes en partant des masques predits et des vrais masques\n \ndef custom_KLD(y_true, y_pred):\n \n def bincount_along_axis(arr, minlength=None, axis=-1):\n \"\"\"Bincounts a tensor along an axis\"\"\"\n if minlength is None:\n minlength = tf.reduce_max(arr) + 1\n mask = tf.equal(arr[..., None], tf.range(minlength, dtype=arr.dtype))\n return tf.math.count_nonzero(mask, axis=axis-1 if axis < 0 else axis)\n \n e = 1e-7\n\n pred_mask = tf.argmax(y_pred, -1) \n true_mask = y_true\n \n pred_counts = bincount_along_axis(tf.reshape(pred_mask, (config.batch_size, -1)),\n minlength=LCD.N_CLASSES, axis=-1)\n pred_counts = pred_counts / tf.math.reduce_sum(pred_counts, -1, keepdims=True)\n\n true_counts = bincount_along_axis(tf.reshape(true_mask, (config.batch_size, -1)),\n minlength=LCD.N_CLASSES, axis=-1)\n true_counts = true_counts / tf.math.reduce_sum(true_counts, -1, keepdims=True)\n \n score = np.mean(np.sum((true_counts + e) * np.log((true_counts + e)/(pred_counts+e)), axis = 1))\n\n return score\n \n \nclass PlotCallback(tf.keras.callbacks.Callback):\n \"\"\"A callback used to display sample predictions during training.\"\"\"\n from IPython.display import clear_output\n\n def __init__(self, dataset: tf.data.Dataset=None,\n sample_batch: tf.Tensor=None,\n save_folder: Path=None,\n num: int=1,\n ipython_mode: bool=False):\n super(PlotCallback, 
self).__init__()\n self.dataset = dataset\n self.sample_batch = sample_batch\n self.save_folder = save_folder\n self.num = num\n self.ipython_mode = ipython_mode\n\n def on_epoch_begin(self, epoch, logs=None):\n if self.ipython_mode:\n self.clear_output(wait=True)\n if self.save_folder:\n save_filepaths = [self.save_folder+'/'+f'epoch{epoch}_plot_{n}.png' for n in range(1, self.num+1)]\n else:\n save_filepaths = None\n plot_predictions(self.model, self.dataset, self.sample_batch, num=self.num, save_filepaths=save_filepaths)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser('Training script')\n parser.add_argument('--config', '-c', type=str, required=True, help=\"The YAML config file\")\n cli_args = parser.parse_args()\n # parse the config file\n with open(cli_args.config, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n config = YamlNamespace(config)\n config.xp_rootdir = Path(config.xp_rootdir).expanduser()\n assert config.xp_rootdir.is_dir()\n config.dataset_folder = Path(config.dataset_folder).expanduser()\n assert config.dataset_folder.is_dir()\n if config.val_samples_csv is not None:\n config.val_samples_csv = Path(config.val_samples_csv).expanduser()\n assert config.val_samples_csv.is_file()\n\n return config\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\n import multiprocessing\n \n config = _parse_args()\n print(f'Config:\\n{config}')\n # set random seed for reproducibility\n if config.seed is not None:\n random.seed(config.seed)\n np.random.seed(config.seed)\n tf.random.set_seed(config.seed)\n\n N_CPUS = multiprocessing.cpu_count()\n\n print('Instanciate train and validation datasets')\n train_files = list(config.dataset_folder.glob('train/images/*.tif'))\n # shuffle list of training samples files\n train_files = random.sample(train_files, len(train_files))\n devset_size = len(train_files)\n trainset_size = len(train_files)\n \n train1 = train_files[:int(trainset_size*0.8)]\n train2 = train_files[int(trainset_size*0.2):]\n train3 = train_files[:int(trainset_size*0.2)] + train_files[int(trainset_size*0.4):]\n train4 = train_files[:int(trainset_size*0.4)] + train_files[int(trainset_size*0.6):]\n train5 = train_files[:int(trainset_size*0.6)] + train_files[int(trainset_size*0.8):]\n\n val1 = train_files[int(trainset_size*0.8):]\n val2 = train_files[:int(trainset_size*0.2)]\n val3 = train_files[int(trainset_size*0.2):int(trainset_size*0.4)]\n val4 = train_files[int(trainset_size*0.4):int(trainset_size*0.6)]\n val5 = train_files[int(trainset_size*0.6):int(trainset_size*0.8)]\n \n trainset_size = len(train1)\n valset_size = len(val1)\n\n\n\n \n def map_train_dataset(train_files): \n train_dataset = tf.data.Dataset.from_tensor_slices(list(map(str, train_files)))\\\n .map(parse_image, num_parallel_calls=N_CPUS)\n train_dataset = train_dataset.map(load_image_train, num_parallel_calls=N_CPUS)\\\n .shuffle(buffer_size=1024, seed=config.seed)\\\n .repeat()\\\n .batch(config.batch_size)\\\n .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n return train_dataset\n\n def map_val_dataset(val_files):\n val_dataset = tf.data.Dataset.from_tensor_slices(list(map(str, val_files)))\\\n .map(parse_image, num_parallel_calls=N_CPUS)\n val_dataset = val_dataset.map(load_image_test, num_parallel_calls=N_CPUS)\\\n .repeat()\\\n .batch(config.batch_size)\\\n .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n return( val_dataset)\n \n \n train_data1 = map_train_dataset(train1)\n train_data2 = map_train_dataset(train2)\n train_data3 = map_train_dataset(train3)\n train_data4 = 
map_train_dataset(train4)\n train_data5 = map_train_dataset(train5)\n\n val_data1 = map_val_dataset(val1)\n val_data2 = map_val_dataset(val2)\n val_data3 = map_val_dataset(val3)\n val_data4 = map_val_dataset(val4)\n val_data5 = map_val_dataset(val5)\n\n\n\n\n \n\n # Where to write files for this experiments\n xp_dir = os.path.join(config.xp_rootdir, datetime.datetime.now().strftime(\"%d-%m-%Y_%H:%M:%S\") + '/')\n os.mkdir(xp_dir)\n os.mkdir(os.path.join(xp_dir, 'tensorboard'))\n os.mkdir(os.path.join(xp_dir, 'plots'))\n os.mkdir(os.path.join(xp_dir, 'checkpoints'))\n \n \n \n\n \n \n\n \n \n \n # create the U-Net model to train\n unet_kwargs = dict( input_shape=(LCD.IMG_SIZE, LCD.IMG_SIZE, LCD.N_CHANNELS),\n num_classes=LCD.N_CLASSES,\n num_layers=2)\n\n\n\n # compute class weights for the loss: inverse-frequency balanced\n # note: we set to 0 the weights for the classes \"no_data\"(0) and \"clouds\"(1) to ignore these\n class_weight = np.zeros(LCD.N_CLASSES)\n class_weight[2:] = (1 / LCD.TRAIN_CLASS_COUNTS[2:])* LCD.TRAIN_CLASS_COUNTS[2:].sum() / (LCD.N_CLASSES-2)\n print(f\"Will use class weights: {class_weight}\")\n\n \n\n # Launch training\n dataset = ((train_data1,val_data1),(train_data2,val_data2),(train_data3,val_data3),(train_data4,val_data4),(train_data5,val_data5))\n i = 1\n for train, val in dataset:\n \n print(f\"Creating U-Net with arguments: {unet_kwargs}\")\n model = UNet(**unet_kwargs)\n #print(model.summary())\n\n # get optimizer, loss, and compile model for training\n optimizer = tf.keras.optimizers.Adam(lr=config.lr)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)\n print(\"Compile model\")\n model.compile(optimizer=optimizer,\n loss=loss,\n metrics=[custom_KLD],\n run_eagerly=True) # Needed to transform tensor to np.array in the custom metric\n \n \n # Décommenter les 5 lignes suivantes pour enregistrer des images avec le True mask et le Pred mask à chaque epoch \n # Attention cela ralenti beaucoup le modèle\n \n \n #for image, mask in train_dataset.take(1):\n #sample_batch = (image[:5, ...], mask[:5, ...])\n #callbacks = [ \n #PlotCallback(sample_batch=sample_batch, save_folder=xp_dir +'plots', num=5),\n #tf.keras.callbacks.TensorBoard(log_dir=xp_dir +'tensorboard',update_freq='epoch'),\n \n \n callbacks = [ # Commenter cette ligne si le block précédent a été décommenté\n tf.keras.callbacks.EarlyStopping(patience=20, monitor='val_custom_KLD', verbose=1),\n tf.keras.callbacks.ModelCheckpoint(filepath=xp_dir +'checkpoints/set' + str(i) + 'epoch{epoch}', monitor='val_custom_KLD', save_best_only=True, verbose=0),\n tf.keras.callbacks.CSVLogger(filename=(xp_dir +'fit_logs_set' + str(i) +'.csv')),\n tf.keras.callbacks.ReduceLROnPlateau(patience=5, monitor='val_custom_KLD',factor=0.5,verbose=1,)]\n \n print(\"Training for set number : \", i)\n model_history = model.fit(train, epochs=config.epochs,\n callbacks=callbacks,\n steps_per_epoch= trainset_size // config.batch_size,\n validation_data=val,\n validation_steps=valset_size // config.batch_size,\n class_weight=class_weight)\n i +=1","sub_path":"framework/train_crossval.py","file_name":"train_crossval.py","file_ext":"py","file_size_in_byte":9932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"382220859","text":"from django.conf.urls import url\nfrom articles import views\n\nurlpatterns = [\n url(r'^images/thumbnail/(?P[0-9]+)/$',\n views.image_thumbnail), # 이미지 썸네일을 얻는 라우트\n url(r'^images/(?P[0-9]+)/$', views.image), # 이미지를 얻는 라우트\n 
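# Django resolves these patterns top-down and stops at the first regex match\n    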
url(r'^images/$', views.create_image),  # route for uploading an image\n    url(r'^(?P[0-9]+)/$',\n        views.ArticleDetail.as_view()),  # route for fetching the matching article\n    url(r'^$', views.ArticleList.as_view()),  # route for posting an article or fetching the article list\n]\n","sub_path":"AppJamDjango/articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"625434226","text":"\"\"\"\n.. module:: OC03\n\n***************\n OC03 Module\n***************\n\nThis is a Module for the OC03 Relay Out Low Voltage.\nThe board is based off the PCA9554A I/O expander manufactured by Texas Instruments.\nThe Module implements the PCA9554A to drive a solid state relay utilizing the `OC03 xChip `_.\nThe board uses I2C for communication.\n\nData Sheets:\n\n- `PCA9554A `_\n- `TLP241A `_\n\n \"\"\"\n\nimport i2c\n\nPCA9554A_I2C_ADDRESS = 0x38\nPCA9554A_REG_INPUT_PORT = 0x00\nPCA9554A_REG_OUTPUT_PORT = 0x01\nPCA9554A_REG_POL_INVERSION = 0x02\nPCA9554A_REG_CONFIG = 0x03\n\nPCA9554A_CONF_OUTPUT = 0x00\nPCA9554A_CONF_INPUT = 0xFF\n\nPCA9554A_ALL_OUTPUTS_OFF = 0x00\n\n\nclass OC03(i2c.I2C):\n    \"\"\"\n\n===============\nOC03 class\n===============\n\n.. class:: OC03(self, drvname, addr = PCA9554A_I2C_ADDRESS , clk = 100000)\n\n    Create an instance of the OC03 class.\n\n    :param drvname: I2C Bus used '( I2C0, ... )'\n    :param addr: Slave address, default 0x38\n    :param clk: Clock speed, default 100kHz\n\n    \"\"\"\n\n    def __init__(self, drvname, addr=PCA9554A_I2C_ADDRESS, clk=100000):\n        i2c.I2C.__init__(self, drvname, addr, clk)\n        self._addr = addr\n        try:\n            self.start()\n        except PeripheralError as e:\n            print(e)\n\n    def init(self, state=PCA9554A_ALL_OUTPUTS_OFF):\n        \"\"\"\n.. method:: init(self, state = PCA9554A_ALL_OUTPUTS_OFF)\n\n    Configures the PCA9554A and sets all outputs False by default\n\n    :param state: initializes the relay state. Accepts True (relay closed) and False (relay open)\n\n        \"\"\"\n        self.writePin(state)\n        self.write_bytes(PCA9554A_REG_CONFIG, PCA9554A_CONF_OUTPUT)\n        return True\n\n    def writePin(self, state):\n        \"\"\"\n.. method:: writePin(self, state)\n\n    Sets the state of the relay output\n\n    :param state: accepts True (relay closed) and False (relay open)\n\n        \"\"\"\n        if state == True:\n            self.write_bytes(PCA9554A_REG_OUTPUT_PORT, 0x01)\n        elif state == False:\n            self.write_bytes(PCA9554A_REG_OUTPUT_PORT, 0x00)\n\n    def getStatus(self):\n        \"\"\"\n.. 
method:: getStatus(self)\n\n Reads the status of the relay.\n\n returns the status of the relay.\n\n \"\"\"\n pin_state = self.write_read(PCA9554A_REG_OUTPUT_PORT, 1)[0]\n return pin_state\n","sub_path":"oc03.py","file_name":"oc03.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"607684664","text":"from PPA.classes import PPAProcess\nimport csv\n\n\nclass DataExporter:\n\n def export_data(run_n: int, benchmark_name: str, survivor_selection_name: str, dimensions: int,\n optimum: float, ppa: PPAProcess, filename: str):\n # we save: best individual during run,\n fields_one = ['run_n', 'selection_method', 'benchmark_name', 'dimensions', 'optimum', 'best_during run']\n data_one = [run_n, survivor_selection_name, benchmark_name, dimensions, optimum, ppa.best_objval_during_run.objective_value]\n with open(filename.replace('.csv', '_performance.csv'), 'w') as f:\n write = csv.writer(f)\n write.writerow(fields_one)\n write.writerow(data_one)\n\n best_individual_in_generation = ppa.heritage.best_individual_in_generation\n fields_two = ['objective_value', 'generation', 'evaluations']\n with open(filename.replace('.csv', '_performance_over_generations.csv'), 'w') as f:\n write = csv.writer(f)\n write.writerow(fields_two)\n write.writerows(best_individual_in_generation)\n\n ranks_per_generation = ppa.heritage.ranks_per_generation\n fields_three = ['rank_data']\n with open(filename.replace('.csv', '_fitness_ranks.csv'), 'w') as f:\n write = csv.writer(f)\n write.writerow(fields_three)\n write.writerows(ranks_per_generation)\n\n unique_individuals = ppa.heritage.unique_individual_count\n fields_four = ['generation','unique_ids']\n with open(filename.replace('.csv', '_unique_individuals.csv'), 'w') as f:\n write = csv.writer(f)\n write.writerow(fields_four)\n write.writerows(unique_individuals)\n\n\n","sub_path":"classes/DataExporter.py","file_name":"DataExporter.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"275546110","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTranspose and Flatten\n\n\"\"\"\n\nimport numpy\n\nx = list(map(int, input().split()))\n\na = x[0]\nb = x[1]\n\ndata = []\n\nfor i in range(a):\n y = list(map(int, input().split()))\n data.append(y)\n\narr = numpy.array([z for z in data])\n\ntranspose = numpy.transpose(arr)\n\nflatten = arr.flatten()\n\nprint(transpose)\nprint(flatten)\n","sub_path":"Problem 1/13 - Numpy/03_transpose_and_flatten.py","file_name":"03_transpose_and_flatten.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"434260710","text":"import os\nfrom datetime import datetime\nfrom utils.genericlog import Logger\n\nclass FunctionTrace:\n def __init__(self, function):\n self.function = function\n\n def __call__(self, *args, **kwargs):\n\n # Funciton return value\n return_value = None\n\n # Start time\n function_start = datetime.now()\n\n # Base message\n spacer = '\\t' * 8\n out_message_base = \"Module: {} - Function: {} \".format(\n self.function.__module__,\n self.function.__name__)\n\n out_message_base += \"\\n{}ARGUMENTS: {}\".format(spacer, args)\n\n try:\n # Execute funciton, if exception log it\n return_value = self.function(*args, **kwargs)\n except Exception as ex:\n out_message_base += \"\\n{}EXCEPTION: {}\".format(spacer, str(ex))\n\n # Add function return\n out_message_base 
+= \"\\n{}RETURNS: {}\".format(spacer, return_value)\n\n # Add clock to function\n span = datetime.now() - function_start\n out_message_base += \"\\n{}EXECUTION: {}\".format(spacer, str(span))\n\n # Finally log it and return the function return value\n Logger.add_log(out_message_base)\n\n return return_value\n","sub_path":"sql_storage/utils/tracelog.py","file_name":"tracelog.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"564264654","text":"import pytest\nimport numpy as np\nfrom sklearn import linear_model\n\n\ndef basic_tests():\n reg = linear_model.LogisticRegression()\n reg.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])\n print(reg.coef_)\n\n reg = linear_model.Ridge(alpha=.5)\n reg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1])\n print(reg.coef_)\n print(reg.intercept_)\n\n reg = linear_model.RidgeCV(alphas=np.logspace(-6, 6, 13))\n reg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1])\n print(reg.alpha_)\n\n reg = linear_model.Lasso(alpha=0.1)\n reg.fit([[0, 0], [1, 1]], [0, 1])\n reg.predict([[1, 1]])\n\n reg = linear_model.LassoLars(alpha=.1)\n reg.fit([[0, 0], [1, 1]], [0, 1])\n print(reg.coef_)\n\n reg = linear_model.LassoLars(alpha=.1)\n reg.fit([[0, 0], [1, 1]], [0, 1])\n reg.coef_\n\n # orthogonal_matching_pursuit()\n\n # Bayesian Ridge Regression is used for regression:\n X = [[0., 0.], [1., 1.], [2., 2.], [3., 3.]]\n Y = [0., 1., 2., 3.]\n reg = linear_model.BayesianRidge()\n reg.fit(X, Y)\n reg.predict([[1, 0.]])\n print(reg.coef_)\n\n\n@pytest.mark.xfail\n@pytest.mark.usefixtures(\"turn_numpy_ufunc_on\", \"cleandir\")\ndef test_trace_only_ufunc_on(script_runner):\n ret = script_runner.run(\"pytracer\", \"trace\",\n f\"--module {__file__}\")\n assert ret.success\n\n\n@pytest.mark.usefixtures(\"turn_numpy_ufunc_off\", \"cleandir\")\ndef test_trace_only_ufunc_off(script_runner):\n ret = script_runner.run(\"pytracer\", \"trace\",\n f\"--module {__file__}\")\n assert ret.success\n\n\n@pytest.mark.usefixtures(\"turn_numpy_ufunc_off\", \"cleandir\", \"parse\")\ndef test_trace_parse(nsamples, script_runner):\n for _ in range(nsamples):\n ret = script_runner.run(\"pytracer\", \"trace\",\n f\"--module {__file__}\")\n assert ret.success\n\n\nif __name__ == \"__main__\":\n import time\n print(time.localtime())\n basic_tests()\n print(\"End\")\n","sub_path":"pytracer/test/sklearn/test_basic_tests.py","file_name":"test_basic_tests.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"464614326","text":"## A Quicklist for Opera ##\n\n# if the quicklist options will be arguments (not full commands), put %s where the arguments should go\ncommand = 'opera %s'\n\n# the magic!\nql = {'New Tab': '-newtab',\n 'New Private Tab': '-newprivatetab',\n 'New Window': '-newwindow',\n 'Mail': '-mail'}\n","sub_path":"data/quicklists/opera.py","file_name":"opera.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"115279990","text":"n=int(input())#input\nlis=list(map(int,input().split()))\na=[]\nd={ }\nfor i in lis:\n s=lis.count(i)\n d.update({i:s})\n a.append(s)\nw=max(a)\nif w>1:\n c=0\n for x,y in d.items():\n if c==0:\n if y>1:\n print(x,end=\"\")\n c+=1\n else:\n if y>1:\n print(\"\",x,end=\"\")\nelse:\n print(\"unique\")\n","sub_path":"repeat more 1 hunter.py","file_name":"repeat more 1 
hunter.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"464279039","text":"from collections import defaultdict\nfrom functools import reduce\n\nfrom brownie import ZERO_ADDRESS, chain, convert\nfrom brownie.network.account import Accounts\nfrom brownie.network.contract import Contract\nfrom brownie.test import strategy\n\n\nclass StateMachine:\n\n st_addr = strategy(\"address\")\n st_id = strategy(\"uint96\")\n\n def __init__(cls, accounts: Accounts, crv: Contract, veboost: Contract, vecrv: Contract):\n cls.alice = accounts[0]\n cls.accounts = accounts\n cls.crv = crv\n cls.veboost = veboost\n cls.vecrv = vecrv\n\n def setup(self):\n self.total_supply = 0\n self.ownership = defaultdict(set)\n self.delegator_tokens = defaultdict(set)\n\n alice_balance = self.crv.balanceOf(self.alice)\n dividend = alice_balance // len(self.accounts)\n\n for acct in self.accounts:\n self.crv.transfer(acct, dividend, {\"from\": self.alice})\n\n for acct in self.accounts:\n self.crv.approve(self.vecrv, 2 ** 256 - 1, {\"from\": acct})\n self.vecrv.create_lock(dividend, chain.time() + 86400 * 365, {\"from\": acct})\n\n def rule_mint(self, st_addr, st_id):\n token_id = self.veboost.get_token_id(st_addr, st_id)\n if self.veboost.ownerOf(token_id) != ZERO_ADDRESS:\n return\n self.veboost.create_boost(\n st_addr, st_addr, 5_000, 0, chain.time() + 86400 * 31, st_id, {\"from\": st_addr}\n )\n\n self.ownership[st_addr].add(token_id)\n self.delegator_tokens[st_addr].add(token_id)\n self.total_supply += 1\n\n def rule_burn(self):\n if self.total_supply == 0:\n return\n\n token_id = reduce(lambda a, b: a | b, self.ownership.values(), set()).pop()\n delegator = self.accounts.at(\n convert.to_address(convert.to_bytes(token_id >> 96, \"bytes20\"))\n )\n _from = self.veboost.ownerOf(token_id)\n self.veboost.burn(token_id, {\"from\": _from})\n\n self.ownership[_from].remove(token_id)\n self.delegator_tokens[delegator].remove(token_id)\n self.total_supply -= 1\n\n def rule_transfer(self, st_addr):\n if self.total_supply == 0:\n return\n to = st_addr\n token_id = reduce(lambda a, b: a | b, self.ownership.values(), set()).pop()\n _from = self.veboost.ownerOf(token_id)\n\n self.veboost.transferFrom(_from, to, token_id, {\"from\": _from})\n\n self.ownership[_from].remove(token_id)\n self.ownership[to].add(token_id)\n\n def invariant_balanceOf(self):\n for acct in self.accounts:\n assert self.veboost.balanceOf(acct) == len(self.ownership[acct])\n\n def invariant_tokenOfOwnerByIndex(self):\n\n for acct in self.accounts:\n tokens = {\n self.veboost.tokenOfOwnerByIndex(acct, i) for i in range(len(self.ownership[acct]))\n }\n assert tokens == self.ownership[acct]\n\n def invariant_tokenByIndex(self):\n tokens = reduce(lambda a, b: a | b, self.ownership.values(), set())\n chain_tokens = {self.veboost.tokenByIndex(i) for i in range(len(tokens))}\n\n assert tokens == chain_tokens\n\n def invariant_delegator_total_minted(self):\n for acct in self.accounts:\n assert self.veboost.total_minted(acct) == len(self.delegator_tokens[acct])\n\n def invariant_delegator_tokens(self):\n for acct in self.accounts:\n tokens = {\n self.veboost.token_of_delegator_by_index(acct, i)\n for i in range(len(self.delegator_tokens[acct]))\n }\n assert tokens == self.delegator_tokens[acct]\n\n\ndef test_state_machine(state_machine, accounts, crv, vecrv, veboost):\n state_machine(StateMachine, accounts, crv, veboost, vecrv, settings={\"stateful_step_count\": 
50})\n","sub_path":"tests/token/test_enumeration_state.py","file_name":"test_enumeration_state.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"270659494","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom prometheus_client import start_http_server, Summary\nimport random\nimport time\n\n\"\"\"\n创建一个Summary类型的指标,Summary是一个Collector对象;\n 1. 第一个参数是metric的name\n 2. 第二个参数是metric的Help信息\n 3. 第三个参数是指标的label名称\n\"\"\"\nREQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request', [\"program\"])\nREQUEST_TIME_WITH_LABEL = REQUEST_TIME.labels(program=\"Test\") # 在指定的label上赋值\n\n\n# 通过装饰器的方式对process_request的执行时间进行采样\n@REQUEST_TIME_WITH_LABEL.time()\ndef process_request(t):\n \"\"\"A dummy function that takes some time.\"\"\"\n time.sleep(t)\n\n\nif __name__ == '__main__':\n # 创建http服务,将metric暴露给采集器\n start_http_server(6789)\n # Generate some requests.\n while True:\n process_request(random.random())\n","sub_path":"awesome-python2/mem_monitor/prometheus_demo.py","file_name":"prometheus_demo.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"192542286","text":"\n\nimport sys\nimport os\nimport glob\nimport logging\nimport shutil\nimport hashlib\nfrom pathlib import Path\nfrom subprocess import *\n\nlogFormatter = logging.Formatter(\"%(asctime)s [%(levelname)-5.5s] %(message)s\",datefmt='%Y-%m-%d %H:%M:%S')\nrootLogger = logging.getLogger()\nrootLogger.setLevel(logging.DEBUG)\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(logFormatter)\nrootLogger.addHandler(consoleHandler)\nlogger = logging.getLogger()\n\n\n\ndef run_cmd(cmd, workdir):\n p = Popen(cmd, stdout=PIPE, stderr=STDOUT, bufsize=1, cwd=workdir)\n\n for line in iter(p.stdout.readline, b''):\n tmp = str(line)\n if tmp.endswith(\"\\\\r\\\\n'\"):\n logger.info(tmp[2:len(tmp)-5])\n elif tmp.endswith(\"\\\\r\\\\n\\\"\"):\n logger.info(tmp[2:len(tmp)-5])\n else:\n logger.info(tmp[2:len(tmp)-3])\n p.stdout.close()\n p.wait()\n\n","sub_path":"scripts/Modules/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"441450215","text":"import numpy as np\nimport torch\nfrom sklearn.preprocessing import StandardScaler\nimport time\nimport torch.utils.data as Data\nfrom torch import nn\nimport torch.nn.functional as F\nfrom sklearn.model_selection import train_test_split\n\nclass ConvNet(nn.Module):\n def __init__(self, num_classes=2):\n super(ConvNet, self).__init__()\n self.layer = nn.Sequential(\n nn.Conv1d(1, 32, kernel_size=5, stride=3),\n nn.BatchNorm1d(32),\n nn.ReLU(),\n nn.Conv1d(32, 24, kernel_size=3, stride=2),\n nn.BatchNorm1d(24),\n nn.ReLU(),\n nn.Conv1d(24, 16, kernel_size=3, stride=2),\n nn.BatchNorm1d(16),\n nn.ReLU(),\n nn.Dropout(0.5))\n self.fc1 = nn.Linear(2544, 20)\n self.fc2 = nn.Linear(20, num_classes)\n\n def forward(self, x):\n out = self.layer(x)\n out = out.reshape(out.size(0), -1)\n out = self.fc1(out)\n out = F.relu(out)\n out = F.dropout(out, p=0.5, training=self.training)\n out = self.fc2(out)\n return out\n\n\n\ndef data_prepare(X, y, BATCH_SIZE = 40, shuffle = True):\n\n X = torch.from_numpy(X)\n y = torch.from_numpy(y)\n\n print(X.size(),y.size())\n torch_dataset = Data.TensorDataset(X, y) # 把数据放在数据库中\n loader = Data.DataLoader(\n # 
从dataset数据库中每次抽出batch_size个数据\n dataset=torch_dataset,\n batch_size=BATCH_SIZE,\n shuffle=shuffle, # 将数据打乱\n num_workers=2, # 使用两个线程\n )\n return loader\n\ndef run(X_train,save_name):\n\n label_a = np.load('./data/label_a.npy')\n label_v = np.load('./data/label_v.npy')\n\n y_train = label_a\n X_train = StandardScaler().fit_transform(X_train)\n X_train = X_train.reshape(1280, 1, 1920)\n print(X_train.shape)\n X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.1, random_state=43)\n print(X_train.shape, y_train.shape)\n train_loader = data_prepare(X_train, y_train, BATCH_SIZE=40)\n test_loader = data_prepare(X_test, y_test, shuffle=False, BATCH_SIZE=40)\n\n num_epochs = 5\n num_classes = 2\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model = ConvNet(num_classes).to(device)\n # print(model)\n # Loss and optimizer\n criterion = nn.CrossEntropyLoss()\n optimizer1 = torch.optim.Adam(model.parameters(), lr=0.001, eps=1e-3)\n optimizer2 = torch.optim.RMSprop(model.parameters(), lr=0.001, alpha=0.9)\n optimizer3 = torch.optim.SGD(model.parameters(), lr=0.00001, weight_decay=1e-6, momentum=0.9, nesterov=True)\n\n # Train the model\n total_step = len(train_loader)\n print('total_step: %d'%total_step)\n for epoch in range(num_epochs):\n print('epoch : %d'%(epoch+1))\n model.train()\n for i, (input, labels) in enumerate(train_loader):\n input = input.to(device)\n labels = labels.to(device)\n\n # Forward pass\n outputs = model(input.float())\n loss = criterion(outputs, labels.long())\n # print(loss)\n # Backward and optimize\n optimizer1.zero_grad()\n loss.backward()\n optimizer1.step()\n if (i + 1) % 31 == 0:\n print('Epoch [{}/{}], Step [{}/{}] Loss: {}'\n .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))\n\n # Test the model\n model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)\n with torch.no_grad():\n correct = 0\n total = 0\n for input, labels in test_loader:\n input = input.to(device)\n labels = labels.to(device)\n outputs = model(input.float())\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted.int() == labels.int()).sum().item()\n print('Test Accuracy of the model on the test data: {} %'.format(100 * correct / total))\n\n with torch.no_grad():\n correct = 0\n total = 0\n for input, labels in train_loader:\n input = input.to(device)\n labels = labels.to(device)\n outputs = model(input.float())\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted.int() == labels.int()).sum().item()\n print('Test Accuracy of the model on the train data: {} %'.format(100 * correct / total))\n\n torch.save(model.state_dict(), \"./data/\" + save_name + \".ckpt\")\n\ndef loop():\n data = np.load('./data/seg_data.npy')\n print(data.shape)\n for ch in range(10):\n for seg in range(4):\n X_train = data[ch][seg]\n save_name = \"Ch\" + str(ch) + \"Seg\" + str(seg)\n run(X_train, save_name)\n\ndef test2():\n device = torch.device(\"cuda\")\n data = np.load('./data/seg_data.npy')\n X_train = data[0][0][:40]\n print(X_train.shape)\n X_train = StandardScaler().fit_transform(X_train)\n X_train = X_train.reshape(-1, 1, 1920)\n X_train = torch.from_numpy(X_train)\n X_train = X_train.cuda().float()\n print(X_train.device)\n model = ConvNet(2).to(device)\n model.load_state_dict(torch.load(\"./DNNmodel.ckpt\", map_location=device))\n\n # model.eval()\n outputs = model(X_train)\n\nif __name__ == '__main__':\n 
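# train and checkpoint one ConvNet per (channel, segment) pair - 10 channels x 4 segments\n    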
loop()\n","sub_path":"CNN3.py","file_name":"CNN3.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"550243782","text":"from django.conf import settings\nfrom django.urls import re_path\nfrom django.views.generic import TemplateView\n\n\nif getattr(settings, 'INVITATION_USE_ALLAUTH', False):\n from allauth.account.forms import BaseSignupForm as RegistrationFormTermsOfService\n reg_backend = 'allauth.account.auth_backends.AuthenticationBackend'\nelse:\n from registration.forms import RegistrationFormTermsOfService\n reg_backend = 'registration.backends.default.DefaultBackend'\n \nfrom invitation.views import invite, invited, register, send_bulk_invitations, token\n\nurlpatterns = [\n re_path(r'^invite/complete/$',\n TemplateView.as_view(template_name='invitation/invitation_complete.html'),\n name='invitation_complete'),\n re_path(r'^invite/$',\n invite,\n name='invitation_invite'),\n re_path(r'^invite/bulk/$',\n send_bulk_invitations,\n name='invitation_invite_bulk'),\n re_path(r'^invited/(?P\\w+)&(?P\\S+@\\S+)?/$', \n invited,\n name='invitation_invited'),\n re_path(r'^register/$',\n register,\n { 'backend': reg_backend },\n name='registration_register'),\n re_path(r'^token/(?P\\w+)/$', \n token,\n name='invitation_token'),\n]\n","sub_path":"invitation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"501139254","text":"from django import forms\n\nfrom fas_questionnaire.forms.common import ListTextWidget\nfrom ..models.page_13 import PatternOfAgriculturalLabouringOut, TypeOfWage, WageUnit\nfrom ..models.common import Crop, Sex, PlaceOfWork\nfrom ..models.page1 import HouseholdMembers\n\n\nclass PatternOfAgriculturalLabouringOutForm(forms.ModelForm):\n \"\"\"Pattern of Agricultural Labouring Out Form\"\"\"\n class Meta:\n model = PatternOfAgriculturalLabouringOut\n exclude = ['household']\n widgets = None\n localized_fields = None\n labels = {}\n help_texts = {}\n error_messages = {}\n\n def __init__(self, *args, **kwargs):\n super(PatternOfAgriculturalLabouringOutForm, self).__init__(*args, **kwargs)\n sex_list = Sex.objects.values_list('sex')\n self.fields['sex'].widget = ListTextWidget(data_list=sex_list, name='sex_list')\n\n crop_list = Crop.objects.values_list('name')\n self.fields['crop'].widget = ListTextWidget(data_list=crop_list, name='crop_list')\n\n wages_list = TypeOfWage.objects.values_list('type')\n self.fields['type_of_wage'].widget = ListTextWidget(data_list=wages_list, name='wages_list')\n\n places_list = PlaceOfWork.objects.values_list('place')\n self.fields['place_of_work'].widget = ListTextWidget(data_list=places_list, name='places_list')\n\n wage_unit_list = WageUnit.objects.values_list('unit')\n self.fields['unit'].widget = ListTextWidget(data_list=wage_unit_list, name='wage_unit_list')\n\n","sub_path":"fas_questionnaire_site/fas_questionnaire/forms/page_13.py","file_name":"page_13.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"25021785","text":"\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static, serve\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib import admin\nfrom blog import views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', 
views.index, name='index'),\n url(r'^new/', views.new, name='new'),\n url(r'^delete/', views.delete, name='delete'),\n url(r'^edit/', views.edit, name='edit'),\n url(r'^view_entry/(?P\\d+)*$', views.entry, name='view_entry'),\n url(r'^show/', views.show, name='show'),\n url(r'^accounts/', include('accounts.urls')),\n url(r'^art/', include('art.urls')),\n #url(r'^media/(?P.*)$', serve, {'document_root': settings.MEDIA_ROOT}),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"616263839","text":"'''\n\nI think we all know what this problem is. \n'''\n\ndef three_sum(arr):\n if not nums:\n return []\n result = []\n nums.sort()\n for i in range(0, len(nums)-2):\n target = -nums[i]\n left = i+1\n right = len(nums)-1\n if i > 0 and nums[i] == nums[i-1]:\n continue\n while left < right:\n if nums[left] + nums[right] == target:\n result.append([nums[i], nums[left], nums[right]])\n left += 1\n right -= 1\n while left < right:\n if nums[left] != nums[left-1]:\n break\n left += 1\n while left < right:\n if nums[right] != nums[right+1]:\n break\n right -= 1\n elif nums[left] + nums[right] > target:\n right -= 1\n else:\n left += 1\n return result\n","sub_path":"other/array/3sum.py","file_name":"3sum.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"508047998","text":"from django.dispatch import receiver\nfrom src.aggregates.client.services import client_service\nfrom src.aggregates.engagement_assignment.models import EngagementAssignment\nfrom src.aggregates.engagement_assignment.signals import created\nfrom src.aggregates.engagement_opportunity.services import engagement_opportunity_service\nfrom src.aggregates.profile.services import profile_service\nfrom src.apps.graph import constants\nfrom src.apps.graph.engagement_assignment.services import engagement_assignment_graph_tasks\nfrom src.libs.common_domain.decorators import event_idempotent\n\n\n@event_idempotent\n@receiver(created)\ndef engagement_assignment_created_callback(**kwargs):\n client_id = kwargs.pop('client_id')\n assignment_attrs = kwargs.pop('assignment_attrs')\n\n if constants.ASSIGNED_EO_IDS in assignment_attrs:\n\n eo_uids = []\n for eo_id in assignment_attrs[constants.ASSIGNED_EO_IDS]:\n eo = engagement_opportunity_service.get_engagement_opportunity(eo_id)\n eo_uids.append(eo.engagement_opportunity_uid)\n assignment_attrs[constants.ASSIGNED_EO_UIDS] = eo_uids\n del assignment_attrs[constants.ASSIGNED_EO_IDS]\n\n if constants.ASSIGNED_PROFILE_IDS in assignment_attrs:\n\n profile_uids = []\n for profile_id in assignment_attrs[constants.ASSIGNED_PROFILE_IDS]:\n profile = profile_service.get_profile(profile_id)\n profile_uids.append(profile.profile_uid)\n assignment_attrs[constants.ASSIGNED_PROFILE_UIDS] = profile_uids\n del assignment_attrs[constants.ASSIGNED_PROFILE_IDS]\n\n client_uid = client_service.get_client_from_id(client_id).client_uid\n\n engagement_assignment_graph_tasks.create_engagement_assignment_in_graphdb_task.delay(\n kwargs['engagement_assignment_uid'],\n client_uid,\n assignment_attrs\n 
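# assignment_attrs now references engagement-opportunity and profile uids instead of raw ids\n    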
)\n","sub_path":"src/apps/graph/engagement_assignment/event_handlers.py","file_name":"event_handlers.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"232516617","text":"from time import time\n\n\nwith open('buy.csv') as f1:\n f1.__next__()\n keys_buy = [x.split('\\t')[0] for x in f1]\n\n\nwith open('essay.csv') as f2:\n f2.__next__()\n keys_essay = [x.split('\\t')[0] for x in f2][:10000]\n\n\nt1 = time()\n\nresults = list()\n\nfor k in keys_buy:\n if k in keys_essay:\n results.append(k)\n\nt2 = time()\n\n\nprint('All done! Time is: ', round(t2-t1, 4))\nprint('Results: ', len(results))\n","sub_path":"py4seo/Код с занятий/lesson5/diff_list.py","file_name":"diff_list.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"131566018","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\nfrom error_handler import *\nfrom history import history\nfrom cmd import Cmd\nimport yaml\nimport os\n\n# add path\nrootpath = os.path.abspath('..')\n\n# define dictionary process\nclass youdao_dict(object):\n # initiate basic settings\n def __init__(self, word, deepth=5):\n self.deepth = deepth\n self.word = word\n url = requests.get(\n 'http://www.youdao.com/w/eng/{}/#keyfrom=dict2.index'.format(word))\n url.encoding = 'utf-8'\n html = url.text\n self.soup = BeautifulSoup(html, 'lxml')\n\n # capture webpage\n def website_capture(self):\n self.translation = self.soup.find(\n name='div', attrs={'class': 'trans-container'})\n self.wordgroup = self.soup.find_all(\n name='p', attrs={'class': 'wordGroup'})\n self.example = self.soup.find(\n name='div', attrs={'id': 'examplesToggle'})\n\n # analyze and display search result\n def display(self):\n count = 0\n wordgroup_list = []\n example_list = []\n\n print('=============\\n' + self.word + '\\n=============')\n\n # print all Chinese explanation\n self.translation = self.translation.ul.get_text()\n print('{}'.format(self.translation), end='\\r')\n print('-----------------------------------------------')\n\n # print the first n wordgroups, n equals to self.deepth\n print('词组:')\n for i in self.wordgroup:\n wordgroup = i.a.get_text()\n meaning = re.sub(r'[a-zA-Z]*(\\s)', '', i.get_text())\n wordgroup_list.append(('{0:25}'.format(wordgroup) + '|' + meaning))\n try:\n print(wordgroup_list[-1])\n except UnicodeEncodeError:\n pass\n count += 1\n if count >= self.deepth:\n count = 0\n break\n print('-----------------------------------------------')\n\n # print the first 3 example sentence\n print('例句:')\n example_div = self.example.ul.find_all(name='li')\n for i in example_div:\n example = re.sub(r'[0-9a-z]*.[0-9a-z]*.com|《[\\u4e00-\\u9fa5]*》', '', i.get_text())\n example = re.sub(r'\\n', '', example, 3)\n example = example.replace('\\n', '/', 1)\n example = re.sub(r'\\n', '', example)\n example = re.split('/', example)\n example_list.append(('| ' + example[0] + '\\n' + example[1]))\n print(example_list[-1])\n\n # return translations, wordgroups and example sencences\n return self.translation, wordgroup_list, example_list\n\n# define CLI part\nclass Client(Cmd):\n \"\"\"docstring for Cli\"\"\"\n prompt = 'yuci> '\n intro = '''\n ===============================\\n\n ==Welcome to yuci-dictionary!==\\n\n ==========version 0.1.5========\\n'''\n\n # pre settings, loading configs from config.yml\n def __init__(self):\n Cmd.__init__(self)\n # get config from config.yml\n with 
open(rootpath + \"\\\\dictionary\\\\data\\\\config.yml\", 'r') as file:\n            self.config = yaml.load(file, Loader=yaml.SafeLoader)\n        self.deepth = int(self.config['search_setting']['deepth'])\n        self.history = bool(self.config['search_setting']['history'])\n\n    # SETTING\n    def do_setting(self, arg):\n        # transform new changes from import.\n        if arg != '':\n            Input_check(arg, ['201'])\n            arg = re.sub(r'([a-z]*)=|\\s*', '', arg)\n            changes = arg.split(',')\n            deepth = changes[0]\n            history = changes[1]\n            Input_check(deepth, ['203'])\n            Input_check(history, ['202'])\n\n        # display previous setting\n        settings_warning = '''\n        Be careful with the settings below.\n        If you don\\'t know what are their usages,\n        just keep it default.\\n\n        '''\n        print('|settings|\\n' + settings_warning)\n        for i in self.config:\n            print(i)\n            for m in self.config[i]:\n                print('    ' + m + ':' + self.config[i][m])\n\n        if arg != '':\n            if history is not True or deepth != 5:\n                changes = {'search_setting': {'deepth': deepth, 'history': history}}\n                print('-----------------\\n' + 'PLEASE CONFIRM:\\nNew changes: deepth:{0}, history:{1}\\n'.format(deepth, history))\n                confirm = input('ARE YOU SURE TO APPLY CHANGES?(Y/N):')\n                if confirm == 'Y':\n                    self.config = changes\n                    with open(rootpath + \"\\\\dictionary\\\\data\\\\config.yml\", 'w') as file:\n                        yaml.dump(self.config, file)\n                    print('Restart the program to activate the changes.')\n                elif confirm == 'N':\n                    pass\n\n    def do_search(self, arg):\n        # Check error in input\n        Input_check(arg, ['101'])\n\n        # web search & show result\n        self.obj = youdao_dict(word=arg, deepth=self.deepth)\n        self.obj.website_capture()\n        translation, wordgroup, example = self.obj.display()\n\n        # history recording\n        if self.history is True:\n            history(arg, self.deepth, self.history, translation, wordgroup, example)\n\n    def do_version(self, arg):\n        print('yuci-dictionary version 0.1.5')\n\n    def do_exit(self, arg):\n        print('BYE!')\n        return True\n\n    # help files\n    def help_search(self):\n        print('Description: Search for a word\\nKeyword: word\\nKeyword_type: str\\nUsage: search word')\n\n    def help_s(self):\n        print('Description: Shortcut for search')\n\n    def help_setting(self):\n        print('Description: Change setting\\nKeyword: deepth, history\\nkeyword_type: int, bool\\nTip: The order must not be changed, every keyword must have a value.\\nUsage: setting deepth,history')\n\n    def help_exit(self):\n        print('Description: Exit the program\\nUsage: exit')\n\n    def help_version(self):\n        print('Description: Check program version\\nUsage: version')\n\n    # CLI setting\n    def default(self, line):\n        print('No command found.')\n\n    def emptyline(self):\n        pass\n\n    def precmd(self, line):\n        print('-----------------------------')\n        return Cmd.precmd(self, line)\n\n    # shortcut setting\n    do_s = do_search\n\ndef main():\n    Client().cmdloop()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"dictionary/youdao_dict.py","file_name":"youdao_dict.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"411652956","text":"import copy\nimport random\nfrom functools import wraps\nfrom batchgenerators.transforms import DataChannelSelectionTransform, SegChannelSelectionTransform, SpatialTransform, \\\n    GammaTransform, MirrorTransform, Compose\nfrom batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, \\\n    ContrastAugmentationTransform, BrightnessTransform\nfrom batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform\nfrom 
batchgenerators.transforms.resample_transforms import SimulateLowResolutionTransform\nfrom batchgenerators.transforms.utility_transforms import RemoveLabelTransform, RenameTransform, NumpyToTensor\nfrom tuframework.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params\nfrom tuframework.network_architecture.neural_network import SegmentationNetwork\nfrom tuframework.byol.aug3d import RandCrop_tu,Gaussiannoise_tu,Mirror_tu,Spatial_tansform_tu\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import transforms as T\n\n# helper functions\nclass ResNeXt3D(nn.Module):\n\n def __init__(self, block, layers, shortcut_type='B', cardinality=32, num_classes=400):\n self.inplanes = 64\n super(ResNeXt3D, self).__init__()\n self.conv1 = nn.Conv3d(1, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)\n # self.bn1 = nn.BatchNorm3d(64)\n self.gn1 = nn.GroupNorm(32, 64)\n self.relu = nn.PReLU()\n self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type, cardinality)\n self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, cardinality, stride=(1, 2, 2))\n self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, cardinality, stride=1)\n self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type, cardinality, stride=1)\n self.avgpool = nn.AdaptiveAvgPool3d(1)\n self.fc = nn.Linear(1024, num_classes)\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n def _make_layer(self, block, planes, blocks, shortcut_type, cardinality, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv3d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False),\n # nn.BatchNorm3d(planes * block.expansion)\n nn.GroupNorm(32, planes * block.expansion),\n )\n layers = []\n layers.append(\n block(self.inplanes, planes, cardinality, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, cardinality))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n #print(\"RES:x.shape:\",x.shape)\n x = self.conv1(x)\n #print(\"RES:x.shape:\",x.shape)\n x = self.gn1(x)\n #print(\"RES:x.shape:\",x.shape)\n x = self.relu(x)\n #print(\"RES:x.shape:\",x.shape)\n x = self.maxpool(x)\n #print(\"RES:x.shape:\",x.shape)\n\n\n x = self.layer1(x)\n #print(\"RES:x.shape:\",x.shape)\n\n x = self.layer2(x)\n #print(\"RES:x.shape:\",x.shape)\n\n x = self.layer3(x)\n #print(\"RES:x.shape:\",x.shape)\n x = self.layer4(x)\n #print(\"RES:x.shape:\",x.shape)\n x = self.avgpool(x)\n #print(\"RES:x.shape:\",x.shape)\n x = x.view(x.size(0), -1)\n #print(\"RES:x.shape:\",x.shape)\n ###print(\"RES:x.shape:\",x.shape)\n return x\nclass ResNeXtBottleneck(nn.Module):\n expansion = 2\n def __init__(self, inplanes, planes, cardinality, stride=1,\n downsample=None):\n super(ResNeXtBottleneck, self).__init__()\n mid_planes = cardinality * int(planes / 32)\n self.conv1 = nn.Conv3d(inplanes, mid_planes, kernel_size=1, bias=False)\n self.gn1 = nn.GroupNorm(32, mid_planes)\n # self.bn1 = nn.BatchNorm3d(mid_planes)\n self.conv2 = nn.Conv3d(\n mid_planes,\n mid_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n 
groups=cardinality,\n bias=False)\n self.gn2 = nn.GroupNorm(32, mid_planes)\n # self.bn2 = nn.BatchNorm3d(mid_planes)\n self.conv3 = nn.Conv3d(\n mid_planes, planes * self.expansion, kernel_size=1, bias=False)\n # self.bn3 = nn.BatchNorm3d(planes * self.expansion)\n self.gn3 = nn.GroupNorm(32, planes * self.expansion)\n self.relu = nn.PReLU()\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.gn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.gn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.gn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\ndef default(val, def_val):\n return def_val if val is None else val\n\ndef flatten(t):\n return t.reshape(t.shape[0], -1)\n\ndef singleton(cache_key):\n def inner_fn(fn):\n @wraps(fn)\n def wrapper(self, *args, **kwargs):\n instance = getattr(self, cache_key)\n if instance is not None:\n return instance\n\n instance = fn(self, *args, **kwargs)\n setattr(self, cache_key, instance)\n return instance\n return wrapper\n return inner_fn\n\ndef get_module_device(module):\n return next(module.parameters()).device\n\ndef set_requires_grad(model, val):\n for p in model.parameters():\n p.requires_grad = val\n\n# loss fn\n\ndef loss_fn(x, y):\n x = F.normalize(x, dim=-1, p=2)\n y = F.normalize(y, dim=-1, p=2)\n return 2 - 2 * (x * y).sum(dim=-1)\n\n# augmentation utils\n\nclass RandomApply(nn.Module):\n def __init__(self, fn, p):\n super().__init__()\n self.fn = fn\n self.p = p\n def forward(self, x):\n if random.random() > self.p:\n return x\n return self.fn(x)\n\n# exponential moving average\n\nclass EMA():\n def __init__(self, beta):\n super().__init__()\n self.beta = beta\n\n def update_average(self, old, new):\n if old is None:\n return new\n return old * self.beta + (1 - self.beta) * new\n\ndef update_moving_average(ema_updater, ma_model, current_model):\n for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):\n old_weight, up_weight = ma_params.data, current_params.data\n ma_params.data = ema_updater.update_average(old_weight, up_weight)\n\n# MLP class for projector and predictor\n\nclass MLP(nn.Module):\n def __init__(self, dim, projection_size, hidden_size = 4096):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(dim, hidden_size),\n nn.BatchNorm1d(hidden_size),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_size, projection_size)\n )\n\n def forward(self, x):\n return self.net(x)\n\n# a wrapper class for the base neural network\n# will manage the interception of the hidden layer output\n# and pipe it into the projecter and predictor nets\n\nclass NetWrapper(nn.Module):\n def __init__(self, net, projection_size, projection_hidden_size, layer = -2):\n super().__init__()\n self.net = net\n self.layer = layer # -2\n self.projector = None\n self.projection_size = projection_size #256\n self.projection_hidden_size = projection_hidden_size #4096\n self.hidden = {}\n self.hook_registered = False\n\n def _find_layer(self):\n if type(self.layer) == str:\n modules = dict([*self.net.named_modules()])\n return modules.get(self.layer, None)\n elif type(self.layer) == int:\n children = [*self.net.children()]\n return children[self.layer]\n return None\n\n def _hook(self, _, input, output):\n device = input[0].device\n self.hidden[device] = flatten(output)\n\n def _register_hook(self):\n layer = self._find_layer()\n assert 
layer is not None, f'hidden layer ({self.layer}) not found'\n handle = layer.register_forward_hook(self._hook)\n self.hook_registered = True\n\n @singleton('projector')\n def _get_projector(self, hidden):\n _, dim = hidden.shape\n projector = MLP(dim, self.projection_size, self.projection_hidden_size)\n return projector.to(hidden)\n\n def get_representation(self, x):\n if self.layer == -1:\n return self.net(x)\n\n if not self.hook_registered:\n self._register_hook()\n\n self.hidden.clear()\n _ = self.net(x)\n hidden = self.hidden[x.device]\n self.hidden.clear()\n\n assert hidden is not None, f'hidden layer {self.layer} never emitted an output'\n return hidden\n\n def forward(self, x, return_projection = True):\n print(\"NW:x.shape:\",x.shape)\n representation = self.get_representation(x)\n print(\"NW:representation.shape:\", representation.shape)\n if not return_projection:\n return representation\n\n projector = self._get_projector(representation)\n projection = projector(representation)\n return projection, representation\n\n# main class\n\nclass BYOL(SegmentationNetwork):\n def __init__(\n self,\n num_classes,deep_supervision,image_size=(8,128,128),\n hidden_layer = -1,\n projection_size = 256,\n projection_hidden_size = 4096,\n augment_fn = None,\n augment_fn2 = None,\n moving_average_decay = 0.99,\n use_momentum = True,\n\n\n ):\n super().__init__()\n self.net = ResNeXt3D(ResNeXtBottleneck, [3, 4, 6, 3], num_classes=2)\n self.do_ds = False\n norm_cfg = 'BN'\n activation_cfg = 'ReLU'\n self.conv_op = nn.Conv3d\n self.norm_op = nn.BatchNorm3d\n self.dropout_op = nn.Dropout3d\n self.num_classes = num_classes\n self._deep_supervision = deep_supervision\n self.do_ds = deep_supervision\n # default SimCLR augmentation\n\n\n DEFAULT_AUG = nn.Sequential(\n RandCrop_tu(image_size=image_size,crop_size=(8,64,64)),\n RandomApply(\n Gaussiannoise_tu(image_size=(8,64,64),SNR=20),\n p=0.5\n ),\n RandomApply(\n Mirror_tu(image_size=(8, 64, 64)),\n p=0.5\n ),\n RandomApply(\n Spatial_tansform_tu(image_size=(8, 64, 64)),\n p=0.5\n ),\n )\n\n self.augment1 = default(augment_fn, DEFAULT_AUG)\n self.augment2 = default(augment_fn2, self.augment1)\n\n self.online_encoder = NetWrapper(self.net, projection_size, projection_hidden_size, layer=hidden_layer)\n\n self.use_momentum = use_momentum\n self.target_encoder = None\n self.target_ema_updater = EMA(moving_average_decay)\n\n self.online_predictor = MLP(projection_size, projection_size, projection_hidden_size)\n\n # get device of network and make wrapper same device\n device = get_module_device(self.net)\n self.to(device)\n\n # send a mock image tensor to instantiate singleton parameters\n #self.forward(torch.randn(2, 3, image_size[0], image_size[1],image_size[2], device=device))\n\n @singleton('target_encoder')\n def _get_target_encoder(self):\n target_encoder = copy.deepcopy(self.online_encoder)\n set_requires_grad(target_encoder, False)\n return target_encoder\n\n def reset_moving_average(self):\n del self.target_encoder\n self.target_encoder = None\n\n def update_moving_average(self):\n assert self.use_momentum, 'you do not need to update the moving average, since you have turned off momentum for the target encoder'\n assert self.target_encoder is not None, 'target encoder has not been created yet'\n update_moving_average(self.target_ema_updater, self.target_encoder, self.online_encoder)\n\n def forward(self,x,return_embedding = False,return_projection = True):\n if return_embedding:\n return self.online_encoder(x, return_projection = return_projection)\n 
#print(\"//////////////////////////////\")\n #print(\"x.shape\",x.shape)\n image_one, image_two = self.augment1(x),self.augment2(x)\n #print(\"image_one.shape\",image_one.shape)\n online_proj_one, _ = self.online_encoder(image_one)\n online_proj_two, _ = self.online_encoder(image_two)\n\n online_pred_one = self.online_predictor(online_proj_one)\n online_pred_two = self.online_predictor(online_proj_two)\n with torch.no_grad():\n target_encoder = self._get_target_encoder() if self.use_momentum else self.online_encoder\n target_proj_one, _ = target_encoder(image_one)\n target_proj_two, _ = target_encoder(image_two)\n target_proj_one.detach_()\n target_proj_two.detach_()\n\n loss_one = loss_fn(online_pred_one, target_proj_two.detach())\n loss_two = loss_fn(online_pred_two, target_proj_one.detach())\n\n loss = loss_one + loss_two\n return loss.mean()","sub_path":"tuframework/byol/byol.py","file_name":"byol.py","file_ext":"py","file_size_in_byte":13766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"201528534","text":"import tensorflow.compat.v1 as tf\r\ntf.disable_v2_behavior() \r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport matplotlib.image as mpimg\r\nimport random\r\nimport pickle\r\nimport scipy.io as sio\r\nfrom numpy.random import seed\r\nfrom numpy.random import randint\r\nfrom sklearn.metrics import confusion_matrix\r\nseed(1)\r\n\r\n#loading data and label \r\ndata_file = open(\"youtube_train_data.pkl\",\"rb\")\r\nall_data,all_labels = pickle.load(data_file,encoding='latin1')\r\ntrain_data = all_data[:6000,:,:,:,:]\r\ntrain_labels = all_labels[:6000,:,:,:]\r\ntest_data = all_data[6000:,:,:,:,:]\r\ntest_labels = all_labels[6000:,:,:,:]\r\ndata_file.close()\r\n\r\n#data normalization \r\n#Hyperparameter Setting \r\nbatch_size = 5\r\nmax_training_epochs = 15000\r\ndisplay_step = 100\r\nlength_train = len(train_data)\r\nlength_test = len(test_data)\r\n\r\n#%----------------Forward Propagation----------------------\r\ntf.reset_default_graph()\r\nsigma_init = 0.1\r\ninput_frames = tf.placeholder(dtype=tf.uint8,shape = [None, 10, 64, 64,3],name = 'input_frames')\r\ntrue_output = tf.placeholder(dtype=tf.float32,shape = [None,10,7,2],name = 'true_output')\r\n\r\n\r\n#Data Preprocessing by scaling and normalizing\r\ninput_frms_float = tf.dtypes.cast(input_frames,tf.float32,name = 'input_frms_float')\r\nmean_input = tf.math.reduce_mean(input_frms_float,axis=(2,3,4),keepdims=True)\r\nstd_input = tf.math.reduce_variance(input_frms_float,axis=(2,3,4),keepdims=True)\r\nx = tf.nn.batch_normalization(input_frms_float,mean_input,std_input,0,1,1e-4)\r\n\r\n#Weigths of the First Convolutional Layer \r\nW1 = tf.Variable(tf.random_normal([5,5,3,32],mean=0,stddev=1/(5*5*3),dtype='float32'),name='W1')\r\nW1_0 = tf.Variable(tf.zeros([32],dtype='float32'),name='W1_0')\r\n\r\n#Weights of the Second Convolutional Layer\r\nW2 = tf.Variable(tf.random_normal([5,5,32,32],mean=0,stddev=1/(5*5*3),dtype='float32'),name='W2')\r\nW2_0 = tf.Variable(tf.zeros([32],dtype='float32'),name='W2_0')\r\n\r\n#Weights of the Third Convolutional Layer\r\nW3 = tf.Variable(tf.random_normal([3,3,32,64],mean=0,stddev=1/(3*3*32),dtype='float32'),name='W3')\r\nW3_0 = tf.Variable(tf.zeros([64],dtype='float32'),name='W3_0')\r\nFc_list = []\r\nfor i in range(10):\r\n xi = x[:,i,:,:,:]\r\n Conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(xi,W1,strides=[1,1,1,1],padding='VALID'),W1_0,name='Conv1'))\r\n Pool1 = tf.nn.max_pool2d(Conv1,ksize=[1, 2, 2, 
1],strides=[1, 2, 2, 1],padding='VALID',name='Pool1')\r\n Conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(Pool1,W2,strides=[1,1,1,1],padding='VALID'),W2_0,name='Conv2'))\r\n Pool2 = tf.nn.max_pool2d(Conv2,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='VALID',name='Pool2')\r\n Conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(Pool2,W3,strides=[1,1,1,1],padding='VALID'),W3_0,name='Conv3'))\r\n Fc_list.append(tf.reshape(Conv3,[-1,11*11*64],name='Fc'))\r\n \r\nrnn_input = tf.stack(Fc_list,1)\r\n\r\n#instantiate a LSTM model\r\nnum_units = 42\r\nlstm_cell = tf.nn.rnn_cell.LSTMCell(num_units)\r\nh_val, _ = tf.nn.dynamic_rnn(lstm_cell,rnn_input,dtype=tf.float32)\r\n\r\n#Layer between Hidden State and Output\r\nWc = tf.Variable(tf.random_normal([num_units,14],mean=0,stddev=1/(num_units),dtype='float32'),name='Wc')\r\nbc = tf.Variable(tf.zeros([14],dtype='float32'),name='bc')\r\n\r\n\r\n#collection of all the final output \r\nfinal_output_list = []\r\nfor i in range(10):\r\n output = tf.add(tf.matmul(h_val[:,i,:],Wc),bc)\r\n output = tf.reshape(output,[-1,7,2])\r\n final_output_list.append(output)\r\n\r\nfinal_output = tf.stack(final_output_list,1)\r\njoint_pos = tf.identity(final_output,name='joint_pos')\r\n\r\n#Create the collection \r\ntf.get_collection(\"validation_nodes\")\r\n#Add stuff to the collection \r\ntf.add_to_collection(\"validation_nodes\",input_frames)\r\ntf.add_to_collection(\"validation_nodes\", joint_pos)\r\n\r\nsaver = tf.train.Saver()\r\n\r\n\r\n\r\n#%-----------------Loss Function-------------------------------------------\r\nloss_func = tf.compat.v1.losses.mean_squared_error(labels=true_output,predictions=joint_pos)\r\ncost = tf.div(tf.reduce_sum(loss_func),batch_size)\r\ntrain_step = tf.train.AdamOptimizer(0.0001).minimize(loss_func)\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\nsess = tf.Session(config=config)\r\ninit = tf.global_variables_initializer()\r\n\r\ntrain_err_list = []\r\ntest_err_list = []\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n train_err = []\r\n test_err = []\r\n cost_list = []\r\n epoch = 0 \r\n while epoch < max_training_epochs:\r\n\r\n #Random batchfor SGD \r\n random_inds = randint(0,length_train-1,batch_size) \r\n batch_x = train_data[random_inds,:]\r\n batch_y = train_labels[random_inds]\r\n \r\n feed_dict_train = {input_frames:batch_x,true_output:batch_y};\r\n train_step.run(feed_dict=feed_dict_train)\r\n #print(\"Epoch number \" + str(epoch))\r\n cost_ep = cost.eval(feed_dict_train)\r\n #print(cost_ep)\r\n cost_list.append(cost_ep)\r\n \r\n #Training Error\r\n predictions = sess.run(joint_pos, feed_dict = {input_frames:batch_x})\r\n labels = batch_y\r\n train_err = np.mean(np.linalg.norm(predictions.reshape((-1,2)) - labels.reshape((-1,2)), axis = 1))\r\n\r\n #Testing Error\r\n random_inds = randint(0,length_test-1,batch_size)\r\n test_x = test_data[random_inds,:]\r\n test_y = test_labels[random_inds]\r\n predictions = sess.run(joint_pos, feed_dict = {input_frames: test_x})\r\n labels = test_y\r\n test_err = np.mean(np.linalg.norm(predictions.reshape((-1,2)) - labels.reshape((-1,2)), axis = 1))\r\n if (epoch+1) % display_step == 0:\r\n print(\"Epoch number \" + str(epoch))\r\n print(cost_ep)\r\n print(\"Training Error is \" + str(train_err))\r\n print(\"Testing Error is \"+ str(test_err))\r\n train_err_list.append(train_err)\r\n test_err_list.append(test_err)\r\n epoch = epoch + 1\r\n save_path = saver.save(sess,\"my_model\")\r\n random_inds = randint(0,length_test-1,100)\r\n test_x = test_data[random_inds,:]\r\n 
test_y = test_labels[random_inds]\r\n        predictions = sess.run(joint_pos, feed_dict = {input_frames: test_x})\r\n        labels = test_y\r\n        joint_err = np.mean(np.reshape(np.sqrt(np.sum(np.square(labels - predictions),axis=3)),[-1,7]),axis = 0)\r\nprint(joint_err)\r\n#%% plot the training error\r\n#%% plot the overall training accuracy\r\nplt.plot(train_err_list)\r\nplt.xlabel('per 100 iterations')\r\nplt.ylabel('Average Pixel Distance error for training')\r\nplt.savefig('average_error_train.jpg')\r\nplt.close()\r\n\r\n#%% plot the overall testing error\r\nplt.plot(test_err_list)\r\nplt.xlabel('per 100 iterations')\r\nplt.ylabel('Average Pixel Distance error for testing')\r\nplt.savefig('average_error_test.jpg')\r\nplt.close()\r\n\r\n#Visualization of Pose Estimation\r\nfigs,axs = plt.subplots(2,3,constrained_layout = True)\r\nfor i in range(6):\r\n    image = test_x[0][i]\r\n    r = np.int32(np.divide(i,3))\r\n    c = np.int32(np.mod(i,3))\r\n    axs[r,c].set_title(str(i))\r\n    axs[r,c].imshow(image)\r\n    pred = np.round(predictions[0][i])\r\n    gt = np.round(labels[0][i])\r\n    for j in range(7):\r\n        axs[r,c].scatter(pred[j][0],pred[j][1],s=100,c='red',marker='x')\r\n        axs[r,c].scatter(gt[j][0],gt[j][1],s=100,c='blue',marker='x')\r\n\r\nfigs.suptitle('Sequence of Images with Body pose markers')\r\n#plt.show()\r\nplt.savefig('body_pose.jpg')\r\nplt.close()\r\njoint_err_all = np.reshape(np.sqrt(np.sum(np.square(labels - predictions),axis=3)),[-1,7])\r\n\r\nmy_color = ['b', 'g', 'r', 'c','m','y','k']\r\nmy_label = ['head', 'right shoulder', 'left shoulder', 'right wrist', 'left wrist' , 'right elbow', 'left elbow']\r\n# plot the accuracy curve for all 7 joints\r\nfor i in range(7):\r\n    joint_err = np.array(joint_err_all[:,i])\r\n    acc_list = []\r\n    for dev in range(21):\r\n        acc = np.sum(joint_err < dev )/ 1000 \r\n        acc_list.append(acc)\r\n    plt.plot(acc_list,color = my_color[i],label = my_label[i])\r\nplt.legend(loc='best')\r\nplt.ylabel('Accuracy[%]')\r\nplt.xlabel('pixel distance from GT')\r\nplt.title('prediction accuracy within 20 pixel')\r\nplt.savefig('accuracy_curve.jpg')\r\nexit()\r\n\r\n\r\n\r\n","sub_path":"body_pose_estimation_LSTM.py","file_name":"body_pose_estimation_LSTM.py","file_ext":"py","file_size_in_byte":8023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268873353","text":"import tkinter #Not following the convention, this is to increase awareness of where the imports are coming from\nfrom tkinter import ttk\nimport kh_model\nimport well_being\nimport feom\n\n\nif __name__ == \"__main__\":\n    root = tkinter.Tk()\n    root.title(\"Må bra för Pappa och Tord\")\n\n    notebook = ttk.Notebook(root)\n\n    activities_frame = well_being.BuddhistWellBeingFrameV(notebook)  # , padding=\"10 10 20 20\"\n    activities_frame.columnconfigure(0, weight=1)\n    activities_frame.rowconfigure(0, weight=1)\n\n    responses_frame = ttk.Frame(notebook)\n\n    diary_frame = ttk.Frame(notebook)\n\n    # experimental_frame = pomodoro.PomodoroFrameV(notebook)\n\n    feom_frame = feom.FeomFrameV(notebook)\n\n    # notebook.add(experimental_frame, text=\"Pomodoro\")\n    notebook.add(activities_frame, text=\"Activities\")\n    notebook.add(responses_frame, text=\"Responses\")\n    notebook.add(diary_frame, text=\"Diary\")\n    notebook.add(feom_frame, text=\"4 Est of Mindfulness\")\n\n    notebook.pack(fill=tkinter.BOTH)\n\n    # root.iconbitmap(default=\"favicon.xpm\")\n    #image = tkinter.Image(\"photo\", file=\"favicon.png\")\n    #root.iconphoto(image)\n    #root.iconwindow(\"@favicon.xpm\")\n\n    
root.mainloop()\n","sub_path":"kh_main.py","file_name":"kh_main.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"605214990","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport math\nimport torch\nimport unittest\nfrom gpytorch.kernels import SpectralMixtureKernel\n\n\nclass TestSpectralMixtureKernel(unittest.TestCase):\n def test_standard(self):\n a = torch.tensor([4, 2, 8], dtype=torch.float).view(3, 1)\n b = torch.tensor([0, 2], dtype=torch.float).view(2, 1)\n means = [1, 2]\n scales = [0.5, 0.25]\n weights = [4, 2]\n kernel = SpectralMixtureKernel(num_mixtures=2)\n kernel.initialize(\n log_mixture_weights=torch.tensor([[4, 2]], dtype=torch.float).log(),\n log_mixture_means=torch.tensor([[[[1]], [[2]]]], dtype=torch.float).log(),\n log_mixture_scales=torch.tensor([[[[0.5]], [[0.25]]]], dtype=torch.float).log(),\n )\n kernel.eval()\n\n actual = torch.zeros(3, 2)\n for i in range(3):\n for j in range(2):\n for k in range(2):\n new_term = torch.cos(2 * math.pi * (a[i] - b[j]) * means[k])\n new_term *= torch.exp(-2 * (math.pi * (a[i] - b[j])) ** 2 * scales[k] ** 2)\n new_term *= weights[k]\n actual[i, j] += new_term.item()\n\n res = kernel(a, b).evaluate()\n self.assertLess(torch.norm(res - actual), 1e-5)\n\n def test_batch_separate(self):\n a = torch.tensor([[4, 2, 8], [1, 2, 3]], dtype=torch.float).view(2, 3, 1)\n b = torch.tensor([[0, 2], [-1, 2]], dtype=torch.float).view(2, 2, 1)\n means = torch.tensor([[1, 2], [2, 3]], dtype=torch.float).view(2, 2, 1, 1)\n scales = torch.tensor([[0.5, 0.25], [0.25, 1]], dtype=torch.float).view(2, 2, 1, 1)\n weights = torch.tensor([[4, 2], [1, 2]], dtype=torch.float).view(2, 2)\n kernel = SpectralMixtureKernel(batch_size=2, num_mixtures=2)\n kernel.initialize(\n log_mixture_weights=weights.log(), log_mixture_means=means.log(), log_mixture_scales=scales.log()\n )\n kernel.eval()\n\n actual = torch.zeros(2, 3, 2)\n for l in range(2):\n for k in range(2):\n for i in range(3):\n for j in range(2):\n new_term = torch.cos(2 * math.pi * (a[l, i] - b[l, j]) * means[l, k])\n new_term *= torch.exp(-2 * (math.pi * (a[l, i] - b[l, j])) ** 2 * scales[l, k] ** 2)\n new_term *= weights[l, k]\n actual[l, i, j] += new_term.item()\n\n res = kernel(a, b).evaluate()\n self.assertLess(torch.norm(res - actual), 1e-5)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/kernels/test_spectral_mixture_kernel.py","file_name":"test_spectral_mixture_kernel.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"38548020","text":"\"\"\"\nAuthor: kinnala\n\nSolve the Kirchhoff plate bending problem in a unit square\nwith clamped boundary conditions using the nonconforming\nMorley element. 
Demonstrates also the visualization of\nhigher order solutions using 'GlobalBasis.refinterp'.\n\"\"\"\n\nfrom skfem import *\nimport numpy as np\n\nm = MeshTri()\nm.refine(3)\n\ne = ElementTriMorley()\nmap = MappingAffine(m)\nib = InteriorBasis(m, e, map, 4)\n\n@bilinear_form\ndef bilinf(u, du, ddu, v, dv, ddv, w):\n    # plate thickness\n    d = 1.0\n    E = 1.0\n    nu = 0.3\n\n    def C(T):\n        trT = T[0,0] + T[1,1]\n        return np.array([[E/(1.0+nu)*(T[0, 0]+nu/(1.0-nu)*trT), E/(1.0+nu)*T[0, 1]],\n                         [E/(1.0+nu)*T[1, 0], E/(1.0+nu)*(T[1, 1]+nu/(1.0-nu)*trT)]])\n\n    def Eps(ddU):\n        return np.array([[ddU[0][0], ddU[0][1]],\n                         [ddU[1][0], ddU[1][1]]])\n\n    def ddot(T1, T2):\n        return T1[0, 0]*T2[0, 0] +\\\n               T1[0, 1]*T2[0, 1] +\\\n               T1[1, 0]*T2[1, 0] +\\\n               T1[1, 1]*T2[1, 1]\n\n    return d**3/12.0*ddot(C(Eps(ddu)), Eps(ddv))\n\n@linear_form\ndef linf(v, dv, ddv, w):\n    return 1.0*v\n\nK = asm(bilinf, ib)\nf = asm(linf, ib)\n\nx, D = ib.find_dofs()\nI = ib.dofnum.complement_dofs(D)\n\nx[I] = solve(*condense(K, f, I=I))\n\nif __name__ == \"__main__\":\n    M, X = ib.refinterp(x, 3)\n    ax = m.draw()\n    M.plot(X, smooth=True, edgecolors='', ax=ax)\n    M.show()\n","sub_path":"examples/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"51692663","text":"from GE_Requirements import GeRequirements\n\n\nclass DegreeApplicableUnits(GeRequirements):\n\n    def __init__(self, student_id, degree_applicable_dict, major_courses_list, completed_ge_courses, completed_ge_units,\n                 major_units_list):\n        self.student_id = student_id\n        self.degree_applicable_dict = degree_applicable_dict\n        self.major_courses_list = major_courses_list\n        self.major_units_list = major_units_list\n        self.completed_ge_courses = completed_ge_courses\n        self.completed_ge_units = completed_ge_units\n        self.elective_course_list = []\n        self.elective_units_list = []\n        self.elective_dict = {}\n\n    def elective_courses(self):\n        proficiency_list = ['Writing_Proficiency', 'Math_Proficiency', 'Health_Proficiency', 'Reading_Proficiency']\n        ge_course_list = []\n        ge_course = False\n        major_course = False\n        elective_course = False\n        degree_units = sum(self.completed_ge_units) + sum(self.major_units_list) + sum(\n            self.elective_units_list)\n        for key in self.completed_ge_courses:\n            if key not in proficiency_list:\n                ge_course_list.append(self.completed_ge_courses[key])\n        # print('elective ge courses', ge_course_list)\n        for course_key in self.degree_applicable_dict:\n            # print('degree units', degree_units)\n            if degree_units < 60:\n                # reset all three flags for every course, otherwise a course\n                # seen once as an elective blocks all later additions\n                ge_course = False\n                major_course = False\n                elective_course = False\n                # print('course key', course_key)\n                # print('GE list', self.completed_ge_courses)\n\n                if course_key in ge_course_list:\n                    ge_course = True\n                # print('major courses', self.major_courses_list)\n                if course_key in self.major_courses_list:\n                    major_course = True\n\n                if course_key in self.elective_course_list:\n                    elective_course = True\n\n                if not ge_course:\n                    if not major_course:\n                        if not elective_course:\n                            self.elective_dict[course_key] = self.degree_applicable_dict[course_key]\n                            print('elec dict', self.elective_dict)\n                            self.elective_course_list.append(course_key)\n                            # print('elective list', self.elective_course_list)\n                            self.elective_units_list.append(self.degree_applicable_dict[course_key])\n                            degree_units = sum(self.completed_ge_units) + sum(self.major_units_list) + sum(\n                                self.elective_units_list)\n        return self.elective_units_list, self.elective_course_list, 
self.elective_dict\n","sub_path":"Degree_Applicable_Electives.py","file_name":"Degree_Applicable_Electives.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"205424486","text":"from web3 import (\n Web3,\n)\n\n\nclass EthSigner():\n def __init__(self, root_path, config_data, privkey_name, privkey_pwd):\n keystore = config_data[\"wallet-eth\"][privkey_name]['keystore']\n with open(root_path + keystore, \"r\") as f:\n encrypted_key = f.read()\n\n self.web3 = Web3()\n self.eth_privkey = self.web3.eth.account.decrypt(encrypted_key,\n privkey_pwd)\n acct = self.web3.eth.account.from_key(self.eth_privkey)\n self.address = acct.address\n\n def sign(self, h: bytes):\n return self.web3.eth.account.signHash(h, private_key=self.eth_privkey)\n","sub_path":"ethaergo_bridge_operator/validator/eth_signer.py","file_name":"eth_signer.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"318101567","text":"# -*- coding: utf-8 -*-\n\"\"\"\n core.utils.admin\n ~~~~~~~~~~~~~~~~\n\n Admin utils for project apps.\n\n :copyright: (c) 2015 by Rambler&Co.\n\"\"\"\n\nimport reversion\n\nfrom django.contrib import admin\nfrom django.db import models\nfrom django.template.response import TemplateResponse\nfrom django.contrib.admin.utils import unquote\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.utils.safestring import mark_safe\nfrom django.utils.encoding import force_text\nfrom django.utils.html import escape\nfrom django.utils.text import capfirst\nfrom django.utils.decorators import method_decorator\nfrom django.db import transaction\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.http import Http404\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.contrib.admin.views.main import ChangeList\nfrom django.contrib.admin.filters import RelatedFieldListFilter\n\nfrom admin_app.core.admin import make_published\nfrom admin_app.core.utils.widgets import AjaxChosenWidget\nfrom admin_app.core.utils.widgets import AjaxChosenWidgetMultiple\nfrom datetimewidget.widgets import DateWidget\nfrom datetimewidget.widgets import TimeWidget\n\nfrom admin_app.core.utils.forms import AutocompleteFilterForm\n\n\nclass AddUserMixin(object):\n\n def save_model(self, request, obj, form, change):\n obj.author = request.user\n obj.save()\n\n\nclass ReadonlyInline(admin.TabularInline):\n inlines = tuple()\n extra = 0\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass CommonAdminMixin(object):\n formfield_overrides = {\n models.ForeignKey: {'widget': AjaxChosenWidget},\n models.ManyToManyField: {'widget': AjaxChosenWidgetMultiple},\n models.DateField: {\n 'widget': DateWidget(\n usel10n=True,\n attrs={},\n options={'todayBtn': 'true', 'todayHighlight': 'true'}\n )\n },\n models.TimeField: {\n 'widget': TimeWidget(attrs={}, options={'language': 'ru'})\n },\n }\n\n\nclass RelatedTagsFieldListFilter(RelatedFieldListFilter):\n\n def field_choices(self, field, request, model_admin):\n tags_ids = set(\n model_admin.model.get_tags_by_type().values_list('pk', flat=True)\n )\n return field.get_choices(\n include_blank=False, limit_choices_to={'pk__in': tags_ids}\n )\n\n\nclass ContentAdminMixin(CommonAdminMixin):\n list_select_related = [\n 
'main_image', 'main_tag', 'seo', 'content_type', 'author'\n    ]\n    exclude = ('author',)\n    raw_id_fields = ('author',)\n    search_fields = (\n        'title',\n        'announce',\n        'text',\n        'id',\n    )\n    list_filter = (\n        ('main_tag', RelatedTagsFieldListFilter),\n        ('tags', RelatedTagsFieldListFilter),\n        'source_info__source',\n        'status',\n        ('author', admin.RelatedOnlyFieldListFilter),\n    )\n    readonly_fields = (\n        'rate',\n        'preview_main_image',\n    )\n    actions = (\n        make_published,\n        # TODO: uncomment if needed\n        # make_deleted,\n    )\n\n    def changelist_view(self, request, extra_context=None):\n        if 'status__exact' not in request.GET:\n            q = request.GET.copy()\n            q['status__exact'] = '0'  # 'Черновик' by default\n            request.GET = q\n            request.META['QUERY_STRING'] = request.GET.urlencode()\n        return super(ContentAdminMixin, self).changelist_view(\n            request, extra_context=extra_context\n        )\n\n    def save_related(self, request, form, formsets, change):\n        super(ContentAdminMixin, self).save_related(\n            request, form, formsets, change\n        )\n        if hasattr(form, 'instance') and form.instance:\n            seo = form.instance.get_seo()\n\n    def get_queryset(self, request):\n        return (\n            super(ContentAdminMixin, self).get_queryset(request)\n            .prefetch_related(\n                'main_image',\n                'source_info__source',\n            )\n        )\n\n\nclass ExcludeObjChangeList(ChangeList):\n\n    def get_queryset(self, request):\n        queryset = super(ExcludeObjChangeList, self).get_queryset(request)\n        queryset = queryset.exclude(pk=request.obj.pk)\n        return queryset\n\n\nclass MergeAdminMixin(object):\n    merge_content_template = 'admin/core/merge_content.html'\n\n    def change_view(self, request, object_id, form_url='', extra_context=None):\n        extra_context = {\n            'has_merge_content_view': hasattr(self, 'merge_content_view'),\n        }\n        return super(MergeAdminMixin, self).change_view(\n            request, object_id, form_url='', extra_context=extra_context\n        )\n\n    @method_decorator(csrf_protect)\n    @transaction.atomic\n    @reversion.create_revision()\n    def merge_content_view(self, request, object_id, extra_context=None):\n        obj = self.get_object(request, unquote(object_id))\n        if obj is None:\n            raise Http404(\n                _('%(name)s object with primary key %(key)r does not exist.')\n                % {\n                    'name': force_text(self.model._meta.verbose_name),\n                    'key': escape(object_id),\n                })\n\n        opts = self.model._meta\n        app_label = opts.app_label\n        is_help_msg = False\n\n        if request.method == 'GET':\n            filter_form = AutocompleteFilterForm(\n                model=self.model, data=request.GET,\n            )\n            request.obj = obj\n            list_display = (\n                'title',\n                'id',\n                'source_info_display',\n                'action_checkbox',\n            )\n            cl = ExcludeObjChangeList(\n                request, self.model, list_display, (), ('title',),\n                self.date_hierarchy, (), self.list_select_related,\n                self.list_per_page, self.list_max_show_all,\n                self.list_editable, self\n            )\n\n            cl.formset = None\n            cl.result_list = cl.result_list[:20]\n            if cl.queryset.count() > cl.result_list.count() + 1 \\\n                    or not cl.queryset.count():\n                is_help_msg = True\n\n        elif request.method == 'POST':\n            msg, status = self.merge(request, obj)\n            self.message_user(\n                request,\n                mark_safe(msg),\n                status or messages.SUCCESS\n            )\n\n            redirect_url = u'{0}merge_content/?q={1}'.format(\n                reverse(\n                    u'admin:{0}_{1}_change'\n                    .format(opts.app_label, opts.model_name),\n                    current_app=self.admin_site.name,\n                    args=[obj.pk]\n                ),\n                request.POST.get('q'),\n            )\n            return HttpResponseRedirect(redirect_url)\n\n        else:\n            raise Http404\n\n        context = dict(\n            self.admin_site.each_context(request),\n            title=_('Change history: %s') % force_text(obj),\n            
module_name=capfirst(force_text(opts.verbose_name_plural)),\n object=obj,\n opts=opts,\n cl=cl,\n is_help_msg=is_help_msg,\n filter_form=filter_form,\n preserved_filters=self.get_preserved_filters(request),\n )\n context.update(extra_context or {})\n\n request.current_app = self.admin_site.name\n return TemplateResponse(\n request, self.merge_content_template, context\n )\n\n def get_urls(self):\n from django.conf.urls import url\n\n urlpatterns = list(super(MergeAdminMixin, self).get_urls())\n urlpatterns.insert(-1, url(r'^(.+)/merge_content/$',\n self.merge_content_view,\n name='admin_merge_content'))\n return urlpatterns\n\n def merge(self, request, obj):\n msg, status = u'', None\n\n ids = request.POST.getlist('_selected_action')\n if ids:\n items, item_msgs = [], []\n for _id in ids:\n try:\n item = self.model.objects.get(id=_id)\n except:\n msg = u'Объект с ID: {0} не найден.'.format(unicode(_id))\n status = messages.ERROR\n break\n else:\n items.append(item)\n item_msg = self.item_handling(obj, item)\n if item_msg:\n item_msgs.append(item_msg)\n for source_info in item.source_info.all():\n source_info.object_id = obj.id\n source_info.save()\n item.delete()\n\n if items:\n msg = (u'
<br><br>Ссылки источников:<br><ul>{0}</ul>'\n                   u'<br><br>Удалены:<br><ul>{1}</ul>').format(\n                u''.join([u'<li>Ссылки с {0} переназначена на {1}.</li>'\n                          .format(j, obj) for j in items]),\n                u''.join([u'<li>{0}</li>'.format(i) for i in items]),\n            )\n            if item_msgs:\n                msg = u'{0}{1}'.format(msg, u''.join(item_msgs))\n        else:\n            msg = u'Выберите хотя бы один объект из списка.'\n            status = messages.WARNING\n        return msg, status\n\n    def item_handling(self, obj, item):\n        return u''\n\n    @staticmethod\n    def change_source_info(main, second):\n        \"\"\"\n        Move source_info links from second to main.\n        \"\"\"\n        for info in second.source_info.all():\n            info.object_id = main.id\n            info.save()\n\n\nclass EventPlaceMergeAdminMixin(MergeAdminMixin):\n\n    def item_handling(self, obj, item):\n        model = obj.get_model_name()\n        model_i = 'event' if model == 'place' else 'place'  # inverse model\n\n        schedules = item.sessions.all()\n        for schedule_item in schedules:\n            if obj.sessions.filter(\n                    **{model_i: getattr(schedule_item, model_i)}\n            ).exists():\n                schedule_obj = obj.sessions.get(\n                    **{model_i: getattr(schedule_item, model_i)}\n                )\n                self.change_source_info(schedule_obj, schedule_item)\n\n                for s_item in schedule_item.sessions.all():\n                    for s_obj in schedule_obj.sessions.all():\n                        if s_obj.date_start == s_item.date_start \\\n                                and s_obj.time_start == s_item.time_start:\n                            self.change_source_info(s_obj, s_item)\n                            break\n                        else:\n                            pass\n                    else:\n                        s_item.event_place = schedule_obj\n                        s_item.save()\n            else:\n                setattr(schedule_item, model, obj)\n                schedule_item.save()\n\n        if schedules:\n            sessions = []\n            for i in obj.sessions.all():\n                s_list = [u'<li>{0}</li>'.format(s) for s in i.sessions.all()]\n                sessions.append(\n                    u'<li>{0}<br><ul>Сеансы:<br><ol>{1}</ol></ul></li>'\n                    .format(i, u''.join(s_list)))\n            msg = (u'<br><br>Расписания переназначены с {0} на {1}:'\n                   u'<br><ol>{2}</ol>
    '\n .format(item, obj, u''.join(sessions)))\n else:\n msg = u''\n return msg\n","sub_path":"src/admin_app/core/utils/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":11635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"94652723","text":"from rest_framework import viewsets\n\nfrom pretalx.api.serializers.submission import (\n ScheduleListSerializer,\n ScheduleSerializer,\n SubmissionOrgaSerializer,\n SubmissionReviewerSerializer,\n SubmissionSerializer,\n TagSerializer,\n)\nfrom pretalx.schedule.models import Schedule\nfrom pretalx.submission.models import Submission, Tag\n\n\nclass SubmissionViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = SubmissionSerializer\n queryset = Submission.objects.none()\n lookup_field = \"code__iexact\"\n filterset_fields = (\"state\", \"content_locale\", \"submission_type\")\n search_fields = (\"title\", \"speakers__name\")\n\n def get_queryset(self):\n if self.request._request.path.endswith(\n \"/talks/\"\n ) or not self.request.user.has_perm(\n \"orga.view_submissions\", self.request.event\n ):\n if (\n not self.request.user.has_perm(\n \"agenda.view_schedule\", self.request.event\n )\n or not self.request.event.current_schedule\n ):\n return Submission.objects.none()\n return self.request.event.submissions.filter(\n pk__in=self.request.event.current_schedule.talks.filter(\n is_visible=True\n ).values_list(\"submission_id\", flat=True)\n )\n return self.request.event.submissions.all()\n\n def get_serializer_class(self):\n if self.request.user.has_perm(\"orga.change_submissions\", self.request.event):\n return SubmissionOrgaSerializer\n if self.request.user.has_perm(\"orga.view_submissions\", self.request.event):\n return SubmissionReviewerSerializer\n return SubmissionSerializer\n\n def get_serializer(self, *args, **kwargs):\n can_view_speakers = self.request.user.has_perm(\n \"agenda.view_schedule\", self.request.event\n ) or self.request.user.has_perm(\"orga.view_speakers\", self.request.event)\n if self.request.query_params.get(\"anon\"):\n can_view_speakers = False\n return super().get_serializer(\n *args,\n can_view_speakers=can_view_speakers,\n event=self.request.event,\n **kwargs\n )\n\n\nclass ScheduleViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = ScheduleSerializer\n queryset = Schedule.objects.none()\n lookup_field = \"version__iexact\"\n\n def get_serializer_class(self):\n if self.action == \"list\":\n return ScheduleListSerializer\n return ScheduleSerializer # self.action == 'retrieve'\n\n def get_object(self):\n try:\n return super().get_object()\n except Exception:\n is_public = (\n self.request.event.is_public\n and self.request.event.settings.show_schedule\n )\n has_perm = self.request.user.has_perm(\n \"orga.edit_schedule\", self.request.event\n )\n query = self.kwargs.get(self.lookup_field)\n if has_perm and query == \"wip\":\n return self.request.event.wip_schedule\n if (\n (has_perm or is_public)\n and query == \"latest\"\n and self.request.event.current_schedule\n ):\n return self.request.event.current_schedule\n raise\n\n def get_queryset(self):\n qs = self.queryset\n is_public = (\n self.request.event.is_public and self.request.event.settings.show_schedule\n )\n current_schedule = (\n self.request.event.current_schedule.pk\n if self.request.event.current_schedule\n else None\n )\n\n if self.request.user.has_perm(\"orga.view_schedule\", self.request.event):\n return self.request.event.schedules.all()\n if is_public:\n return 
self.request.event.schedules.filter(pk=current_schedule)\n        return qs\n\n\nclass TagViewSet(viewsets.ReadOnlyModelViewSet):\n    serializer_class = TagSerializer\n    queryset = Tag.objects.none()\n    lookup_field = \"tag__iexact\"\n\n    def get_queryset(self):\n        if self.request.user.has_perm(\"orga.view_submissions\", self.request.event):\n            return self.request.event.tags.all()\n        return Tag.objects.none()\n","sub_path":"src/pretalx/api/views/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"404418702","text":"# Sqlite Functions\n\nimport sqlite3\n\n# Open SQL DB and cursor\nwith sqlite3.connect('cars.db') as sqlCon:\n    c = sqlCon.cursor()\n\n    # Retrieve all data from inventory table\n    c.execute(\"SELECT * FROM inventory\")\n    inventory = c.fetchall()\n\n    # Iterate through inventory rows\n    for make, model, quantity in inventory:\n\n        # Count the orders for the current model (parameterized to avoid SQL injection)\n        c.execute(\"SELECT count(order_date) FROM orders WHERE model = ?\", (model,))\n        orders = c.fetchone()[0]\n\n        # Print the data\n        print(make, model)\n        print('Quantity:', quantity)\n        print('Orders:', orders)\n        print()\n","sub_path":"homeworkh.py","file_name":"homeworkh.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"231039727","text":"import numpy as np\ndef somalinhas(a):\n    b=[]\n    for i in range(0,a.shape[0],1):\n        soma=0\n        for j in range(0,a.shape[1],1):\n            soma=soma+a[i,j]\n        b.append(soma)\n    return b\ndef somacolunas(a):\n    c=[]\n    for j in range(0,a.shape[1],1):\n        soma=0\n        for i in range(0,a.shape[0],1):\n            soma=soma+a[i,j]\n        c.append(soma)\n    return c\ndef diagonal(a):\n    soma=0\n    for i in range(0,a.shape[0],1):\n        soma=soma+a[i,i]\n    return (soma)\ndef diagonalsec(a):\n    soma=0\n    for i in range(a.shape[0]-1,-1,-1):\n        soma=soma+a[i,a.shape[0]-1-i]\n    return (soma)\ndef magico(a):\n    x=somalinhas(a)\n    y=somacolunas(a)\n    d=diagonal(a)\n    e=diagonalsec(a)\n    if min(x)==max(x) and x==y and d==x[0] and e==x[0]:\n        return True\n    else:\n        return False\nlinhas=int(input('Linhas: '))\ncolunas=int(input('colunas: '))\na=np.zeros((linhas,colunas))\nfor i in range(0,a.shape[0],1):\n    for j in range(0,a.shape[1],1):\n        a[i,j]=int(input('valores: '))\nif magico(a):\n    print('S')\nelse:\n    print('N')","sub_path":"moodledata/vpl_data/59/usersdata/162/61524/submittedfiles/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"281597554","text":"from Components.Converter.Converter import Converter\nfrom Components.config import config\nfrom Components.Element import cached\nfrom os import path\nfrom Plugins.Extensions.MyMetrixLite.__init__ import initOtherConfig\nimport Screens.Standby\n\ninitOtherConfig()\n\nclass MetrixSTBinfo(Converter, object):\n\n\tdef __init__(self, type):\n\t\tConverter.__init__(self, type)\n\t\tself.type = type\n\n\t@cached\n\tdef getText(self):\n\t\tif Screens.Standby.inStandby:\n\t\t\treturn \"\"\n\t\telif self.type == \"CPUload\":\n\t\t\treturn self.getCPUload()\n\t\telif self.type == \"CPUtemp\":\n\t\t\treturn self.getCPUtemp()\n\t\telif self.type == \"SYStemp\":\n\t\t\treturn self.getSYStemp()\n\t\telif self.type ==\"MyMetrixConfig\":\n\t\t\treturn self.getMyMetrixConfig()\n\t\telse:\n\t\t\treturn \"\"\n\n\tdef getMyMetrixConfig(self):\n\t\tinfo = \"\"\n\t\tspace = \" \"\n\t\tif 
config.plugins.MyMetrixLiteOther.showCPULoad.getValue() is not False:\n\t\t\tinfo += self.getCPUload()\n\t\tif config.plugins.MyMetrixLiteOther.showCPUTemp.getValue() is not False:\n\t\t\tinfo += space + self.getCPUtemp()\n\t\tif config.plugins.MyMetrixLiteOther.showSYSTemp.getValue() is not False:\n\t\t\tinfo += space + self.getSYStemp()\n\t\treturn info\n\n\tdef getCPUload(self):\n\t\tinfo = \"\"\n\t\ttemp = \"\"\n\t\tif path.exists('/proc/loadavg'):\n\t\t\tf = open('/proc/loadavg', 'r')\n\t\t\ttemp = f.read()\n\t\t\tf.close()\n\t\t\tinfo = \"CPU-Load: \" + str(temp[:4])\n\t\telse:\n\t\t\tinfo = \"\"\n\t\treturn info\n\n\tdef getCPUtemp(self):\n\t\tinfo = \"\"\n\t\ttemp = \"\"\n\t\tif path.exists('/proc/stb/fp/temp_sensor_avs'):\n\t\t\tf = open('/proc/stb/fp/temp_sensor_avs', 'r')\n\t\t\ttemp = f.read()\n\t\t\tf.close()\n\t\tif temp and int(temp.replace('\\n', '')) > 0:\n\t\t\tinfo =\"CPU-Temp: \" + temp.replace('\\n', '') + str('\\xc2\\xb0') + \"C\"\n\t\telse:\n\t\t\tinfo = \"\"\n\t\treturn info\n\n\tdef getSYStemp(self):\n\t\tinfo = \"\"\n\t\ttemp = \"\"\n\t\tif path.exists('/proc/stb/sensors/temp0/value'):\n\t\t\tf = open('/proc/stb/sensors/temp0/value', 'r')\n\t\t\ttemp = f.read()\n\t\t\tf.close()\n\t\telif path.exists('/proc/stb/fp/temp_sensor'):\n\t\t\tf = open('/proc/stb/fp/temp_sensor', 'r')\n\t\t\ttemp = f.read()\n\t\t\tf.close()\n\t\tif temp and int(temp.replace('\\n', '')) > 0:\n\t\t\tinfo =\"SYS-Temp: \" + temp.replace('\\n', '') + str('\\xc2\\xb0') + \"C\"\n\t\telse:\n\t\t\tinfo = \"\"\n\t\treturn info\n\n\ttext = property(getText)\n\n","sub_path":"usr/lib/enigma2/python/Components/Converter/MetrixSTBinfo.py","file_name":"MetrixSTBinfo.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"62109225","text":"S = list(input())\nT = list(input())\ns = len(S)\nt = len(T)\nfor i in range(s-t,-1,-1):\n ss = S[i:i+t]\n for j in range(t):\n if ss[j] != \"?\" and ss[j] != T[j]:\n break\n else:\n for j in range(t):\n if ss[j] == \"?\":\n S[i+j] = T[j]\n break\nelse:\n print(\"UNRESTORABLE\")\n exit()\nfor i in range(s):\n if S[i] == \"?\":\n S[i] = \"a\"\nprint(\"\".join(S))","sub_path":"Python_codes/p03565/s010839419.py","file_name":"s010839419.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"331386668","text":"\"\"\"task 2 / time 17:45.3 \"\"\"\r\n\r\n\r\ndef collect_names(mails):\r\n unique_names = []\r\n start_phrase = \"Subject: \"\r\n end_phrase = \" viewed the document\"\r\n for mail in mails:\r\n target = mail.find(end_phrase)\r\n if target != -1:\r\n unique_names.append(mail[(mail.find(start_phrase) + len(start_phrase)): target])\r\n else:\r\n continue\r\n return list(set(unique_names))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n mails = [\r\n 'Sep 30, 2019. From: Robot. Subject: John Doe viewed the document. 123',\r\n 'Oct 15, 2019. To: me. Subject: Spam. Spam',\r\n 'Dec 2, 2019. From: Robot. To: me. Subject: Vasya Pupkin viewed the document. Please sign',\r\n 'Dec 15, 2019. Subject: The truth is out there',\r\n 'Dec 25, 2019. Subject: Merry Christmas!',\r\n 'Jan 10, 2020. Subject: Fox Mulder viewed the document. Please check',\r\n 'Jan 12, 2020. Subject: John Doe viewed the document. 
Great news...'\r\n ] # task data\r\n print(collect_names(mails))","sub_path":"ait/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"175610016","text":"# Game Dimensions (rows & cols should evenly divide game board)\nWIDTH = 600\nHEIGHT = 600\nROWS = 10\nCOLS = 10\nCELL_SIZE = WIDTH // ROWS\n\n# Starting position\nSTART_POS = (5, 5)\n\n# Color tuples\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"232180555","text":"# -*- coding: utf-8 -*-\n\nfrom VGG_8 import VGG_8\nfrom augmentation import *\nfrom dataset_3d_lc import *\nfrom torch.utils import data\nfrom tqdm import tqdm\nfrom tensorboardX import SummaryWriter\nimport os\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nimport argparse\nfrom utils import AverageMeter\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--seq_len', default=8, type=int, help='number of frames in each sequence')\nparser.add_argument('--temp_VGG', action='store_true', help='standard or temporal VGG-8')\nparser.add_argument('--mode', default='CPC', help='Self-supervised algorithm, necessary for retrieving saved network structure')\nparser.add_argument('--spatial_collapse', action='store_true', help='performing average pooling or not to obtain z')\nparser.add_argument('--spatial_segm', action='store_true', help='use of spatial negatives (if not, then flattening)')\nparser.add_argument('--single_predictor', action='store_true', help='use of a single recursively applied predictor')\nparser.add_argument('--predictor_bias', action='store_true', help='linear predicting layer having bias or not')\nparser.add_argument('--monitor_all_layers', action='store_true', help='perform the classification at each layer')\nparser.add_argument('--batch_size', default=16, type=int)\nparser.add_argument('--lr', default=1e-3, type=float, help='learning rate')\nparser.add_argument('--wd', default=1e-5, type=float, help='weight decay')\nparser.add_argument('--epochs', default=10, type=int, help='number of total epochs to run')\nparser.add_argument('--gpu', default='0', type=str)\nparser.add_argument('--img_dim', default=128, type=int)\nparser.add_argument('--name', help='relative path to load trained encoder and store the model and the tensorboard files')\n\n\n# CLASS PERFORMING TOP-K ACCURACY FOR CLASSIFICATION\nclass top_k(nn.Module):\n def __init__(self, k):\n super(top_k, self).__init__()\n k = [k] if isinstance(k, int) else k\n self.k=k\n \n def forward(self, input, targets):\n accs = []\n for k in self.k:\n acc_k = torch.mean(torch.tensor([(target == input_line).any().float() for (target, input_line) in zip(targets,torch.topk(input, k, dim=1)[1])]))\n accs.append(acc_k)\n return accs\n \n\ndef classify(): \n torch.autograd.set_detect_anomaly(True)\n\n global args; args = parser.parse_args()\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=str(args.gpu)\n global cuda; cuda = torch.device('cuda')\n \n img_path = args.name\n # CREATING ENCODER MODEL, LAST ARGUMENT set as True PREVENTS COSTLY COMPUTATION OF SELF-SUPERVISED LOSSES AND ACCS \n base_model = VGG_8(args.temp_VGG, args.mode, args.spatial_collapse, args.single_predictor, args.spatial_segm, args.predictor_bias, True) \n # IF 
MODEL FOUND IN FOLDER DESIGNATED BY name, LOAD PARAMETERS\n if os.path.isfile(img_path+'/model.pth.tar'):\n base_model.load_state_dict(torch.load(img_path+'/model.pth.tar'))\n else:\n print('file not found, starts with random encoder')\n \n \n # FREEZE PARAMETERS\n for param in base_model.parameters():\n param.requires_grad = False\n base_model.eval() # ADDITIONAL PRECAUTION + FREEZES THE MEAN AND VAR FROM BATCHNORMS \n \n \n # FAKE INPUT TO COMPUTE SIZE OF CLASSIFIERS (AT EACH LAYER OR JUST AT THE END)\n input = torch.randn(1,3,args.seq_len,args.img_dim,args.img_dim)\n output_sizes = []\n for block in base_model.blocks:\n input = block(input.detach())\n if args.monitor_all_layers:\n # WE DO NOT COUNT TIME DIMENSION (2) BECAUSE IT IS AVERAGE POOLED\n output_sizes.append([int(torch.numel(input)/input.size(2)), input.size(2)])\n\n if not args.monitor_all_layers:\n output_sizes.append([int(torch.numel(input)/input.size(2)),input.size(2)])\n \n \n # CREATION OF THE CLASSIFIER(S) FOR EACH OUTPUT SIZE\n classifications = nn.ModuleList()\n for i, output_size in enumerate(output_sizes):\n classifications.append(nn.Sequential(nn.AvgPool3d((output_size[1],1,1)),nn.Flatten(),nn.BatchNorm1d(output_size[0]), nn.Dropout(0.5), nn.Linear(output_size[0], 101)))\n classifications[i][2].weight.data.fill_(1)\n classifications[i][2].bias.data.zero_() \n \n for name, param in classifications[i][-1].named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.orthogonal_(param, 1) \n \n \n # MIGRATING MODEL AND CLASSIFIERS TO CUDA\n base_model = base_model.to(cuda)\n classifications = classifications.to(cuda)\n\n\n # CHECKING GRADIENTS OF DIFFERENT COMPONENTS\n print('\\n===========Check Grad============')\n for name, param in base_model.named_parameters():\n print(name, param.requires_grad)\n for name, param in classifications.named_parameters():\n print(name, param.requires_grad)\n print('=================================\\n')\n\n\n # GIVE THE CLASSIFIERS' PARAMETERS TO THE OPTIMIZER\n params = classifications.parameters()\n optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.wd)\n lr_lambda = lambda ep: MultiStepLR_Restart_Multiplier(ep, gamma=0.1, step=[60, 80, 100], repeat=1)\n scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)\n \n \n # DUMMY VALIDATION LOSS FOR SAVING BEST MODEL \n best_loss = 100\n global iteration; iteration = 0\n\n\n # DEFINE THE TRANSFORMATIONS FOR TRAIN AND VALIDATION\n transform = transforms.Compose([\n RandomSizedCrop(consistent=True, size=224, p=1.0),\n Scale(size=(args.img_dim,args.img_dim)),\n RandomHorizontalFlip(consistent=True),\n ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25, p=0.3, consistent=True),\n ToTensor(),\n Normalize()\n ])\n val_transform = transforms.Compose([\n RandomSizedCrop(consistent=True, size=224, p=0.3),\n Scale(size=(args.img_dim,args.img_dim)),\n RandomHorizontalFlip(consistent=True),\n ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1, p=0.3, consistent=True),\n ToTensor(),\n Normalize()\n ])\n\n train_loader = get_data(transform, 'train')\n val_loader = get_data(val_transform, 'val')\n \n \n # SPECIFY IF ALL LAYERS MONITORED\n if args.monitor_all_layers:\n appendix = '_all_layers'\n else:\n appendix = ''\n \n \n # INSTANTIATION OF THE TENSORBOARD MONITORING\n try: # old version\n writer_val = SummaryWriter(log_dir=os.path.join(img_path, 'classification'+appendix+'/val'))\n writer_train = SummaryWriter(log_dir=os.path.join(img_path, 
'classification'+appendix+'/train'))\n except: # v1.7\n writer_val = SummaryWriter(logdir=os.path.join(img_path, 'classification'+appendix+'/val'))\n writer_train = SummaryWriter(logdir=os.path.join(img_path, 'classification'+appendix+'/train'))\n \n \n for epoch in range(args.epochs):\n \n train_losses, train_accs = train(base_model, classifications, train_loader, optimizer, epoch, args.monitor_all_layers)\n val_losses, val_accs = validate(base_model, classifications, val_loader, epoch, args.monitor_all_layers)\n\n scheduler.step()\n\n # SAVE CURVES, ITERATE OVER LOSSES OF THE NETWORK (1 LOSS IF END-TO-END AND N IF PER-LAYER)\n for i, (train_loss, val_loss) in enumerate(zip(train_losses, val_losses)):\n writer_train.add_scalar('global/loss_{}'.format(i), train_loss, epoch)\n writer_val.add_scalar('global/loss_{}'.format(i), val_loss, epoch)\n \n # SAVE CURVES, ITERATE OVER ACCURACIES OF THE NETWORK ([3] ACCURACIES IF END-TO-END AND N*[3] IF PER-LAYER)\n for i, (train_acc, val_acc) in enumerate(zip(train_accs, val_accs)):\n for j in range(3):\n a= [1,3,5]\n writer_train.add_scalar('global/accuracy_{}_top_{}'.format(i,a[j]), train_acc[j], epoch)\n writer_val.add_scalar('global/accuracy_{}_top_{}'.format(i, a[j]), val_acc[j], epoch)\n \n # SAVE MODEL IF BEST VALIDATION LOSS\n if val_losses[-1] <= best_loss:\n best_loss = val_loss\n torch.save(classifications.state_dict(), img_path+'/classifier'+appendix+'.pth.tar')\n \n print('epoch {}/{}'.format(epoch, args.epochs))\n \n \n \ndef get_data(transform, mode='train'):\n print('Loading data for \"%s\" ...' % mode)\n global dataset\n dataset = UCF101_3d(mode=mode, \n transform=transform, \n seq_len=args.seq_len,\n num_seq=1, # NUMBER OF SEQUENCES, ARTEFACT FROM DPC CODE, KEEP SET TO 1!\n downsample=3) # FRAME RATE DOWNSAMPLING: FPS = 30/downsample\n\n my_sampler = data.RandomSampler(dataset)\n if mode == 'train':\n data_loader = data.DataLoader(dataset,\n batch_size=args.batch_size,\n sampler=my_sampler,\n shuffle=False,\n num_workers=16,\n pin_memory=True,\n drop_last=True)\n elif mode == 'val':\n data_loader = data.DataLoader(dataset,\n batch_size=args.batch_size,\n sampler=my_sampler,\n shuffle=False,\n num_workers=16,\n pin_memory=True,\n drop_last=True)\n\n print('\"%s\" dataset size: %d' % (mode, len(dataset)))\n return data_loader\n\n\n\ndef train(model, classifiers, data_loader, optimizer, epoch, monitor_all_layers):\n cuda = torch.device('cuda')\n # SET THE LOSSES AND ACCURACIES\n # WARNING: USING x*[OBJECT] DUPLICATES REFERENCES TO THE SAME OBJECT INSTANCE\n # HENCE THE FOR LOOP\n losses = []\n accuracies = []\n Losses = [] \n Accs = [] \n if isinstance(classifiers, nn.ModuleList):\n for i in range(len(classifiers)):\n losses.append(AverageMeter())\n accuracies.append([AverageMeter(),AverageMeter(),AverageMeter()])\n Losses.append(nn.CrossEntropyLoss())\n Accs.append( top_k([1,3,5]))\n \n model.eval()\n classifiers.train()\n \n for (input_seq, target) in tqdm(data_loader):\n # CREATING THE LIST OF NETWORK OUTPUTS AND CLASSIFICATION LOSSES\n res_losses = []\n outputs = []\n\n input_seq = input_seq.squeeze().to(cuda)\n target = target.squeeze().to(cuda)\n B = input_seq.size(0)\n \n # IF ONLY CLASSIFICATION AT FINAL LAYER\n if not monitor_all_layers:\n _, _, output = model(input_seq)\n outputs.append(output)\n else:\n output=input_seq\n # OTHERWISE AT EACH LAYER \n for block in model.blocks:\n output = block(output)\n outputs.append(output)\n \n \n # MEASURE THE CLASSIFICATION PERFORMANCE \n for output, classifier, Loss, Acc, loss, 
accuracy in zip(outputs, classifiers, Losses, Accs, losses, accuracies):\n # PASS THE OUTPUT(S) TO ITS/THEIR CLASSIFIER \n output = classifier(output.detach())\n # COMPUTE THE CLASSIFIER'S LOSS AND ACCURACIES\n l = Loss(output, target)\n res_losses.append(l)\n acc = Acc(output, target)\n loss.update(l.item(), B)\n for j in range(3):\n accuracy[j].update(acc[j].item(), B)\n\n # ITERATE OVER LOSS OF EVERY CLASSIFIER, BACKWARD AND UPDATE\n optimizer.zero_grad()\n for l in res_losses:\n l.backward()\n optimizer.step()\n \n # PRINT PERFORMANCES INDEXES AT EVERY EPOCH\n for loss, acc in zip(losses, accuracies):\n print('Training loss: {:.4f} | top1: {:.4f} | top3: {:.4f} | top5: {:.4f}'.format(loss.avg, acc[0].avg, acc[1].avg ,acc[2].avg))\n return [loss.local_avg for loss in losses], [[acc[0].avg,acc[1].avg,acc[2].avg] for acc in accuracies]\n \n\n\ndef validate(model, classifiers, data_loader, epoch, monitor_all_layers):\n cuda = torch.device('cuda')\n # SET THE LOSSES AND ACCURACIES\n # WARNING: USING x*[OBJECT] DUPLICATES REFERENCES TO THE SAME OBJECT INSTANCE\n # HENCE THE FOR LOOP\n losses = []\n accuracies = []\n Losses = [] \n Accs = [] \n if isinstance(classifiers, nn.ModuleList):\n for i in range(len(classifiers)):\n losses.append(AverageMeter())\n accuracies.append([AverageMeter(),AverageMeter(),AverageMeter()])\n Losses.append(nn.CrossEntropyLoss())\n Accs.append( top_k([1,3,5]))\n \n model.eval()\n classifiers.eval()\n for (input_seq, target) in tqdm(data_loader):\n # CREATING THE LIST OF NETWORK OUTPUTS\n outputs = []\n input_seq = input_seq.squeeze().to(cuda)\n target = target.squeeze().to(cuda)\n B = input_seq.size(0)\n \n \n # IF ONLY CLASSIFICATION AT FINAL LAYER\n if not monitor_all_layers:\n _, _, output = model(input_seq)\n outputs.append(output)\n else:\n output=input_seq\n # OTHERWISE AT EACH LAYER \n for block in model.blocks:\n output = block(output)\n outputs.append(output)\n \n \n # MEASURE THE CLASSIFICATION PERFORMANCE \n for output, classifier, Loss, Acc, loss, accuracy in zip(outputs, classifiers, Losses, Accs, losses, accuracies):\n # PASS THE OUTPUT(S) TO ITS/THEIR CLASSIFIER \n output = classifier(output.detach())\n # COMPUTE THE CLASSIFIER'S LOSS AND ACCURACIES\n l = Loss(output, target)\n acc = Acc(output, target)\n loss.update(l.item(), B)\n for j in range(3):\n accuracy[j].update(acc[j].item(), B)\n \n \n # PRINT PERFORMANCES INDEXES AT EVERY EPOCH\n for loss, acc in zip(losses, accuracies):\n print('Validation loss: {:.4f} | top1: {:.4f} | top3: {:.4f} | top5: {:.4f}'.format(loss.avg, acc[0].avg, acc[1].avg ,acc[2].avg))\n return [loss.local_avg for loss in losses], [[acc[0].avg,acc[1].avg,acc[2].avg] for acc in accuracies]\n\n\n# USE OF THE SAME LEARNING RATE SCHEDULER AS DPC, SHOULD BE TAKEN AWAY FOR MORE STABLE RESULTS\ndef MultiStepLR_Restart_Multiplier(epoch, gamma=0.1, step=[10,15,20], repeat=3):\n '''return the multipier for LambdaLR, \n 0 <= ep < 10: gamma^0\n 10 <= ep < 15: gamma^1 \n 15 <= ep < 20: gamma^2\n 20 <= ep < 30: gamma^0 ... 
repeat 3 cycles and then keep gamma^2'''\n max_step = max(step)\n effective_epoch = epoch % max_step\n if epoch // max_step >= repeat:\n exp = len(step) - 1\n else:\n exp = len([i for i in step if effective_epoch>=i])\n return gamma ** exp\n\n\nif __name__ == '__main__':\n args = sys.argv\n\n classify()","sub_path":"video/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":15160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"599621637","text":"#Snake\r\n\r\nimport random\r\nimport pygame as pg\r\nimport tkinter as tk\r\nfrom tkinter import messagebox\r\n\r\nclass Cube(object):\r\n rows=20\r\n w=500\r\n\r\n def __init__(self,start,dirnx=1,dirny=0,color=(255,0,0)):\r\n self.pos=start\r\n self.dirnx=dirnx\r\n self.dirny=dirny\r\n self.color=color\r\n\r\n def move(self,dirnx,dirny):\r\n self.dirnx=dirnx\r\n self.dirny=dirny\r\n self.pos=(self.pos[0]+self.dirnx,self.pos[1]+self.dirny)\r\n\r\n def draw(self,surface,eyes=False):\r\n dis=self.w//self.rows\r\n i=self.pos[0]\r\n j=self.pos[1]\r\n\r\n pg.draw.rect(surface,self.color,(i*dis+1,j*dis+1,dis-2,dis-2))\r\n if eyes:\r\n centre = dis//2\r\n radius = 3\r\n circleMiddle = (i*dis+centre-radius,j*dis+8)\r\n circleMiddle2 = (i*dis + dis -radius*2, j*dis+8)\r\n pg.draw.circle(surface, (0,0,0), circleMiddle, radius)\r\n pg.draw.circle(surface, (0,0,0), circleMiddle2, radius)\r\n \r\nclass Snake(object):\r\n body=[]\r\n turns={}\r\n def __init__(self,color,pos):\r\n self.color=color\r\n self.dirnx=0\r\n self.dirny=-1\r\n self.head=Cube(pos,dirnx=self.dirnx,dirny=self.dirny)\r\n self.body.append(self.head)\r\n \r\n def move(self):\r\n for event in pg.event.get():\r\n if event.type==pg.QUIT:\r\n pg.quit()\r\n keys=pg.key.get_pressed()\r\n for key in keys:\r\n if keys[pg.K_a]:\r\n self.dirnx=-1\r\n self.dirny=0\r\n self.turns[self.head.pos[:]]=[self.dirnx,self.dirny]\r\n elif keys[pg.K_d]:\r\n self.dirnx=1\r\n self.dirny=0\r\n self.turns[self.head.pos[:]]=[self.dirnx,self.dirny]\r\n elif keys[pg.K_w]:\r\n self.dirnx=0\r\n self.dirny=-1\r\n self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]\r\n elif keys[pg.K_s]:\r\n self.dirnx=0\r\n self.dirny=1\r\n self.turns[self.head.pos[:]]=[self.dirnx,self.dirny]\r\n for i,c in enumerate(self.body):\r\n p=c.pos[:]\r\n if p in self.turns:\r\n turn = self.turns[p]\r\n c.move(turn[0],turn[1])\r\n if i == len(self.body)-1:\r\n self.turns.pop(p)\r\n else:\r\n if c.dirnx == -1 and c.pos[0] <= 0: c.pos = (c.rows-1, c.pos[1])\r\n elif c.dirnx == 1 and c.pos[0] >= c.rows-1: c.pos = (0,c.pos[1])\r\n elif c.dirny == 1 and c.pos[1] >= c.rows-1: c.pos = (c.pos[0], 0)\r\n elif c.dirny == -1 and c.pos[1] <= 0: c.pos = (c.pos[0],c.rows-1)\r\n else: c.move(c.dirnx,c.dirny)\r\n\r\n \r\n def reset(self,pos):\r\n self.head=Cube(pos)\r\n self.body=[]\r\n self.body.append(self.head)\r\n self.turns={}\r\n self.dirnx=0\r\n self.dirny=-1\r\n \r\n def addCube(self):\r\n tail = self.body[-1]\r\n dx, dy = tail.dirnx, tail.dirny\r\n\r\n if dx == 1 and dy == 0:\r\n self.body.append(Cube((tail.pos[0]-1,tail.pos[1])))\r\n elif dx == -1 and dy == 0:\r\n self.body.append(Cube((tail.pos[0]+1,tail.pos[1])))\r\n elif dx == 0 and dy == 1:\r\n self.body.append(Cube((tail.pos[0],tail.pos[1]-1)))\r\n elif dx == 0 and dy == -1:\r\n self.body.append(Cube((tail.pos[0],tail.pos[1]+1)))\r\n\r\n self.body[-1].dirnx = dx\r\n self.body[-1].dirny = dy\r\n \r\n def draw(self,surface):\r\n for i,c in enumerate(self.body):\r\n if i==0:\r\n c.draw(surface,True)\r\n else:\r\n 
c.draw(surface)\r\n\r\ndef drawGrid(w,rows,surface):\r\n sizeBtwn=w//rows\r\n x=0\r\n y=0\r\n for l in range(rows):\r\n x+=sizeBtwn\r\n y+=sizeBtwn\r\n\r\n pg.draw.line(surface, (255,255,255),(x,0),(x,w))\r\n pg.draw.line(surface, (255,255,255),(0,y),(w,y))\r\n\r\ndef redrawWindow(surface):\r\n global width, rows,s,snack\r\n surface.fill((0,0,0))\r\n s.draw(surface)\r\n snack.draw(surface)\r\n drawGrid(width,rows,surface)\r\n pg.display.update()\r\n\r\ndef randomSnack(rows,item):\r\n positions=item.body\r\n while True:\r\n x=random.randrange(rows)\r\n y=random.randrange(rows)\r\n if len(list(filter(lambda z:z.pos == (x,y), positions))) > 0:\r\n continue\r\n else:\r\n break\r\n return (x,y)\r\n\r\ndef message_box(subject,content):\r\n root=tk.Tk()\r\n root.attributes(\"-topmost\",True)\r\n root.withdraw()\r\n messagebox.showinfo(subject,content)\r\n try:\r\n root.destroy()\r\n except:\r\n pass\r\n\r\ndef main():\r\n global width,rows,s, snack\r\n width=500\r\n rows=20\r\n win=pg.display.set_mode((width,width))\r\n pg.display.set_caption(\"Snake\")\r\n icon=pg.image.load(\"snake.png\")\r\n #
# Icons made by Freepik from www.flaticon.com
    \r\n pg.display.set_icon(icon)\r\n\r\n \r\n s=Snake((255,0,0),(10,10))\r\n snack=Cube(randomSnack(rows,s),color=(0,255,0))\r\n flag=True\r\n\r\n clock=pg.time.Clock()\r\n\r\n while flag:\r\n pg.time.delay(50)\r\n clock.tick(10)\r\n s.move()\r\n if s.body[0].pos==snack.pos:\r\n s.addCube()\r\n snack=Cube(randomSnack(rows,s),color=(0,255,0))\r\n \r\n for x in range(len(s.body)):\r\n if s.body[x].pos in list(map(lambda z:z.pos,s.body[x+1:])):\r\n #print('Score: ',len(s.body))\r\n message_box(\"You Lost!\",f'Score: {len(s.body)}\\nPlay again...')\r\n s.reset((10,10))\r\n break\r\n\r\n redrawWindow(win)\r\n\r\n pass\r\n\r\n\r\nmain()\r\n","sub_path":"Snake/Snake.py","file_name":"Snake.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621564337","text":"L = [1,2,3]\nI = iter(L)\n# print(I.__next__(),\n # I.__next__(),\n # I.__next__())\n# for X in L:\n # print(X**2, end=' ')\n\n# I = iter(L)\n\n# while True:\n # try:\n # X = I.__next__()\n # except StopIteration:\n # break\n # print(X**2, end=' ')\n \n# R = range(5)\n# print(R)\n# I = iter(R)\n# print(I.__next__())\n# print(list(range(5)))\n\n# for idx, item in enumerate(\"spam\"):\n # print(idx, item)\n# E = enumerate(\"spam\")\n# print(iter(E) is E)\n# print(E.__next__())\n# print(E.__next__())\n# for a in E:\n # print(a)\n \n# M = map(abs, (-1, 0, 1))\n# print(list(M))\n# print(iter(M) is M)\n# M = map(abs, (-1, 0, 1))\n# print(M.__next__())\n# print(M.__next__())\n# print(M.__next__())\n# for x in M:print(x)\n# M = map(abs, (-1, 0, 1))\n# for x in M:print(x)\n# for x in M:print(x)\n# for x in M:print(x)\n# Z = zip((1,2,3),(\"a\",\"b\",\"c\"))\n# print(Z)\n# print([x for x in ['spam', '', 'ni'] if bool(x)])\n\ndef func(seq1, seq2):\n res = []\n for x in seq1:\n if x in seq2:\n res.append(x)\n return res\n\n# s1 = \"Spam\"\n# s2 = \"Scam\"\n# print(func(s1, s2))\n# z = [x for x in s1 if x in s2]\n# print(z)\n\ndef f(a, *pargs, **kargs): print(a, pargs, kargs)\n# f(1,2,3,x=1,y=2)\n\ndef intersect(*args):\n res = []\n for x in args[0]:\n if x in res: continue\n for other in args[1:]:\n if x not in other: break\n else:\n res.append(x)\n return res\n\ndef union(*args):\n res = []\n for seq in args:\n for x in seq:\n if not x in res:\n res.append(x)\n return res\n \ns1, s2, s3 = \"SPAM\", \"SCAM\", \"SLAM\"\n# print(intersect(s1,s2))\n# print(union(s1,s3))\n\n# def f(L):\n # print(\"L is: \", L)\n # if not L:\n # print(\"Hey, L does not exist!\")\n # return 0\n # else:\n # print(\"Keep going\")\n # return L[0] + f(L[1:])\n \n# def f(L):\n # print(L)\n # if not L:\n # return 0\n # else:\n # return L[0]+f(L[1:])\n\n# def f(L):\n # print(L)\n # return 0 if not L else L[0] + f(L[1:])\n\ndef f(L):\n print(L)\n return L[0] if len(L)==1 else L[0] + f(L[1:])\n# def f(L):\n # print(L)\n # tot = 0\n # for x in L:\n # if not isinstance(x, list):\n # tot += x\n # else:\n # tot += f(x)\n # return tot\n \n \n \n#f([1,2,3,4,5])\n#f([])\nf(['s','p', 'a', 'm'])\nprint(len(['s','p', 'a', 'm']))\n \n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"LectureNotes/Lecture5.py","file_name":"Lecture5.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"588910214","text":"\nfrom gym import spaces\nimport numpy as np\nfrom bark.models.dynamic import StateDefinition\nfrom modules.runtime.commons.parameters import ParameterServer\nimport math\nimport operator\nfrom src.commons.spaces import 
BoundedContinuous, Discrete\n\nfrom src.observers.observer import StateObserver\n\n\nclass SimpleObserver(StateObserver):\n def __init__(self,\n params=ParameterServer()):\n StateObserver.__init__(self, params)\n self._state_definition = [int(StateDefinition.X_POSITION),\n int(StateDefinition.Y_POSITION),\n int(StateDefinition.THETA_POSITION),\n int(StateDefinition.VEL_POSITION)]\n self._observation_len = \\\n self._max_num_vehicles*self._len_state\n\n def observe(self, world, agents_to_observe):\n \"\"\"see base class\n \"\"\"\n\n concatenated_state = np.zeros(self._observation_len, dtype=np.float32)\n for i, (_, agent) in enumerate(world.agents.items()):\n normalized_state = self._normalize(agent.state)\n reduced_state = self._select_state_by_index(normalized_state)\n starts_id = i*self._len_state\n concatenated_state[starts_id:starts_id+self._len_state] = reduced_state\n \n if i >= self._max_num_vehicles:\n break\n return concatenated_state\n \n\n def _norm(self, agent_state, position, range):\n agent_state[int(position)] = \\\n (agent_state[int(position)] - range[0])/(range[1]-range[0])\n return agent_state\n\n def _normalize(self, agent_state):\n agent_state = \\\n self._norm(agent_state,\n StateDefinition.X_POSITION,\n self._world_x_range)\n agent_state = \\\n self._norm(agent_state,\n StateDefinition.Y_POSITION,\n self._world_y_range)\n agent_state = \\\n self._norm(agent_state,\n StateDefinition.THETA_POSITION,\n self._theta_range)\n agent_state = \\\n self._norm(agent_state,\n StateDefinition.VEL_POSITION,\n self._velocity_range)\n return agent_state\n\n def reset(self, world, agents_to_observe):\n super(SimpleObserver, self).reset(world, agents_to_observe)\n return world\n\n @property\n def observation_space(self):\n return spaces.Box(\n low=np.zeros(self._observation_len),\n high=np.ones(self._observation_len))\n\n @property\n def _len_state(self):\n return len(self._state_definition)\n\n\n","sub_path":"src/observers/simple_observer.py","file_name":"simple_observer.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"575752523","text":"\nimport numpy as np\n\n#from traffic_light_display import TrafficLightDisplay\n\n#------------------------------------------------------------------------------\n\n\n\nclass TrafficLightCycle(object):\n \n '''\n Put this in the heap along with vehicles.\n Extract it from the heap at its start time to change the current cycle\n of the traffic light to the extacted cycle.\n '''\n \n def __init__(self,start_time,cycle_specs,traffic_light=None):\n self.time = start_time\n assert len(cycle_specs) == 5 # (duration,to_unlock,tpm,to_lock,display)\n self.cycle_specs = cycle_specs\n self.start_time = start_time\n self.end_time = start_time + cycle_specs[0]\n self.traffic_light = traffic_light\n self.is_cycle = True\n \n\n #--------------------------------------------------------------------------\n \n def activate(self):\n duration,to_unlock,tpm,to_lock,signal_specs = self.cycle_specs\n # Lock the entrances \n self.traffic_light.lock(to_lock)\n self.traffic_light.unlock(to_unlock)\n self.traffic_light.tpm = tpm\n# self.traffic_light.unlock_time = self.end_time\n self.traffic_light.update_unlock_time(self.end_time)\n \n if self.traffic_light.display is not None :\n traffic_light_display = self.traffic_light.display\n# assert isinstance(traffic_light_display,TrafficLightDisplay)\n traffic_light_display.show_signals(signal_specs)\n return None \n\n \n 
#--------------------------------------------------------------------------\n \n def __eq__(self,other):\n return self.time == other.time\n \n #--------------------------------------------------------------------------\n \n def __lt__(self,other):\n return self.time < other.time\n \n #--------------------------------------------------------------------------\n \n def __repr__(self):\n return 'Traffic Light Cycle ({0})'.format(self.time)\n\n\n\n#------------------------------------------------------------------------------\n\ndef N_S_flow(duration:float,\n prob_N_S:float,\n prob_S_N:float):\n \n '''\n Northern and Southern entrances allow traffic to flow forward and turn left.\n \n Parameters:\n -----------\n duration: float\n A value in seconds of how long this state of the traffic light will\n last.\n prob_N_S: float\n The probability that a vehicle at the Northern entrance selects South\n as its exit (moves forward). With probability 1 - prob_N_S will a \n vehicle then select East as its exit (left turn).\n prob_S_N: float\n The probability that a vehicle at the Southern entrance selects North\n as its exit (moves forward). With probability 1 - prob_S_N will a \n vehicle then select West as its exit (left turn).\n \n Raises:\n -------\n AssertionError:\n Probability values are not valid.\n ValueError:\n duration must be positive and non-zero.\n \n Returns:\n ---------\n tuple: not relevant for user purposes.\n '''\n \n assert prob_N_S <= 1 and prob_N_S >= 0\n assert prob_S_N <= 1 and prob_S_N >= 0\n if duration <= 0:\n raise ValueError('We must have duration > 0 .')\n prob_N_E = 1 - prob_N_S\n prob_S_W = 1 - prob_S_N\n entrances = (0,2)\n to_lock = (1,3)\n tpm = np.zeros((4,4))\n tpm[0,2] = prob_N_S\n tpm[0,1] = prob_N_E\n tpm[2,0] = prob_S_N\n tpm[2,3] = prob_S_W\n \n \n display = [{'color':'green','message':'Go'}, # N\n {'color':'red','message':'Stop'}, # E\n {'color':'green','message':'Go'}, # S\n {'color':'red','message':'Stop'}] # W\n \n return duration,entrances,tpm,to_lock,display\n\n#------------------------------------------------------------------------------\n\n\ndef W_E_flow(duration:float,\n prob_W_E:float,\n prob_E_W:float):\n \n '''\n Western and Eastern entrances allow traffic to flow forward and turn left.\n \n Parameters:\n -----------\n duration: float\n A value in seconds of how long this state of the traffic light will\n last.\n prob_W_E: float\n The probability that a vehicle at the Western entrance selects East\n as its exit (moves forward). With probability 1 - prob_W_E will a \n vehicle then select North as its exit (left turn).\n prob_E_W: float\n The probability that a vehicle at the Eastern entrance selects West\n as its exit (moves forward). 
With probability 1 - prob_E_W will a \n vehicle then select South as its exit (left turn).\n \n Raises:\n -------\n AssertionError:\n Probability values are not valid.\n ValueError:\n duration must be positive and non-zero.\n \n Returns:\n ---------\n tuple: not relevant for user purposes.\n '''\n if duration <= 0:\n raise ValueError('We must have duration > 0 .')\n assert prob_W_E <= 1 and prob_W_E >= 0\n assert prob_E_W <= 1 and prob_E_W >= 0\n prob_W_N = 1 - prob_W_E\n prob_E_S = 1 - prob_E_W\n entrances = (1,3)\n to_lock = (0,2)\n tpm = np.zeros((4,4))\n tpm[1,3] = prob_E_W\n tpm[1,2] = prob_E_S\n tpm[3,1] = prob_W_E\n tpm[3,0] = prob_W_N\n \n display = [{'color':'red','message':'Stop'}, # N\n {'color':'green','message':'Go'}, # E\n {'color':'red','message':'Stop'}, # S\n {'color':'green','message':'Go'}] # W\n \n return duration,entrances,tpm,to_lock,display\n\n#------------------------------------------------------------------------------\n\n\n\ndef N_S_overwash(duration:float):\n \n '''\n Northern entrance only flows to the Western exit.\n Southern entrance only flows to the Eastern exit.\n \n Parameters:\n -----------\n duration: float\n A value in seconds of how long this state of the traffic light will\n last.\n \n Raises:\n -------\n ValueError:\n duration must be positive and non-zero.\n \n Returns:\n ---------\n tuple: not relevant for user purposes.\n '''\n if duration <= 0:\n raise ValueError('We must have duration > 0 .')\n entrances = (0,2)\n to_lock = (1,3)\n tpm = np.zeros((4,4))\n tpm[0,3] = 1\n tpm[2,1] = 1\n \n display = [{'color':'green','message':'-> only'}, # N\n {'color':'red','message':'Stop'}, # E\n {'color':'green','message':'-> only'}, # S\n {'color':'red','message':'Stop'}] # W\n \n return duration,entrances,tpm,to_lock,display\n\n#------------------------------------------------------------------------------\n\n\ndef W_E_overwash(duration:float):\n \n '''\n Eastern entrance only flows to the Northern exit.\n Western entrance only flows to the Southern exit.\n \n Parameters:\n -----------\n duration: float\n A value in seconds of how long this state of the traffic light will\n last.\n \n Raises:\n -------\n ValueError:\n duration must be positive and non-zero.\n \n Returns:\n ---------\n tuple: not relevant for user purposes.\n '''\n if duration <= 0:\n raise ValueError('We must have duration > 0 .')\n \n entrances = (1,3)\n to_lock = (0,2)\n tpm = np.zeros((4,4))\n tpm[1,0] = 1\n tpm[3,2] = 1\n \n display = [{'color':'red','message':'Stop'}, # N\n {'color':'green','message':'-> only'}, # E\n {'color':'red','message':'Stop'}, # S\n {'color':'green','message':'-> only'}] # W\n \n return duration,entrances,tpm,to_lock,display\n\n#------------------------------------------------------------------------------\n\n\n\n\ndef N_flow(duration:float,\n prob_N_E:float,\n prob_N_S:float,\n prob_N_W:float):\n \n '''\n Only flow from the Northern entrance is permitted.\n It can flow to any of the other exits except North.\n \n Parameters:\n -----------\n duration: float\n A value in seconds of how long this state of the traffic light will\n last.\n prob_N_E: float\n Left turn.\n prob_N_S: float\n Move forward.\n prob_N_W: float\n Right turn.\n \n Raises:\n -------\n AssertionError:\n Probability values are not valid.\n ValueError:\n duration must be positive and non-zero.\n \n Returns:\n ---------\n tuple: not relevant for user purposes.\n ''' \n if duration <= 0:\n raise ValueError('We must have duration > 0 .')\n assert prob_N_E + prob_N_S + prob_N_W == 1\n entrances = 
(0,)\n to_lock = (1,2,3)\n tpm = np.zeros((4,4))\n tpm[0,1] = prob_N_E \n tpm[0,2] = prob_N_S\n tpm[0,3] = prob_N_W\n \n display = [{'color':'green','message':'Go ->'}, # N\n {'color':'red','message':'Stop'}, # E\n {'color':'red','message':'Stop'}, # S\n {'color':'red','message':'Stop'}] # W\n \n return duration,entrances,tpm,to_lock,display\n\n#------------------------------------------------------------------------------\n\n\ndef S_flow(duration:float,\n prob_S_W:float,\n prob_S_N:float,\n prob_S_E:float):\n \n '''\n Only flow from the Southern entrance is permitted.\n It can flow to any of the other exits except South.\n \n Parameters:\n -----------\n duration: float\n A value in seconds of how long this state of the traffic light will\n last.\n prob_S_W: float\n Left turn.\n prob_S_N: float\n Move forward.\n prob_S_E: float\n Right turn.\n \n Raises:\n -------\n AssertionError:\n Probability values are not valid.\n ValueError:\n duration must be positive and non-zero.\n \n Returns:\n ---------\n tuple: not relevant for user purposes.\n ''' \n \n assert prob_S_W + prob_S_N + prob_S_E == 1\n if duration <= 0:\n raise ValueError('We must have duration > 0 .')\n\n entrances = (2,)\n to_lock = (0,1,3)\n tpm = np.zeros((4,4))\n tpm[2,0] = prob_S_N\n tpm[2,3] = prob_S_W\n tpm[2,1] = prob_S_E\n \n display = [{'color':'red','message':'Stop'}, # N\n {'color':'red','message':'Stop'}, # E\n {'color':'green','message':'Go ->'}, # S\n {'color':'red','message':'Stop'}] # W\n \n return duration,entrances,tpm,to_lock,display\n\n#------------------------------------------------------------------------------\n\n\ndef E_flow(duration:float,\n prob_E_S:float,\n prob_E_W:float,\n prob_E_N:float):\n \n '''\n Only flow from the Eastern entrance is permitted.\n It can flow to any of the other exits except East.\n \n Parameters:\n -----------\n duration: float\n A value in seconds of how long this state of the traffic light will\n last.\n prob_E_S: float\n Left turn.\n prob_E_W: float\n Move forward.\n prob_E_N: float\n Right turn.\n \n Raises:\n -------\n AssertionError:\n Probability values are not valid.\n ValueError:\n duration must be positive and non-zero.\n \n Returns:\n ---------\n tuple: not relevant for user purposes.\n ''' \n \n assert prob_E_N + prob_E_S + prob_E_W == 1\n\n if duration <= 0:\n raise ValueError('We must have duration > 0 .')\n entrances = (1,)\n to_lock = (0,2,3)\n tpm = np.zeros((4,4))\n tpm[1,2] = prob_E_S\n tpm[1,3] = prob_E_W\n tpm[1,0] = prob_E_N\n \n display = [{'color':'red','message':'Stop'}, # N\n {'color':'green','message':'Go ->'}, # E\n {'color':'red','message':'Stop'}, # S\n {'color':'red','message':'Stop'}] # W\n \n return duration,entrances,tpm,to_lock,display\n\n#------------------------------------------------------------------------------\n\n\ndef W_flow(duration:float,\n prob_W_N:float,\n prob_W_E:float,\n prob_W_S:float):\n \n '''\n Only flow from the Western entrance is permitted.\n It can flow to any of the other exits except West.\n \n Parameters:\n -----------\n duration: float\n A value in seconds of how long this state of the traffic light will\n last.\n prob_W_N: float\n Left turn.\n prob_W_E: float\n Move forward.\n prob_W_S: float\n Right turn.\n \n Raises:\n -------\n AssertionError:\n Probability values are not valid.\n ValueError:\n duration must be positive and non-zero.\n \n Returns:\n ---------\n tuple: not relevant for user purposes.\n ''' \n if duration <= 0:\n raise ValueError('We must have duration > 0 .')\n assert prob_W_N + prob_W_E +
prob_W_S == 1\n\n entrances = (3,)\n to_lock = (0,1,2)\n tpm = np.zeros((4,4))\n tpm[3,0] = prob_W_N\n tpm[3,1] = prob_W_E\n tpm[3,2] = prob_W_S\n \n display = [{'color':'red','message':'Stop'}, # N\n {'color':'red','message':'Stop'}, # E\n {'color':'red','message':'Stop'}, # S\n {'color':'green','message':'Go ->'}] # W\n \n return duration,entrances,tpm,to_lock,display\n\n#------------------------------------------------------------------------------\n\n\ndefault_cycle = [\n N_flow(10,0.2,0.6,0.2),\n N_S_flow(15,0.8,0.8),\n N_S_overwash(5),\n S_flow(10,0.2,0.6,0.2),\n E_flow(10,0.2,0.6,0.2),\n W_E_flow(15,0.8,0.8),\n W_E_overwash(5),\n W_flow(10,0.2,0.6,0.2)\n ]\n\n#------------------------------------------------------------------------------\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#------------------------------------------------------------------------------","sub_path":"Basik_Tutorial/__basik__/TrafficLightObject/traffic_light_cycle.py","file_name":"traffic_light_cycle.py","file_ext":"py","file_size_in_byte":13891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"411426318","text":"import argparse\nimport cPickle\n\nimport cv2\nimport numpy as np\n\nfrom model.googlenet import GoogleNetExtractor\nfrom model.utils import Conf\nfrom model.utils import dataset\n\n# construct the argument parser and parse the command line arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--conf\", required=False, help=\"path to configuration file\",default=\"conf/fungi.json\")\nap.add_argument(\"-i\", \"--image\", required=False, help=\"path to the image to classify\",default=\"/home/joheras/Escritorio/Research/Fungi/FungiImages/decoloracion/azul_acido/control.jpg\")\nap.add_argument(\"-t\", \"--control\", required=False, help=\"path to the control image\",default=\"/home/joheras/Escritorio/Research/Fungi/FungiImages/decoloracion/azul_acido/control.jpg\")\nargs = vars(ap.parse_args())\nconf = Conf(args[\"conf\"])\n\nle = cPickle.loads(open(conf[\"label_encoder_path\"]).read())\noe = GoogleNetExtractor()\nfeatures = oe.describe(np.array([dataset.prepare_image(cv2.imread(args[\"image\"]), conf[\"googlenet_fixed_size\"])], dtype=\"float\"))\nfeaturesControl = oe.describe(np.array([dataset.prepare_image(cv2.imread(args[\"control\"]), conf[\"googlenet_fixed_size\"])], dtype=\"float\"))\n\nmodel = cPickle.loads(open(conf[\"classifier_path\"]+ conf[\"model\"] + \".cpickle\").read())\nprediction = model.predict_proba(np.atleast_2d(features+featuresControl))[0]\nprediction = le.inverse_transform(np.argmax(prediction))\nimage = cv2.imread(args[\"image\"])\ncv2.putText(image, prediction, (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0,\n\t\t(0, 255, 0), 3)\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)","sub_path":"fungi_classification/googlenetwithcontrol/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"163015230","text":"\n#\n# Sanitize strings: Make them valid for file names, dataframe names, etc\n#\n\nsanitize_valid_chars = set('_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\nsanitize_dict = {\n '+': '_plus_',\n '-': '_',\n '=': '_eq_',\n '<': '_less_than_',\n '>': '_more_than_',\n '/': '_slash_',\n}\n\n\ndef sanitize_name(s):\n \"\"\" Sanitize a string to be used as a variable or column name \"\"\"\n return ''.join(sanitize_char(c) for c in str(s))\n\n\ndef sanitize_char(c):\n \"\"\"
Sanitize a string by only allowing \"valid\" characters \"\"\"\n if c in sanitize_valid_chars:\n return c\n if c in sanitize_dict:\n return sanitize_dict[c]\n return '_'\n","sub_path":"src/logml/util/sanitize.py","file_name":"sanitize.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"38611235","text":"###############################################################################\n# #\n# Copyright (C) 2009-2014 Edward d'Auvergne #\n# #\n# This file is part of the program relax (http://www.nmr-relax.com). #\n# #\n# This program is free software: you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation, either version 3 of the License, or #\n# (at your option) any later version. #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# GNU General Public License for more details. #\n# #\n# You should have received a copy of the GNU General Public License #\n# along with this program. If not, see . #\n# #\n###############################################################################\n\n# Python module imports.\nfrom unittest import TestCase\n\n# relax module imports.\nfrom prompt.interpreter import Interpreter\nfrom lib.errors import RelaxNoneStrError, RelaxNoneListNumError, RelaxStrError\n\n# Unit test imports.\nfrom test_suite.unit_tests._prompt.data_types import DATA_TYPES\n\n\nclass Test_pymol(TestCase):\n \"\"\"Unit tests for the functions of the 'prompt.pymol' module.\"\"\"\n\n def __init__(self, methodName=None):\n \"\"\"Set up the test case class for the system tests.\"\"\"\n\n # Execute the base __init__ methods.\n super(Test_pymol, self).__init__(methodName)\n\n # Load the interpreter.\n self.interpreter = Interpreter(show_script=False, raise_relax_error=True)\n self.interpreter.populate_self()\n self.interpreter.on(verbose=False)\n\n # Alias the user function class.\n self.pymol_fns = self.interpreter.pymol\n\n\n def test_macro_apply_argfail_data_type(self):\n \"\"\"The data_type arg test of the pymol.macro_apply() user function.\"\"\"\n\n # Loop over the data types.\n for data in DATA_TYPES:\n # Catch the str arguments, and skip them.\n if data[0] == 'str':\n continue\n\n # The argument test.\n self.assertRaises(RelaxStrError, self.pymol_fns.macro_apply, data_type=data[1])\n\n\n def test_macro_apply_argfail_style(self):\n \"\"\"The style arg test of the pymol.macro_apply() user function.\"\"\"\n\n # Loop over the data types.\n for data in DATA_TYPES:\n # Catch the str arguments, and skip them.\n if data[0] == 'str':\n continue\n\n # The argument test.\n self.assertRaises(RelaxStrError, self.pymol_fns.macro_apply, data_type='a', style=data[1])\n\n\n def test_macro_apply_argfail_colour_start_name(self):\n \"\"\"The colour_start_name arg test of the pymol.macro_apply() user function.\"\"\"\n\n # Loop over the data types.\n for data in DATA_TYPES:\n # Catch the None and str arguments, and skip them.\n if data[0] == 'None' or data[0] == 'str':\n continue\n\n # The argument test.\n self.assertRaises(RelaxNoneStrError, self.pymol_fns.macro_apply, data_type='a', style='x', colour_start_name=data[1])\n\n\n def test_macro_apply_argfail_colour_start_rgb(self):\n \"\"\"The colour_start_rgb arg test of the pymol.macro_apply() user function.\"\"\"\n\n # 
Loop over the data types.\n for data in DATA_TYPES:\n # Catch the None and num list arguments, and skip them.\n if data[0] == 'None' or ((data[0] == 'int list' or data[0] == 'float list' or data[0] == 'number list') and len(data[1]) == 3):\n continue\n\n # The argument test.\n self.assertRaises(RelaxNoneListNumError, self.pymol_fns.macro_apply, data_type='a', style='x', colour_start_rgb=data[1])\n\n\n def test_macro_apply_argfail_colour_end_name(self):\n \"\"\"The colour_end_name arg test of the pymol.macro_apply() user function.\"\"\"\n\n # Loop over the data types.\n for data in DATA_TYPES:\n # Catch the None and str arguments, and skip them.\n if data[0] == 'None' or data[0] == 'str':\n continue\n\n # The argument test.\n self.assertRaises(RelaxNoneStrError, self.pymol_fns.macro_apply, data_type='a', style='x', colour_end_name=data[1])\n\n\n def test_macro_apply_argfail_colour_end_rgb(self):\n \"\"\"The colour_end_rgb arg test of the pymol.macro_apply() user function.\"\"\"\n\n # Loop over the data types.\n for data in DATA_TYPES:\n # Catch the None and num list arguments, and skip them.\n if data[0] == 'None' or ((data[0] == 'int list' or data[0] == 'float list' or data[0] == 'number list') and len(data[1]) == 3):\n continue\n\n # The argument test.\n self.assertRaises(RelaxNoneListNumError, self.pymol_fns.macro_apply, data_type='a', style='x', colour_end_rgb=data[1])\n\n\n def test_macro_apply_argfail_colour_list(self):\n \"\"\"The colour_list arg test of the pymol.macro_apply() user function.\"\"\"\n\n # Loop over the data types.\n for data in DATA_TYPES:\n # Catch the None and str arguments, and skip them.\n if data[0] == 'None' or data[0] == 'str':\n continue\n\n # The argument test.\n self.assertRaises(RelaxNoneStrError, self.pymol_fns.macro_apply, data_type='a', style='x', colour_list=data[1])\n","sub_path":"test_suite/unit_tests/_prompt/test_pymol.py","file_name":"test_pymol.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"570807069","text":"#!/usr/bin/env python3\n\nimport pysam\nimport sys\n\n\ndef project(bam_path, ref_seq, ref_pos):\n FLANK = 50\n name, pos, sign = project_flank(bam_path, ref_seq, ref_pos, 1)\n if name:\n return name, pos, sign\n else:\n return project_flank(bam_path, ref_seq, ref_pos, 50)\n\n\ndef project_flank(bam_path, ref_seq, ref_pos, flank):\n samfile = pysam.AlignmentFile(bam_path, \"rb\")\n\n #min_ref_diff = None\n last_aln = None\n last_qry_pos = None\n\n for pileup_col in samfile.pileup(ref_seq, max(0, ref_pos - flank), ref_pos + flank, truncate=True,\n max_depth=5, stepper=\"samtools\"):\n #getting the longest alignment over this coordinate\n selected_pileup_aln = None\n largest_len = 0\n for pileup_read in pileup_col.pileups:\n if not pileup_read.is_del and not pileup_read.is_refskip:\n if pileup_read.alignment.query_alignment_length > largest_len:\n selected_pileup_aln = pileup_read\n largest_len = pileup_read.alignment.query_alignment_length\n\n if not selected_pileup_aln:\n continue\n\n #computing read position (not that simple heh)\n selected_aln = selected_pileup_aln.alignment\n left_hard = 0\n if selected_aln.cigartuples[0][0] == 5:\n left_hard = selected_aln.cigartuples[0][1]\n right_hard = 0\n if selected_aln.cigartuples[-1][0] == 5:\n right_hard = selected_aln.cigartuples[-1][1]\n query_length = selected_aln.infer_query_length() + left_hard + right_hard\n\n read_pos = selected_pileup_aln.query_position_or_next + left_hard\n if 
selected_aln.is_reverse:\n read_pos = query_length - read_pos\n\n last_aln = selected_pileup_aln.alignment\n last_qry_pos = read_pos\n ###\n\n #print(last_qry_pos)\n\n if ref_pos <= pileup_col.pos and last_aln is not None:\n break\n\n if not last_aln:\n return None, None, None\n\n return last_aln.query_name, last_qry_pos, 1 if not last_aln.is_reverse else 0\n\n\ndef bed_liftover(bed_file, bam_file, out_stream, output_failed=False):\n for line in open(bed_file, \"r\"):\n line = line.strip()\n if line.startswith(\"#\"):\n out_stream.write(line + \"\\n\")\n continue\n\n fields = line.split(\"\\t\")\n chr_id, chr_start, chr_end = fields[0], int(fields[1]), int(fields[2])\n\n proj_start_chr, proj_start_pos, proj_start_sign = project(bam_file, chr_id, chr_start)\n proj_end_chr, proj_end_pos, proj_end_sign = project(bam_file, chr_id, chr_end)\n\n #proj_start_chr, proj_start_pos, proj_start_sign = project(bam_file, chr_id, chr_start)\n #proj_end_chr, proj_end_pos, proj_end_sign = project(bam_file, chr_id, chr_end)\n\n if (not proj_start_chr or not proj_end_chr or\n proj_end_chr != proj_start_chr or\n proj_start_sign != proj_end_sign):\n if output_failed:\n print(\"#Failed:\", line)\n continue\n\n if proj_start_sign < 0:\n proj_start_pos, proj_end_pos = proj_end_pos, proj_start_pos\n #if proj_end_pos < proj_start_pos:\n # print(proj_start_pos, proj_start_sign, proj_end_pos, proj_end_sign)\n # raise Exception(\"Negative length interval\")\n\n fields[0], fields[1], fields[2] = proj_start_chr, str(proj_start_pos), str(proj_end_pos)\n out_stream.write(\"\\t\".join(fields) + \"\\n\")\n\n\ndef main():\n if len(sys.argv) != 3:\n print(\"usage bed_liftover.py bed_file indexed_bam\")\n return 1\n\n bed_file = sys.argv[1]\n bam_file = sys.argv[2]\n bed_liftover(bed_file, bam_file, sys.stdout, output_failed=True)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"hapdup/bed_liftover.py","file_name":"bed_liftover.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"534973295","text":"from odoo import http\nfrom odoo.http import request\nfrom . 
import data_scrapping\nimport json\nimport datetime\n\nclass Hospital(http.Controller):\n @http.route('/weather_forecast_', type=\"http\", auth=\"public\", website=True)\n def weather_forecast(self, num, **kw):\n dates_list = http.request.env['weather.forecast.date']\n today = datetime.date.today()\n three_day = today + datetime.timedelta(days=2)\n week = today + datetime.timedelta(days=6)\n if num == '1':\n return http.request.render('weather_forecast.dates_weather', {'dates': dates_list.search([('date', '=', today)]), 'count':1})\n elif num == '3':\n return http.request.render('weather_forecast.dates_weather', {'dates': dates_list.search([('date', '>=', today), ('date', '<=', three_day)]), 'count':3})\n else:\n return http.request.render('weather_forecast.dates_weather', {'dates': dates_list.search([('date', '>=', today), ('date', '<=', week)]), 'count':7})\n\n @http.route('/update_weather', type=\"http\", auth=\"public\", website=True)\n def update_weather(self):\n obj = data_scrapping.data_scrapping()\n obj.get_weather()\n data = obj.data\n date = datetime.date.today()\n if data:\n for i in range(9):\n data[i]['date'] = date\n date_info = request.env['weather.forecast.date'].sudo().search([('date', '=', date)], limit=1)\n if date_info:\n date_info.sudo().write(data[i])\n print(date_info)\n else:\n new_date_info = request.env['weather.forecast.date'].sudo().create({'date': date})\n new_date_info.sudo().write(data[i])\n print(new_date_info)\n date += datetime.timedelta(days=1)\n dates_list = http.request.env['weather.forecast.date']\n today = datetime.date.today()\n return request.render('weather_forecast.dates_weather',\n {'dates': dates_list.search([('date', '=', today)]), 'count': 1})\n\n\n\n","sub_path":"controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"437041728","text":"# -*- coding: utf-8 -*-\nimport json\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nSTATES = [\n \"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\",\n \"GA\", \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\",\n \"MD\", \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\",\n \"NJ\", \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\",\n \"SC\", \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\",\n \"WY\",\n]\n\nWEEKDAYS = [\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\", \"Su\"]\n\n\nclass KonaGrillSpider(scrapy.Spider):\n download_delay = 0.2\n name = \"konagrill\"\n item_attributes = {\"brand\": \"Kona Grill\", \"brand_wikidata\": \"Q6428706\"}\n allowed_domains = [\"konagrill.com\"]\n\n def start_requests(self):\n url_by_state = \"https://www.konagrill.com/ajax/getlocationsbystate\"\n headers = {\"content-type\": \"application/x-www-form-urlencoded\"}\n\n # Get store id per state\n for state in STATES:\n yield scrapy.http.Request(\n url_by_state,\n method=\"POST\",\n body=\"state={}\".format(state),\n callback=self.parse,\n headers=headers,\n )\n\n def parse(self, response):\n store_data = json.loads(response.text)\n url_location_details = \"https://www.konagrill.com/ajax/getlocationdetails\"\n headers = {\"content-type\": \"application/x-www-form-urlencoded\"}\n store_ids = []\n\n if not store_data.get(\"data\"):\n return\n\n store_ids += [s.get(\"id\") for _, s in store_data.get(\"data\").items()]\n\n # Get 
store details\n for i in store_ids:\n yield scrapy.http.Request(\n url_location_details,\n method=\"POST\",\n body=\"id={}\".format(i),\n callback=self.parse_store,\n headers=headers,\n )\n\n def parse_store(self, response):\n response_data = json.loads(response.text)\n if not response_data.get(\"data\"):\n return\n\n store = response_data.get(\"data\")\n dh = store.get(\"dininghours\")\n # Data is inconsistent some keys were found with a trailing space\n opening_hours = self.parse_hours(dh.get(\"dining hours\") or dh.get(\"dining hours \"))\n properties = {\n \"addr_full\": store.get(\"address\"),\n \"city\": store.get(\"city\"),\n \"extras\": {\"email\": store.get(\"email\"),},\n \"lat\": store.get(\"latitude\"),\n \"lon\": store.get(\"longitude\"),\n \"name\": store.get(\"title\"),\n \"opening_hours\": opening_hours,\n \"phone\": store.get(\"phone_number\"),\n \"postcode\": store.get(\"zip\"),\n \"ref\": store.get(\"id\"),\n \"state\": store.get(\"state\"),\n \"website\": store.get(\"order_online_url\"),\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_hours(self, hours):\n oh = OpeningHours()\n\n for t in hours:\n # Some day entries contain invalid week data, e.g. \"Brunch\"\n # \"Brunch\" is a special dining hour that is contained in regular hours, ignore it\n if \"Brunch\" in t.get(\"days\"):\n continue\n days = self.parse_days(t.get(\"days\"))\n open_time, close_time = t.get(\"hours\").split(\"-\")\n ot = open_time.strip()\n ct = close_time.strip()\n for day in days:\n oh.add_range(day=day, open_time=ot, close_time=ct, time_format=\"%I%p\")\n\n return oh.as_opening_hours()\n\n def parse_days(self, days):\n \"\"\"Parse day ranges and returns a list of days it represent\n The following formats are considered:\n - Single day, e.g. \"Mon\", \"Monday\"\n - Range, e.g. \"Mon-Fri\", \"Tue-Sund\", \"Sat-Sunday\"\n - Two days, e.g. \"Sat & Sun\", \"Friday & Su\"\n\n Returns a list with the weekdays\n \"\"\"\n parsed_days = []\n\n # Range\n # Produce a list of weekdays between two days e.g. 
su-sa, mo-th, etc.\n if \"-\" in days:\n d = days.split(\"-\")\n r = [i.strip()[:2] for i in d]\n s = WEEKDAYS.index(r[0].title())\n e = WEEKDAYS.index(r[1].title())\n if s <= e:\n return WEEKDAYS[s : e + 1]\n else:\n return WEEKDAYS[s:] + WEEKDAYS[: e + 1]\n # Two days\n if \"&\" in days:\n d = days.split(\"&\")\n return [i.strip()[:2].title() for i in d]\n # Single days\n else:\n return [days.strip()[:2].title()]\n","sub_path":"locations/spiders/kona_grill.py","file_name":"kona_grill.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"600931709","text":"# capture the video and convert into frames \n# open => run = > exit (q)\n\nfrom cv2 import cv2 as cv # in VScode \n\n\n# capture video \n\ncapture = cv.VideoCapture(' C:\\\\Users\\\\DEVELOPER\\\\Downloads\\\\Video\\\\Video.avi ')\n\ncar_cascade = cv.CascadeClassifier('F:\\\\EVRSYSTEM\\\\AMBULANCE\\\\classifier\\\\cascadeAmbulance.xml') \n# convert into frames \n\nwhile True:\n isTrue, frame = capture.read()\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n cars = car_cascade.detectMultiScale(gray, 1.1, 1)\n for (x,y,w,h) in cars: \n cv.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2) \n\n cv.imshow('showing video',frame)\n if cv.waitKey(20) & 0xFF == ord('q'):\n break\n\ncapture.release()\ncv.destroyAllWindows()\n\nprint('successfully Terminate ')\n\n","sub_path":"sem_III_project/ambulanceDetectionError.py","file_name":"ambulanceDetectionError.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"315176428","text":"\"\"\"\ndata.world-py\nCopyright 2017 data.world, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the\nLicense.\n\nYou may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\nimplied. 
See the License for the specific language governing\npermissions and limitations under the License.\n\nThis product includes software developed at\ndata.world, Inc.(http://data.world/).\n\"\"\"\nfrom __future__ import absolute_import\n\nimport configparser\nimport os\nfrom os import path\n\nimport pytest\nfrom doublex import assert_that\nfrom hamcrest import equal_to, is_not, is_, calling, raises, has_length\nfrom six import StringIO\n\nfrom datadotworld.config import Config\n\n\nclass TestConfig:\n # Fixtures\n\n @pytest.fixture()\n def config_file_path(self, tmpdir):\n return str(tmpdir.join('.dw/config'))\n\n @pytest.fixture()\n def legacy_file_path(self, tmpdir):\n return str(tmpdir.join('.data.world'))\n\n @pytest.fixture()\n def config_directory(self, tmpdir):\n return os.makedirs(str(tmpdir.join('.dw')))\n\n @pytest.fixture()\n def default_config_file(self, config_file_path):\n config_parser = configparser.ConfigParser()\n config_parser.set(configparser.DEFAULTSECT, 'auth_token', 'abcd')\n config_parser.write(open(config_file_path, 'w'))\n\n @pytest.fixture()\n def default_invalid_config_file(self, config_file_path):\n config_parser = configparser.ConfigParser()\n config_parser.read_file(StringIO('[default]'))\n config_parser.set('default', 'auth_token', 'lower_case_default')\n config_parser.write(open(config_file_path, 'w'))\n\n @pytest.fixture()\n def alternative_config_file(self, config_file_path):\n config_parser = configparser.ConfigParser()\n config_parser.add_section('alternative')\n config_parser.set('alternative', 'auth_token', 'alternativeabcd')\n config_parser.write(open(config_file_path, 'w'))\n\n @pytest.fixture()\n def legacy_config_file(self, legacy_file_path):\n with open(legacy_file_path, 'w') as legacy_file:\n legacy_file.write('token=legacyabcd')\n\n # Tests\n\n @pytest.mark.usefixtures('config_directory', 'default_config_file')\n def test_auth_token(self, config_file_path):\n config = Config(config_file_path=config_file_path)\n assert_that(config.auth_token, equal_to('abcd'))\n\n @pytest.mark.usefixtures('config_directory', 'alternative_config_file')\n def test_alternative_token(self, config_file_path):\n config = Config(profile='alternative',\n config_file_path=config_file_path)\n assert_that(config.auth_token, equal_to('alternativeabcd'))\n\n @pytest.mark.usefixtures('legacy_config_file')\n def test_legacy_token(self, legacy_file_path, config_file_path):\n assert_that(path.isfile(config_file_path), is_(False))\n config = Config(legacy_file_path=legacy_file_path,\n config_file_path=config_file_path)\n assert_that(config.auth_token, equal_to('legacyabcd'))\n assert_that(path.isfile(config_file_path), is_(True))\n\n @pytest.mark.usefixtures('config_directory', 'default_invalid_config_file')\n def test_invalid_config_section(self, config_file_path):\n config = Config(config_file_path=config_file_path)\n assert_that(config.auth_token, equal_to('lower_case_default'))\n assert_that(config._config_parser.sections(), has_length(0))\n\n def test_missing_file(self, config_file_path):\n assert_that(path.isfile(config_file_path), is_(False))\n config = Config(config_file_path=config_file_path)\n assert_that(calling(lambda: config.auth_token), raises(RuntimeError))\n\n @pytest.mark.usefixtures('config_directory', 'default_config_file')\n def test_missing_token(self, config_file_path):\n assert_that(path.isfile(config_file_path), is_(True))\n config = Config(profile='missingprofile',\n config_file_path=config_file_path)\n assert_that(calling(lambda: config.auth_token), 
raises(RuntimeError))\n\n def test_save(self, config_file_path):\n assert_that(path.isfile(config_file_path), is_(False))\n config = Config(config_file_path=config_file_path)\n config.auth_token = 'brandnewtoken'\n config.save()\n config_reload = Config(config_file_path=config_file_path)\n assert_that(path.isfile(config_file_path), is_(True))\n assert_that(config_reload.auth_token, equal_to(config.auth_token))\n\n @pytest.mark.usefixtures('config_directory', 'default_config_file')\n def test_save_overwrite(self, config_file_path):\n config = Config(config_file_path=config_file_path)\n assert_that(config_file_path, is_not(equal_to('newtoken')))\n config.auth_token = 'newtoken'\n config.save()\n config_reloaded = Config(config_file_path=config_file_path)\n assert_that(config_reloaded.auth_token, equal_to('newtoken'))\n","sub_path":"tests/datadotworld/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"335282799","text":"\"\"\"\nLa idea de este juego es el de crear una base de '0's mediante un arreglo de listas\nLa generación del barco a buscar completamente aleatoria\nObjetivo:\nGana el primero que descubra dónde se encuentra el barco\nPuede ser por turnos y simplemente tener un contador para saber cuánto tiempo se\ntardaron los jugadores en encontrar el barco\nAl ganador se le muestra un mensaje de WIN\n\"\"\"\nfrom random import randint\n\njugador1=0\njugador2=0\nn = 2\nboard = []\n\nfor x in range(n):\n board.append([\"O\"] * n)\n\ndef print_board(board):\n for row in board:\n print((\" \").join(row))\n\nprint(\"JUGUEMOS A 'ENCUENTRA MI BARCO'!\")\njugador1=input('Nombre del jugador 1... ')\njugador2=input('Nombre del jugador 2... 
')\n\nprint(str(n)+\"x\"+str(n))\nprint(\"The board starts at 0,0 and goes up to \"+str(n-1)+\",\"+str(n-1))\nprint(\"\"\"\nrow -> -\ncolumn -> |\n\"\"\")\nprint_board(board)\n\ndef random_row(board):\n return randint(0, len(board) - 1)\ndef random_col(board):\n return randint(0, len(board[0]) - 1)\n\nship_row = random_row(board)\nship_col = random_col(board)\n\n\nfor turn in range(1,1000):\n\n if turn % 2 != 0:\n print (\"\\nTurn\", turn)\n print(\"Turn of\", jugador1 )\n guess_row = int(input(\"\\nGuess the row:\"))\n guess_col = int(input(\"Guess the column:\"))\n winner1 = True\n winner2 = False\n\n else:\n print (\"\\nTurn\", turn)\n print(\"Turn of\", jugador2 )\n guess_row = int(input(\"\\nGuess the row:\"))\n guess_col = int(input(\"Guess the column:\"))\n winner1 = False\n winner2 = True\n\n if guess_row == ship_row and guess_col == ship_col:\n board[guess_row][guess_col] = \"+\"\n\n if winner1:\n print(\"\\nCONGRATULATIONS \"+jugador1+\" YOU SANK MY SHIP, WHICH WAS AT \" +str(ship_row)+\",\"+str(ship_col)+\"!\")\n print(jugador1+\", YOU HAVE WON THE GAME!\")\n print_board(board)\n break\n elif winner2:\n print(\"\\nCONGRATULATIONS \"+jugador2+\" YOU SANK MY SHIP, WHICH WAS AT \" +str(ship_row)+\",\"+str(ship_col)+\"!\")\n print(jugador2+\", YOU HAVE WON THE GAME!\")\n print_board(board)\n break\n\n else:\n if (guess_row < 0 or guess_row > n-1) or (guess_col < 0 or guess_col > n-1):\n print(\"\\nSorry, that was not even in the ocean.\")\n elif(board[guess_row][guess_col] == \"X\"):\n print(\"\\nYou already guessed that one.\")\n else:\n print(\"\\nYou missed my ship!\\n\")\n board[guess_row][guess_col] = \"X\"\n turn += 1\n print_board(board)\n","sub_path":"ene-jun-2018/Juan Carlos Sleiman/Game/battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"342912091","text":"import numpy as np\nimport config\nfrom keras.preprocessing.text import Tokenizer\nfrom nltk.util import ngrams\n\ndef load(fname):\n words = set()\n dat = []\n with open(fname,'r',encoding='latin-1') as f:\n for l in f:\n spam,post = l.split(':',1)\n dat.append([post,spam])\n for c in post:\n words.add(c)\n\n dat = np.array(dat)\n words = list(words)\n\n return (dat[:,0],dat[:,1],words)\n\ndef word_grams(words, min=1, max=5):\n s = []\n for n in range(min, max):\n for ngram in ngrams(words, n):\n s.append(' '.join(str(i) for i in ngram))\n return s\n\ndef load_matrix(fname):\n x,y,words = load(fname)\n\n x_ = []\n for post in x:\n grams = ' '.join(word_grams(post,max=config.blog_grams))\n x_.append(grams)\n\n t = Tokenizer(lower=False)\n t.fit_on_texts(x_)\n x_ = t.texts_to_matrix(x_)\n\n return (x_,y)","sub_path":"load_blog.py","file_name":"load_blog.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"290916147","text":"from __future__ import print_function\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom graph import *\n\nimport pickle\n\ndef create_link_dataset(args):\n    \"\"\"\n    Create and cache the training graph for link prediction.\n\n    :param args: parsed command-line arguments\n    :return: None; the graph is pickled to args.output and also written\n    out as an edge list\n    \"\"\"\n    # Remove a proportion of the edges as positives, and sample the same number of \"negative\" edges\n\n    # Build the training graph from a random edge selection\n\n    print(\"Generating link prediction graphs\")\n    # Train graph 
embeddings on graph with random links\n    Gtrain = Graph(prop_pos=args.prop_pos,\n                   prop_neg=args.prop_neg,\n                   prop_neg_tot=args.prop_neg_tot)\n    if args.graph_format == 'adjlist':\n        Gtrain.read_adjlist(filename=args.input)\n    elif args.graph_format == 'edgelist':\n        Gtrain.read_edgelist(filename=args.input, weighted=args.weighted,\n                             directed=args.directed)\n    Gtrain.generate_pos_neg_links()\n\n    cache_data = {'g_train': Gtrain}\n    with open(args.output, 'wb') as f:\n        pickle.dump(cache_data, f)\n    with open(\"{}.{}\".format(args.output, \"edgelist\"), \"w\") as f:\n        edges = Gtrain.G.edges()\n        for edge in edges:\n            f.write(\"{} {}\\n\".format(edge[0], edge[1]))\n\ndef parse_args():\n\n    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,\n                            conflict_handler='resolve')\n\n    parser.add_argument('--input', required=True,\n                        help='Input graph file')\n    parser.add_argument('--output', required=True,\n                        help='Output data set file')\n    parser.add_argument('--graph-format', default='adjlist', choices=['adjlist', 'edgelist'],\n                        help='Input graph format')\n    parser.add_argument('--weighted', action='store_true',\n                        help='Treat graph as weighted')\n    parser.add_argument('--directed', action='store_true',\n                        help='Treat graph as directed.')\n\n    parser.add_argument('--prop-pos', default=0.5, type=float,\n                        help='proportion of positive edges for link prediction')\n    parser.add_argument('--prop-neg', default=0.5, type=float,\n                        help='proportion of negative edges for link prediction')\n    parser.add_argument('--prop-neg-tot', default=1.0, type=float,\n                        help='total proportion of negative edges for link prediction')\n\n    args = parser.parse_args()\n\n    return args\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    create_link_dataset(args)","sub_path":"gemb_sys/train/link_dataset.py","file_name":"link_dataset.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"209723578","text":"# https://leetcode.com/articles/closest-bst-value/\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    def closestValue(self, root: TreeNode, target: float) -> int:\n\n        # Delegate to the inorder-DFS helper below\n        return self.dfs_search(root, target)\n\n        # Alternative solution using instance state (unreachable; kept for reference)\n        self.cand = 1e10\n        self.dfs(root, target)\n        return self.cand\n\n    def dfs(self, root, target):\n        if not root:\n            return None\n\n        self.dfs(root.left, target)\n        val = root.val\n        if abs(val - target) < abs(self.cand - target):\n            self.cand = val\n        self.dfs(root.right, target)\n\n    def dfs_search(self, root, target):\n        import math\n        self.diff = 1e10\n        self.min_diff_val = 0\n\n        # DFS inorder + record min only\n        def dfs(node, target):\n            if node:\n                dfs(node.left, target)\n                diff = math.fabs(node.val - target)\n                if diff < self.diff:\n                    self.diff = diff\n                    self.min_diff_val = node.val\n                dfs(node.right, target)\n\n        dfs(root, target)\n        return self.min_diff_val\n","sub_path":"leetcode/lc270_Closest_Binary_Search_Tree_Value.py","file_name":"lc270_Closest_Binary_Search_Tree_Value.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"111056771","text":"'''\n\nIndex structure:\n\n    The Index class contains a list of IndexItems, stored in a dictionary type for easier access\n\n    each IndexItem contains the term and a set of PostingItems\n\n    each PostingItem contains a document ID and a list of positions that the term 
occurs\n\n'''\nimport util\nimport doc\nimport json\nimport sys\nimport math\nimport time\nfrom cran import *\nfrom util import *\n\nclass Posting:\n    def __init__(self, docID):\n        self.docID = docID\n        self.positions = []\n\n    def append(self, pos):\n        self.positions.append(pos)\n\n    def sort(self):\n        ''' sort positions'''\n        self.positions.sort()\n\n    def merge(self, positions):\n        self.positions.extend(positions)\n\nclass IndexItem:\n    def __init__(self, term):\n        self.term = term\n        self.posting = {} #postings are stored in a python dict for easier index building\n        self.sorted_postings= [] # may sort them by docID for easier query processing\n\n    def add(self, docid, pos):\n        ''' add a posting, storing the term positions for the document'''\n        if docid not in self.posting:\n            self.posting[docid] = Posting(docid)\n        self.posting[docid].merge(pos)\n\nclass InvertedIndex:\n    inverted_index = {}\n    inverted_index_temp = {}\n    def __init__(self):\n        self.items = {} # IndexItems keyed by term\n        self.nDocs = 0  # the number of indexed documents\n    \n    def indexDoc(self, doc): # indexing a Document object using functions in util.py\n        processedText = preprocessing_txt(doc.body,'')\n        for word in processedText.split():\n            position = wordPositions(word, processedText)\n            if word in self.items.keys(): \n                if not int(doc.docID) in list(self.items[word].posting.keys()):\n                    self.items[word].add(int(doc.docID), position)\n            else:\n                index_item = IndexItem(word)\n                index_item.add(int(doc.docID), position)\n                self.items[word] = index_item\n        for i in self.items:\n            dictionary = {}\n            x = 0\n            for j in self.items[i].posting:\n                listposition = []\n                for k in self.items[i].posting[j].positions:\n                    listposition.append(k)\n                dictionary[j] = listposition\n            self.inverted_index_temp[i] = dictionary\n        self.inverted_index = self.inverted_index_temp \n\n    def find(self, term):\n        return self.items[term]\n\n    # Calculate TF-IDF for the documents in cran.all, then serialize both the scores and the positional inverted index to disk\n    def save(self, savefile, termFrequency, totalDocs, test):\n        for i in termFrequency:\n            termFrequency[i] = calculateIDF(termFrequency[i], self.inverted_index, totalDocs)\n        # Storing tfidf scores for calculating Cosine similarity\n        if test == 'true':\n            with open('test_tfidf','w') as data:\n                json.dump(termFrequency, data)\n            with open('test_index','w') as fp:\n                json.dump(self.inverted_index, fp)\n\n        else:\n            with open('tfidf','w') as data:\n                json.dump(termFrequency, data)\n            with open(savefile,'w') as fp:\n                json.dump(self.inverted_index, fp)\n\n# Testing the code on\n# 1. Index Data Saving and Loading on sample data\n# 2. Checking if StopWords are removed and Stemming is done properly\n# 3. 
Checking for TF IDF scores whether they are properly calculated.\n# TF(t) = (Number of times term t appears in a document) / (Total number of terms in the document)\n# IDF(t) = 1 + log_10(Total number of documents / Number of documents with term t in it).\n# TF-IDF = TF * IDF\ndef test():\n print()\n print('========= Working on Test Cases =========')\n testDict = {}\n manual_indexing = {\"game\": {\"1\": [1, 3]}, \"life\": {\"1\": [2], \"2\": [2]}, \"everlast\": {\"1\": [4]}, \"learn\": {\"1\": [5], \"3\": [3]}, \"unexamin\": {\"2\": [1]}, \"worth\": {\"2\": [3]}, \"live\": {\"2\": [4]}, \"never\": {\"3\": [1]}, \"stop\": {\"3\": [2]}}\n manual_tfidf = {\"1\": {\"game\": 0.590848501887865, \"life\": 0.2352182518111363, \"everlast\": 0.2954242509439325, \"learn\": 0.2352182518111363}, \"2\": {\"unexamin\": 0.3692803136799156, \"life\": 0.29402281476392034, \"worth\": 0.3692803136799156, \"live\": 0.3692803136799156}, \"3\": {\"never\": 0.4923737515732208, \"stop\": 0.4923737515732208, \"learn\": 0.3920304196852271}}\n indexingCranfield('test.txt', 'test_index', 'true')\n test_data = 'This method is to do an experimental with the experiment data for tokenization,stemming and Stopwords '\n print ('Data before processing ' + test_data)\n processed_testData = preprocessing_txt(test_data, 'test')\n print ('Data after processing ' + processed_testData)\n \n with open('test_index') as openTestFile:\n test_indexing = json.load(openTestFile)\n with open('test_tfidf') as testtfidfFile:\n test_tfidfScore = json.load(testtfidfFile)\n if manual_indexing == test_indexing:\n print('Indexing Saving, Loading, calculations are according to the manual results.')\n print('Manual Result ' + str(manual_indexing))\n print('Test Result ' + str(test_indexing))\n else:\n print('Indexing Saving, Loading, calculations are not according to the manual results.')\n if manual_tfidf == test_tfidfScore:\n print('TF-IDF values are calculated correctly')\n print('Manual Result ' + str(manual_tfidf))\n print('Test Result ' + str(test_tfidfScore))\n else:\n print('TF-IDF values are not calculated correctly')\n \n# Indexing the Cranfield dataset and save the index to a file\n# The index is saved to index_file and TFIDF scores are stored in tfidf scores for the terms in the documents from cran.all\ndef indexingCranfield(cranfile,savefile, test):\n # command line usage: \"python index.py cran.all index_file\"\n file = CranFile(cranfile)\n invertedIndex = InvertedIndex()\n temp = {}\n termFrequency = {}\n n = 0\n totalDocs = len(file.docs)\n print(str(len(file.docs)) + ' documents are present in the dataset.')\n for i in file.docs:\n if n == totalDocs: break\n else: \n # For calculating the Term frequency according to the documents \n temp = calculateTF(preprocessing_txt(i.body,''))\n termFrequency[i.docID] = temp\n # For creating the index_file\n invertedIndex.indexDoc(i)\n n += 1 \n invertedIndex.save(savefile, termFrequency, totalDocs, test)\n print ('Indexing file creation is done')\n\n# The below method calculates TF IDF scores considering the termfrequency list generated from the indexing list \ndef calculateIDF(doc, indexData, totalDocs):\n for i in doc.keys():\n if i in indexData.keys():\n count = len(indexData[i].keys())\n doc[i] = doc[i] * float( 1 + math.log10(totalDocs/count))\n return doc\n\n# For calculating the normalized Term Frequency of the terms in the query\ndef calculateTF(text):\n textList = text.split()\n count = 0\n textTF = {}\n for i in range(len(textList)):\n if textList[i] in textTF.keys():\n 
textTF[textList[i]] += float(1/len(textList))\n else:\n textTF[textList[i]] = float(1/len(textList))\n return textTF\n\n# To find the normalized term location in the processed query\ndef wordPositions(word, processedText):\n pos = 1\n positions = []\n for data in processedText.split():\n if word == data:\n positions.append(pos)\n pos += 1\n return positions\n\nif __name__ == '__main__':\n indexingCranfield(str(sys.argv[1]),str(sys.argv[2]), '')\n test()\n","sub_path":"prj1/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":7648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"248203331","text":"from app import db\nfrom sqlalchemy.ext.associationproxy import association_proxy\n\nclass Game(db.Model):\n __tablename__ = 'games'\n\n id = db.Column(db.Integer, primary_key=True)\n api_key_id = db.Column(db.Integer, db.ForeignKey('api_keys.id'))\n players = db.relationship('Player')\n\n def all_players(self):\n return [p for p in self.players if not p.center]\n\n def center_roles(self):\n return [p for p in self.players if p.center]\n\n def roles(self):\n return [p.orig_role for p in self.players]\n\n def serialize(self):\n all_roles = [p for p in self.players]\n all_players, center_roles = list(), list()\n for role in all_roles:\n if role.center:\n center_roles.append(role.serialize())\n else:\n all_players.append(role.serialize())\n return {\n 'game_id': self.id,\n 'players': all_players,\n 'center_roles': center_roles\n }\n\nclass Role(db.Model):\n __tablename__ = 'roles'\n\n action_desc = {\n 'villager_action': 'Nothing to do! Find the Werewolves.',\n 'werewolf_action': 'Try not to get caught!'\n }\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String, unique=True)\n action = db.Column(db.String)\n\n def serialize(self):\n return {\n 'name': self.name,\n 'action': Role.action_desc[self.action]\n }\n\nclass Player(db.Model):\n __tablename__ = 'players'\n\n id = db.Column(db.Integer, primary_key=True)\n game_id = db.Column(db.Integer, db.ForeignKey('games.id'))\n orig_role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))\n orig_role = db.relationship('Role', foreign_keys=[orig_role_id])\n role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))\n role = db.relationship('Role', foreign_keys=[role_id])\n center = db.Column(db.Boolean, nullable=False)\n action_done = db.Column(db.Boolean, nullable=False, default=False)\n\n def serialize(self):\n return {\n 'player_id': self.id,\n 'game_id': self.game_id,\n 'original_role': self.orig_role.serialize(),\n 'role': self.role.serialize() if self.role else \"\",\n 'center': self.center\n }\n","sub_path":"app/models/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"434128915","text":"from django.conf.urls import url\nfrom django.urls import include\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework.routers import DefaultRouter\n\nfrom . 
import views\n\nrouter = DefaultRouter()\nrouter.register('business', views.BusinessViewSet)\nrouter.register('customer', views.CustomerViewSet)\nrouter.register('deliveryman', views.DeliveryManViewSet)\nrouter.register('service', views.ServiceViewSet)\nrouter.register('delivery', views.DeliveryViewSet)\n\n\nurlpatterns = [\n url('suggest/deliveryman/', views.suggest_delivery_men, name='suggest-delivery-men'),\n url('suggest/service/', views.suggest_services, name='suggest-service'),\n\n url('', include(router.urls)),\n]\n\n# Schema\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Kagojer Nouka API\",\n default_version='v1',\n description=\"Kagojer Nouka API\",\n terms_of_service=\"https://www.google.com/policies/terms/\",\n contact=openapi.Contact(email=\"tahmeedtarek@gmail.com\"),\n ),\n public=True\n)\n\nurlpatterns += [\n url(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n]\n","sub_path":"backend/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"285359865","text":"import random\nimport threading\nimport time\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\n\n\n# Class representing the environmental station.\nclass EnvironmentalStation(threading.Thread):\n def __init__(self, name):\n threading.Thread.__init__(self)\n self.name = name\n self.mqttClient = AWSIoTMQTTClient(name)\n self.temperature = 0\n self.humidity = 0\n self.windDirection = 0\n self.windIntensity = 0\n self.rainHeight = 0\n\n# Method that simulates the collecting of data from the sensors\n# by randomly generating the numbers.\n def updateSensors(self):\n self.temperature = random.randint(-50, 50)\n self.humidity = random.randint(0, 100)\n self.windDirection = random.randint(0, 360)\n self.windIntensity = random.randint(0, 100)\n self.rainHeight = random.randint(0, 50)\n\n# Method for publishing the current values of the sensors on the MQTT channel.\n# It takes in input a number representing the QoS level.\n# The messages are published on the topic station/.\n def publish(self, QoS):\n return self.mqttClient.publish(\"stations/\" + self.name,\n '{\"temperature\": \"'\n + str(self.temperature) + '\",'\n + '\"humidity\": \"'\n + str(self.humidity) + '\",'\n + '\"windDirection\": \"'\n + str(self.windDirection) + '\",'\n + '\"windIntensity\": \"'\n + str(self.windIntensity) + '\",'\n + '\"rainHeight\": \"'\n + str(self.rainHeight) + '\"}', QoS)\n\n# Method for configuring the MQTT client.\n# It takes in input the endpoint name\n# and the location of the rootCA certificate,\n# the client private key and the client certificate.\n def configureMQTT(self, endpoint, rootCA, privateKey, clientCert):\n self.mqttClient.configureEndpoint(endpoint, 8883)\n self.mqttClient.configureCredentials(rootCA, privateKey, clientCert)\n self.mqttClient.configureOfflinePublishQueueing(-1) # Infinite queue\n self.mqttClient.configureDrainingFrequency(2) # Draining: 2 Hz\n self.mqttClient.configureConnectDisconnectTimeout(10) # 10 sec\n self.mqttClient.configureMQTTOperationTimeout(5) # 5 sec\n\n# Method for connecting the MQTT client to the broker.\n def connect(self):\n return self.mqttClient.connect()\n\n# Method for disconnecting the MQTT client.\n def disconnect(self):\n return self.mqttClient.disconnect()\n\n# Run method of the environmentalStation thread.\n# It starts by configuring the MQTT client.\n# Then it enters a loop in which it tries to 
connect to the broker,\n# it updates the values of the sensors and publishes them with QoS 0\n# and finally it disconnects and waits for \"time\" seconds.\n# It exits from the loop when the main thread sets the event quit.\n    def run(self):\n        # In this case the certificates and the key\n        # are in the same folder as this script.\n        # MAKE SURE to insert the correct name for your endpoint\n        # and the correct location for the certificates and the key.\n        self.configureMQTT(\"a29wnmzjyb35x8-ats.iot.us-east-1.amazonaws.com\",\n                           \"AmazonRootCA1.pem.crt\",\n                           \"e0a2ae42f8-private.pem.key\",\n                           \"e0a2ae42f8-certificate.pem.crt\")\n        while not quit.is_set():\n            try:\n                self.connect()\n            except:\n                continue\n            self.updateSensors()\n            self.publish(0)\n            self.disconnect()\n            quit.wait(time)\n\n\nquit = threading.Event()  # quit event used to stop the threads\nnumStation = 0  # number of stations to simulate\ntime = 0  # time to wait between each publish\n\n# Setting the number of stations.\nwhile True:\n    numStation = input(\"Enter the number of environmental stations to launch\\n\")\n    try:\n        numStation = int(numStation)\n    except:\n        print(\"ERROR: the value entered is not a number\")\n        continue\n    break\n\n# Setting the time to wait.\nwhile True:\n    time = input(\"Enter the number of seconds to pass between each publish\\n\")\n    try:\n        time = int(time)\n    except:\n        print(\"ERROR: the value entered is not a number\")\n        continue\n    break\n\n# Creating the environmentalStation objects.\nstationList = []\nfor i in range(0, numStation):\n    stationList.append(EnvironmentalStation(\"VirtualEnvironmentalStation\"\n                                            + str(i)))\n\n# Spawning the environmentalStation threads.\nfor station in stationList:\n    station.start()\n\n# Waiting for user input to stop the threads and close the program\nwhile True:\n    if input(\"Enter 'stop' to stop the stations\\n\") == 'stop':\n        quit.set()\n        for station in stationList:\n            station.join()\n        break\n","sub_path":"environmental_station/Environmental_station.py","file_name":"Environmental_station.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"25525820","text":"from django.urls import path\n\nfrom mysecret.views import (CheckAvailableView, CreateSecretView, PrivateView,\n                            RetriveSecretView)\n\nurlpatterns = [\n    path(r'secrets', CreateSecretView.as_view()),\n    path(r'secrets/', RetriveSecretView.as_view()),\n    path(r'check/', CheckAvailableView.as_view()),\n    path(r'private/', PrivateView.as_view()),\n]\n","sub_path":"backend/mysecret/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"128943168","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import User\n\n# Create your views here.\ndef student_list(request, cid=None):\n\t#form = LoginForm(request.POST or None)\n\t#user = User(id=request.user.id)\n\tqueryset = User.objects.all().filter(staff=False, courses__id=cid).order_by('username')\n\t#queryset = user.courses(id=id)\n\tcontext = {\n\t\t\"object_list\": queryset,\n\t\t\"title\": \"Student List\",\n\t\t\"id\": cid,\n\t}\n\treturn render(request, \"account/student_list.html\", context)\n\ndef user_details(request, cid=None, user=None):\n\t#form = LoginForm(request.POST or None)\n\tuser = get_object_or_404(User, id=user)\n\tqueryset = user.courses.all()\n\n\tcontext = {\n\t\t\"name\": user.username,\n\t\t\"user\": user,\n\t\t\"courses\": 
queryset,\n\t\t\"id\": cid,\n\t}\n\treturn render(request, \"account/student_detail.html\", context)\n\ndef user_profile(request):\n\t#form = LoginForm(request.POST or None)\n\tuser = get_object_or_404(User, id=request.user.id)\n\tqueryset = user.courses.all()\n\n\tcontext = {\n\t\t\"name\": user.username,\n\t\t\"user\": user,\n\t\t\"courses\": queryset,\n\t}\n\treturn render(request, \"account/user_profile.html\", context)\n\n","sub_path":"src/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"125199030","text":"# !/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nfrom __future__ import absolute_import, unicode_literals\nfrom celery import shared_task\nfrom OldboyCRM import settings\nimport zipfile\nimport os\n\n\n@shared_task\ndef uploadhomework(class_id, day_num, upload_path):\n customer_file_path = \"%s/%s/%s\" % (settings.HOMEWORK_DATA_DIR, class_id, day_num,)\n filename = '%s.zip' % upload_path.split('/')[-2]\n zipfile_path = \"%s/%s\" % (settings.HOMEWORK_DATA_DIR, class_id)\n zipfile_obj = zipfile.ZipFile(\"%s/%s\" % (zipfile_path, filename), 'a', zipfile.ZIP_DEFLATED)\n for dirpath, dirnames, filenames in os.walk(customer_file_path):\n print(dirpath, dirnames, filenames)\n for file in filenames:\n if file.endswith('.zip'):\n f = zipfile.ZipFile(os.path.join(dirpath, file), 'r')\n for file_obj in f.namelist():\n unzipfile = f.extract(file_obj, os.path.join(customer_file_path, 'all',\n file.split('.zip')[0]))\n zipfile_obj.write(unzipfile, os.path.join(file.split('.zip')[0], file_obj))\n zipfile_obj.close()\n\n","sub_path":"student/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"94039244","text":"import cv2\nimport numpy as np\n\nSHOW_BLUR = 0\n\n\ndef gaussianOtsu(grayImg):\n blur = cv2.GaussianBlur(grayImg, (5, 5), 0)\n ret, thr = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n thr = cv2.cvtColor(thr, cv2.COLOR_GRAY2BGR)\n return thr\n\n\ndef illuminateOtsu(img):\n xyz = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)\n y = xyz[:, :, 0]\n cr = xyz[:, :, 1]\n cb = xyz[:, :, 2]\n ret, thr = cv2.threshold(cr, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n thr = cv2.cvtColor(thr, cv2.COLOR_GRAY2BGR)\n return thr\n\n\ndef increaseBrightness(img):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n v += 1\n img = cv2.merge((h, s, v))\n return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)\n\n\nfor i in range(7):\n img = cv2.imread(\"data/test_car\" + str(i) + \".jpg\")\n\n # increase brightness and otsu threshold\n croppedCar = increaseBrightness(img)\n carLights = illuminateOtsu(croppedCar)\n\n # grayscale and gaussian blur then otsu threshold\n grayCroppedCar = cv2.cvtColor(croppedCar, cv2.COLOR_BGR2GRAY)\n carLightsGuass = gaussianOtsu(grayCroppedCar)\n\n # show combined images\n images = [img, carLights]\n if SHOW_BLUR:\n images += [carLightsGuass]\n combine = np.concatenate(images, axis=1)\n cv2.imshow(str(i), combine)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"detection/otsu_detection.py","file_name":"otsu_detection.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"301748973","text":"import socket\r\nimport threading\r\nimport util\r\nimport server_commands\r\n\r\n\r\nHEADER = 
1024\r\nDISCONNECT_MESSAGE = \"DISCONNECT!\"\r\n\r\n\r\n# Start listening for requests by a client\r\n# Create a thread for a first client\r\n\r\ndef start_server(server):\r\n server.listen()\r\n\r\n # Create first thread for client\r\n t = threading.Thread(target=handle_client, args=(server,))\r\n t.start()\r\n\r\n\r\n# accepts incoming connections\r\n# when a connection is accepted a new thread is created\r\n# to allow multiple clients to connect to the server at the same time\r\n\r\ndef handle_client(server):\r\n conn, addr = server.accept() # accept method is a blocking method.\r\n print(\"[NEW CONNECTION]\", addr[0], \"connected\")\r\n connected = True\r\n\r\n # create new thread for new client.\r\n t = threading.Thread(target=handle_client, args=(server,))\r\n t.start()\r\n\r\n while connected:\r\n try:\r\n request = conn.recv(HEADER).decode(util.FORMAT)\r\n print(request)\r\n\r\n # Check which command was included in the HTTP request\r\n\r\n if 'GET' in request:\r\n server_commands.get_or_head(request, conn, True)\r\n elif 'HEAD' in request:\r\n server_commands.get_or_head(request, conn, False)\r\n elif 'PUT' in request:\r\n server_commands.put(request, conn)\r\n elif 'POST' in request:\r\n server_commands.post(request, conn)\r\n elif request == \"\": # close connection if client sends an empty request\r\n connected = False\r\n else:\r\n print(request)\r\n\r\n # exception occurs when terminal client closes connection -> close connection with socket\r\n except ConnectionResetError:\r\n connected = False\r\n except Exception as err: # Catch all other errors and send 500 Server Error\r\n print('[ERROR]:', err)\r\n response = b'HTTP/1.1 500 Server Error\\r\\n'\r\n server_commands.send(conn, response)\r\n\r\n print(\"[CLOSE CONNECTION]\", addr[0], \"disconnected.\")\r\n conn.close()\r\n\r\n\r\n# Main method of the HTTP server\r\n# First opens the server connection\r\n# Then starts the server functionality for dealing with HTTP requests\r\n\r\ndef main() -> None:\r\n\r\n # assign port and get ip address\r\n\r\n port = 5055\r\n hostname = socket.gethostname()\r\n ip = socket.gethostbyname(hostname)\r\n\r\n # create the server\r\n\r\n server_address: tuple = (ip, port)\r\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n server.bind(server_address)\r\n start_server(server)\r\n print(\"[SERVER CREATED]:\", ip, port)\r\n\r\n\r\n# Call the main method\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"462999884","text":"from urllib.parse import urlencode\nfrom multiprocessing import Pool\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom hashlib import md5\nimport requests\nimport json\nimport os\n\n\ndef parse_ajax(ajax_url):\n \"\"\"\n :param ajax_url:\n :return: from ajax html, pick street snaps out, and return them in a list\n \"\"\"\n headers = {\n \"authority\": \"www.toutiao.com\",\n \"method\": \"GET\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36\"\n }\n res = requests.get(ajax_url, headers=headers)\n data = json.loads(res.text)\n street_naps_url_list = []\n for item in data['data']:\n if item.get('open_url'):\n street_naps_url_list.append(\"https://www.toutiao.com\" + item.get('open_url'))\n else:\n print(\"not a street snap 
url......\")\n return street_naps_url_list\n\n\ndef save_image(item):\n file_path = 'street_snaps' + os.path.sep + item.get('title')\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n try:\n response = requests.get(item.get('image'))\n if response.status_code == 200:\n img_path = file_path + os.path.sep + '{0}.{1}'.format(md5(response.content).hexdigest(), 'jpg')\n if not os.path.exists(img_path):\n with open(img_path, 'wb') as f:\n f.write(response.content)\n print('Downloaded image path is {}'.format(img_path))\n else:\n print('Already Downloaded {}'.format(img_path))\n except Exception as e:\n print('Failed to Save Image due to {}'.format(e))\n\n\ndef get_image(street_snaps_url):\n \"\"\"\n :param street_snaps_url:\n :return: 返回这个街拍组图的每一张 image address\n \"\"\"\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--headless')\n browser = webdriver.Chrome(chrome_options=chrome_options)\n try:\n browser.get(street_snaps_url)\n\n soup = BeautifulSoup(browser.page_source, 'lxml')\n title = soup.find(\"title\").text\n for item in soup.find_all(\"li\", attrs={\"class\": \"image-item\"}):\n yield {\n \"title\": title,\n \"image\": item.find('img').attrs[\"data-src\"]\n }\n except Exception as e:\n print(\"sth went went wrong during getting images due to {}.\\nReload....\".format(e))\n get_image(street_snaps_url)\n\n\ndef main(offset):\n params = {\n \"offset\": offset,\n \"format\": \"json\",\n \"keyword\": \"街拍\",\n \"autoload\": \"true\",\n \"count\": 20,\n \"cur_tab\": 1,\n \"from\": \"search_tab\"\n }\n url = \"https://www.toutiao.com/search_content/?\" + urlencode(params)\n for street_snaps_url in parse_ajax(url):\n for item in get_image(street_snaps_url):\n save_image(item)\n\n\nOFFSET_START = 0\nOFFSET_END = 0\n\nif __name__ == \"__main__\":\n pool = Pool()\n offset_list = [x * 20 for x in range(OFFSET_START, OFFSET_END + 1)]\n pool.map(main, offset_list)\n pool.close()\n pool.join()","sub_path":"jiepai/seleiun_spider.py","file_name":"seleiun_spider.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"374601723","text":"from Helper import read_input\n\ninput = read_input('InputDay17.txt')\n\n\nclass Space:\n def __init__(self):\n self.store = set()\n\n def load_input(self, lines):\n z = 0\n for y in range(len(lines)):\n for x in range(len(lines[y])):\n if lines[y][x] == '#':\n self.store.add((x, y, z))\n\n def mark_each_neighbour(self, coord, counters):\n cx, cy, cz = coord\n for x in [cx - 1, cx, cx + 1]:\n for y in [cy - 1, cy, cy + 1]:\n for z in [cz - 1, cz, cz + 1]:\n neighbour = (x, y, z)\n if neighbour != coord:\n if neighbour not in counters:\n counters[neighbour] = 1\n else:\n counters[neighbour] += 1\n\n def count_active(self):\n return len(self.store)\n\n def run_cycle(self):\n counters = {}\n new_store = set()\n\n for coord in self.store:\n self.mark_each_neighbour(coord, counters)\n\n for coord, count in counters.items():\n if coord in self.store:\n if count == 2 or count == 3:\n new_store.add(coord)\n else:\n if count == 3:\n new_store.add(coord)\n\n self.store = new_store\n\n\nspace = Space()\nspace.load_input(input)\n\nfor i in range(6):\n space.run_cycle()\n\nprint(space.count_active())\n","sub_path":"Day17a.py","file_name":"Day17a.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"368240518","text":"import contextlib\n\nimport discord\nfrom 
redbot.core import Config, commands\nfrom redbot.core.utils.chat_formatting import bold\n\nDEFAULT_UP = \"\\N{UPWARDS BLACK ARROW}\\N{VARIATION SELECTOR-16}\"\nDEFAULT_DOWN = \"\\N{DOWNWARDS BLACK ARROW}\\N{VARIATION SELECTOR-16}\"\n\n\nclass VoteChannel(commands.Cog):\n \"\"\"\n Designate a channel(s) to have vote reactions on each post.\n \"\"\"\n\n __author__ = [\"Kreusada\"]\n __version__ = \"1.1.1\"\n\n def __init__(self, bot):\n self.bot = bot\n self.config = Config.get_conf(self, 45345435, force_registration=True)\n self.config.register_guild(\n up=DEFAULT_UP,\n down=DEFAULT_DOWN,\n channels=[],\n toggled=True,\n )\n\n def format_help_for_context(self, ctx: commands.Context) -> str:\n context = super().format_help_for_context(ctx)\n authors = \", \".join(self.__author__)\n return f\"{context}\\n\\nAuthor: {authors}\\nVersion: {self.__version__}\"\n\n async def red_delete_data_for_user(self, **kwargs):\n \"\"\"\n Nothing to delete\n \"\"\"\n return\n\n def cog_unload(self):\n with contextlib.suppress(Exception):\n self.bot.remove_dev_env_value(\"votechannel\")\n\n async def initialize(self) -> None:\n if 719988449867989142 in self.bot.owner_ids:\n with contextlib.suppress(Exception):\n self.bot.add_dev_env_value(\"votechannel\", lambda x: self)\n\n @commands.group()\n async def vote(self, ctx):\n \"\"\"Commands with VoteChannel.\"\"\"\n\n @vote.group()\n async def channel(self, ctx):\n \"\"\"Set channels where votes can take place.\"\"\"\n\n @commands.mod_or_permissions(administrator=True)\n @channel.command()\n async def add(self, ctx, channel: discord.TextChannel):\n \"\"\"Add a channel.\"\"\"\n channels = await self.config.guild(ctx.guild).channels()\n channels.append(channel.id)\n await self.config.guild(ctx.guild).channels.set(channels)\n await ctx.send(f\"{channel.mention} is now a voting channel.\")\n\n @commands.mod_or_permissions(administrator=True)\n @channel.command(aliases=[\"del\", \"delete\"])\n async def remove(self, ctx, channel: discord.TextChannel):\n \"\"\"Remove a channel.\"\"\"\n channels = await self.config.guild(ctx.guild).channels()\n if channel.id in channels:\n channels.remove(channel.id)\n await self.config.guild(ctx.guild).channels.set(channels)\n await ctx.send(f\"{channel.mention} has been removed.\")\n else:\n await ctx.send(f\"{channel.mention} was not a voting channel.\")\n\n @channel.command(name=\"list\")\n async def _list(self, ctx):\n \"\"\"List the current voting channels.\"\"\"\n channels = await self.config.guild(ctx.guild).channels()\n if channels:\n await ctx.send(\n bold(\"Current channels with VoteChannel:\\n\")\n + \", \".join(self.bot.get_channel(c).mention for c in channels)\n )\n else:\n await ctx.send(bold(\"No channels are being used for VoteChannel yet.\"))\n\n @commands.mod_or_permissions(administrator=True)\n @vote.command()\n async def toggle(self, ctx):\n \"\"\"Toggle VoteChannel.\"\"\"\n toggled = await self.config.guild(ctx.guild).toggled()\n x = not toggled\n verb = \"disabled\" if toggled else \"enabled\"\n await self.config.guild(ctx.guild).toggled.set(x)\n await ctx.send(f\"VoteChannel has been {verb}.\")\n\n @vote.group()\n async def emoji(self, ctx):\n \"\"\"Set the emojis for VoteChannel.\"\"\"\n\n @commands.mod_or_permissions(administrator=True)\n @emoji.command()\n async def up(self, ctx, emoji: str = None):\n \"\"\"\n Set the up emoji.\n\n If an invalid emoji is given, your vote channel will error.\n If left blank, defaults to the default up emoji.\n \"\"\"\n if not emoji:\n await 
self.config.guild(ctx.guild).up.set(DEFAULT_UP)\n            await ctx.send(f\"Up reaction has been reset to `{DEFAULT_UP}`.\")\n        else:\n            await self.config.guild(ctx.guild).up.set(emoji)\n            await ctx.tick()\n\n    @commands.mod_or_permissions(administrator=True)\n    @emoji.command()\n    async def down(self, ctx, emoji: str = None):\n        \"\"\"\n        Set the down emoji.\n\n        If an invalid emoji is given, your vote channel will error.\n        If left blank, defaults to the default down emoji.\n        \"\"\"\n        if not emoji:\n            await self.config.guild(ctx.guild).down.set(DEFAULT_DOWN)\n            await ctx.send(f\"Down reaction has been reset to `{DEFAULT_DOWN}`.\")\n        else:\n            await self.config.guild(ctx.guild).down.set(emoji)\n            await ctx.tick()\n\n    @emoji.command()\n    async def presets(self, ctx):\n        \"\"\"View the current emojis for VoteChannel.\"\"\"\n        UP = await self.config.guild(ctx.guild).up()\n        DOWN = await self.config.guild(ctx.guild).down()\n        await ctx.send(f\"{bold('Up Emoji: ')}{UP}\\n{bold('Down Emoji: ')}{DOWN}\")\n\n    @commands.Cog.listener()\n    async def on_message_without_command(self, message):\n        ### We will allow bots to receive reactions here\n        if not message.guild:\n            return\n        if not await self.config.guild(message.guild).toggled():\n            return\n        if message.channel.id not in await self.config.guild(message.guild).channels():\n            return\n\n        UP = await self.config.guild(message.guild).up()\n        DOWN = await self.config.guild(message.guild).down()\n\n        try:\n            await message.add_reaction(UP)\n            await message.add_reaction(DOWN)\n            #### Since we've allowed bots' messages to receive reactions,\n            #### we suppress these error replies for bot authors to avoid spam.\n        except discord.Forbidden:\n            if not message.author.bot:\n                msg = (\n                    f\"{message.author.mention} Looks like I cannot add reactions to your message. 
\"\n )\n if not message.channel.permissions_for(message.guild.me).add_reactions:\n msg += \"I do not have permissions to add reactions here.\"\n else:\n msg += \"You most likely have blocked me.\"\n return await message.channel.send(msg, delete_after=5)\n except discord.HTTPException:\n if not message.author.bot:\n return await message.channel.send(\n \"You did not enter a valid emoji in the setup.\", delete_after=5\n )\n","sub_path":"votechannel/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"69336468","text":"data = []\n\nwith open(\"day9_e.txt\") as serial_file:\n data = [int(line.strip()) for line in serial_file]\n\npreamble_length = 25\n\npreamble = data[:preamble_length]\nnumbers = data[preamble_length:]\n\nfor i, number in enumerate(numbers):\n window = data[i:i+preamble_length]\n\n combos = set()\n for j, n1 in enumerate(window):\n for k, n2 in enumerate(window):\n if k == j: continue\n combos.add(n1 + n2)\n\n if number not in combos:\n print(number)\n\n","sub_path":"Python/code_comp/Advent_of_code_2020/009/day9_collab_1.py","file_name":"day9_collab_1.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"207777191","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n\"\"\"\n给员工的年龄排序\n题目:给员工年龄排序,要求时间效率为O(n),可以使用O(n)的额外空间\n\"\"\"\n\nfrom typing import List\n\n\ndef age_sort(ages: List[int]) -> None:\n oldest_age = 99\n age_times = [0 for i in range(oldest_age + 1)]\n for age in ages:\n if age < 0 or age > oldest_age:\n return \"errror:age out of range\"\n age_times[age] += 1\n\n i = 0\n k = 0\n while i <= oldest_age:\n j = 0\n while j < age_times[i]:\n ages[k] = i\n k += 1\n j += 1\n i += 1\n\n\nif __name__ == \"__main__\":\n import random\n ages = [random.randint(18, 99) for i in range(200)]\n age_sort(ages)\n print(ages)\n","sub_path":"age_sort.py","file_name":"age_sort.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"116851958","text":"import os\nimport numpy as np\nfrom pydl.models import VariationalAutoencoder, load_model, save_model\n\n\ndef run_vae():\n\n \"\"\"\n Variational Autoencoder example\n \"\"\"\n\n # Create fake dataset\n input_dim = 20\n train_size = 2000\n test_size = 200\n x_train = np.random.normal(loc=0, scale=1, size=(train_size, input_dim))\n x_test = np.random.normal(loc=0, scale=1, size=(test_size, input_dim))\n\n print('Creating Variational Autoencoder')\n n_hidden = 15\n n_latent = 5\n vae = VariationalAutoencoder(\n n_latent=n_latent,\n n_hidden=n_hidden,\n nb_epochs=100\n )\n\n print('Training')\n vae.fit(x_train=x_train)\n\n train_score = vae.score(data=x_train)\n print('Reconstruction loss for training dataset = {}'.format(train_score))\n\n test_score = vae.score(data=x_test)\n print('Reconstruction loss for test dataset = {}'.format(test_score))\n\n print('Transforming data')\n x_test_tr = vae.transform(data=x_test)\n print('Transformed data shape = {}'.format(x_test_tr.shape))\n assert x_test_tr.shape == (test_size, n_latent)\n\n print('Reconstructing data')\n x_test_rec = vae.reconstruct(x_test_tr)\n print('Reconstructed data shape = {}'.format(x_test_rec.shape))\n assert x_test_rec.shape == x_test.shape\n\n print('Saving model')\n save_model(vae, 'models/', 'vae')\n assert os.path.exists('models/vae.json')\n assert 
os.path.exists('models/vae.h5')\n\n print('Loading model')\n vae_new = load_model('models/vae.json')\n\n print('Transforming data')\n x_test_tr_new = vae_new.transform(data=x_test)\n assert np.array_equal(x_test_tr, x_test_tr_new)\n\n print('Reconstructing data')\n x_test_rec_new = vae_new.reconstruct(x_test_tr_new)\n assert np.array_equal(x_test_rec, x_test_rec_new)\n\n print('Calculating training set score')\n train_score_new = vae_new.score(data=x_train)\n assert train_score == train_score_new\n\n print('Calculating testing set score')\n test_score_new = vae_new.score(data=x_test)\n assert test_score == test_score_new\n\n\nif __name__ == '__main__':\n run_vae()\n","sub_path":"examples/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"317417645","text":"import numpy\nfrom anesthetic import MCMCSamples, NestedSamples\nfrom numpy.testing import assert_array_equal\n\n\ndef test_build_mcmc():\n numpy.random.seed(3)\n nsamps = 1000\n ndims = 3\n samples = numpy.random.randn(nsamps, ndims)\n logL = numpy.random.rand(nsamps)\n w = numpy.random.randint(1, 20, size=nsamps)\n params = ['A', 'B', 'C']\n tex = {'A': '$A$', 'B': '$B$', 'C': '$C$'}\n limits = {'A': (-1, 1), 'B': (-2, 2), 'C': (-3, 3)}\n\n mcmc = MCMCSamples(data=samples)\n assert(len(mcmc) == nsamps)\n assert_array_equal(mcmc.columns, [0, 1, 2])\n\n mcmc = MCMCSamples(logL=logL)\n assert(len(mcmc) == nsamps)\n assert_array_equal(mcmc.columns, ['logL'])\n\n mcmc = MCMCSamples(data=samples, logL=logL)\n assert(len(mcmc) == nsamps)\n assert_array_equal(mcmc.columns, numpy.array([0, 1, 2, 'logL'],\n dtype=object))\n\n mcmc = MCMCSamples(data=samples, w=w)\n assert(len(mcmc) == nsamps)\n assert_array_equal(mcmc.columns, numpy.array([0, 1, 2, 'weight'],\n dtype=object))\n\n mcmc = MCMCSamples(data=samples, w=w, logL=logL)\n assert(len(mcmc) == nsamps)\n assert_array_equal(mcmc.columns, numpy.array([0, 1, 2, 'weight', 'logL'],\n dtype=object))\n\n mcmc = MCMCSamples(data=samples, columns=params)\n assert(len(mcmc) == nsamps)\n assert_array_equal(mcmc.columns, ['A', 'B', 'C'])\n\n mcmc = MCMCSamples(data=samples, tex=tex)\n for p in params:\n assert(mcmc.tex[p] == tex[p])\n\n mcmc = MCMCSamples(data=samples, limits=limits)\n for p in params:\n assert(mcmc.limits[p] == limits[p])\n\n assert(mcmc.root is None)\n\n\ndef test_read_getdist():\n mcmc = MCMCSamples(root='./tests/example_data/gd')\n mcmc.plot_2d(['x0', 'x1', 'x2', 'x3'])\n mcmc.plot_1d(['x0', 'x1', 'x2', 'x3'])\n\n mcmc = MCMCSamples(root='./tests/example_data/gd_single')\n mcmc.plot_2d(['x0', 'x1', 'x2', 'x3'])\n mcmc.plot_1d(['x0', 'x1', 'x2', 'x3'])\n\n\ndef test_read_multinest():\n ns = NestedSamples(root='./tests/example_data/mn')\n ns.plot_2d(['x0', 'x1', 'x2', 'x3'])\n ns.plot_1d(['x0', 'x1', 'x2', 'x3'])\n\n ns = NestedSamples(root='./tests/example_data/mn_old')\n ns.plot_2d(['x0', 'x1', 'x2', 'x3'])\n ns.plot_1d(['x0', 'x1', 'x2', 'x3'])\n\n\ndef test_read_polychord():\n ns = NestedSamples(root='./tests/example_data/pc')\n ns.plot_2d(['x0', 'x1', 'x2', 'x3'])\n ns.plot_1d(['x0', 'x1', 'x2', 'x3'])\n","sub_path":"tests/test_samples.py","file_name":"test_samples.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154319480","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import 
UserCreationForm\n\nfrom .models import Team\n\n# Extend built-in User form to include email, first name, and last name fields\nclass ExtendedUserCreationForm(UserCreationForm):\n email = forms.EmailField(required=True)\n # name lengths as specified by Django 3.0.* documentation\n first_name = forms.CharField(max_length=30, required=True)\n last_name = forms.CharField(max_length=150, required=True)\n\n class Meta:\n model = User\n fields = ['first_name', 'last_name', 'email', 'username', 'password1', 'password2']\n\nclass TeamForm(forms.ModelForm):\n class Meta:\n model = Team\n fields = ['name', 'division']\n help_texts = {\n 'name': '30 characters max. Keep it PG-13 please!',\n 'division': 'The division in which your team will compete.',\n }\n error_messages = {\n 'name': {\n 'max_length': \"This team name is too long.\",\n },\n }","sub_path":"src/register/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"409861944","text":"from tensorflow.keras.models import Model\nfrom customlayers.ProGanDiscriminatorBlock import ProGanDiscriminatorBlock\nfrom customlayers.ConvScaled import ConvScaled\nfrom customlayers.DenseScaled import DenseScaled\nfrom customlayers.CombiningLayer import CombiningLayer\nfrom customlayers.MinibatchStdDev import MinibatchStdDev\nimport customlayers.ops as ops\nimport training.config as config\nfrom tensorflow.keras.layers import LeakyReLU, Flatten\nimport numpy as np\nimport tensorflow as tf\nimport utilities.directories as directories\n\n\nclass ProGanDiscriminator(Model):\n def __init__(self, resolution):\n super(ProGanDiscriminator, self).__init__()\n self.resolution = resolution\n self.resolution_log2 = int(np.log2(resolution))\n self.fromrgb_bigger_res = ConvScaled(\n filters=ops.nf(self.resolution_log2 - 1),\n kernel_size=1,\n lrmul=1.0,\n gain=np.sqrt(2),\n name='fromrgb' + str(self.resolution_log2))\n self.fromrgb_lower_res = ConvScaled(\n filters=ops.nf(self.resolution_log2 - 2),\n kernel_size=1,\n lrmul=1.0,\n gain=np.sqrt(2),\n name='fromrgb' + str(self.resolution_log2 - 1))\n self.combining = CombiningLayer(name='combining' +\n str(self.resolution_log2))\n\n self.blocks = []\n for idx in range(self.resolution_log2, 2, -1):\n self.blocks.append(\n ProGanDiscriminatorBlock(idx,\n name='discriminator_block' +\n str(idx)))\n\n self.relu1 = LeakyReLU(config.leaky_relu)\n self.minibatchdev = MinibatchStdDev(group_size=4, num_new_features=1)\n self.conv1 = ConvScaled(filters=ops.nf(1),\n kernel_size=3,\n lrmul=1.0,\n gain=np.sqrt(2))\n self.relu2 = LeakyReLU(config.leaky_relu)\n self.flatten = Flatten()\n self.dense1 = DenseScaled(units=ops.nf(0), lrmul=1.0, gain=np.sqrt(2))\n self.relu3 = LeakyReLU(config.leaky_relu)\n self.dense2 = DenseScaled(units=1, lrmul=1.0, gain=1.0)\n self.relu_for_combining = LeakyReLU(config.leaky_relu)\n\n def call(self, inputs):\n beforeblock = inputs\n y = self.fromrgb_bigger_res(inputs)\n y = self.relu1(y)\n y = self.blocks[0](y)\n missing_block = ops.downscale2d(beforeblock)\n missing_block = self.fromrgb_lower_res(missing_block)\n missing_block = self.relu_for_combining(missing_block)\n y = self.combining([missing_block, y])\n for idx in range(1, len(self.blocks)):\n y = self.blocks[idx](y)\n\n y = self.minibatchdev(y)\n y = self.conv1(y)\n y = self.relu2(y)\n y = self.flatten(y)\n y = self.dense1(y)\n y = self.relu3(y)\n y = self.dense2(y)\n return y\n\n def compute_output_shape(self, input_shape):\n 
return [input_shape[0], 1]\n\n def get_model_name_with_path(self, epoch=0):\n return 'discriminators/' + 'progan1-e{}-r{}.h5'.format(\n epoch, self.resolution)\n\n def return_assign_alpha_weight_operation(self):\n return self.combining.return_assign_alpha_weight_operation()\n\n def save(self, epoch=0, name=None):\n if name is None:\n name = directories.get_model_path(\n ) + self.get_model_name_with_path(epoch=epoch)\n self.save_weights(name)\n return 'progan1-e{}-r{}.h5'.format(epoch, self.resolution)\n\n def load(self, name):\n self.load_weights(name, by_name=True)\n\n def return_alpha(self):\n return self.combining.return_alpha()\n","sub_path":"models/ProGanDiscriminator.py","file_name":"ProGanDiscriminator.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"258502505","text":"#!/usr/bin/env python\n\nclass Solution(object):\n def lowestCommonAncestor(self, root, p, q):\n if (not root) or (root == p) or (root == q):\n return root\n\n left = self.lowestCommonAncestor(root.left, p, q)\n right = self.lowestCommonAncestor(root.right, p, q)\n\n if left and right:\n return root\n else:\n return left if left else right\n","sub_path":"leetcode_common_ancestor.py","file_name":"leetcode_common_ancestor.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"316307590","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated by kaptkaos on 3/13/17.\n\"\"\"\nimport os\n\n\nclass GetWPFiles:\n def __init__(self, wp_site_dir):\n self.wp_dirs = self.get_subdirs(wp_site_dir)\n\n def get_wp_files(self):\n wp_files = []\n\n for wp_dir in self.wp_dirs:\n wp_files = self.get_filepaths(wp_dir)\n\n return wp_files\n\n @staticmethod\n def get_filepaths(directory):\n \"\"\"\n This function will generate the file names in a directory\n tree by walking the tree either top-down or bottom-up. For each\n directory in the tree rooted at directory top (including top itself),\n it yields a 3-tuple (dirpath, dirnames, filenames).\n :rtype: list\n \"\"\"\n file_paths = [] # List which will store all of the full filepaths.\n\n # Walk the tree.\n for root, directories, files in os.walk(directory):\n for filename in files:\n\n # Join the two strings in order to form the full filepath.\n filepath = os.path.join(root, filename)\n file_paths.append(filepath) # Add it to the list.\n\n return file_paths # Self-explanatory.\n\n @staticmethod\n def get_subdirs(directory):\n \"\"\"\n This function will generate the subdir names in a directory\n tree by walking the tree either top-down or bottom-up. 
For each\n directory in the tree rooted at directory top (including top itself),\n it yields a 3-tuple (dirpath, dirnames, filenames).\n \"\"\"\n subdir_paths = [] # List of sub-directories\n\n # Walk the tree.\n for root, directories, files in os.walk(directory):\n for subdir in directories:\n\n # Join the two strings in order to form the full dirpath.\n subdir_path = os.path.join(root, subdir)\n subdir_paths.append(subdir_path) # Add it to the list.\n\n return subdir_paths # Self-explanatory.\n","sub_path":"Classes/get_files_to_upload.py","file_name":"get_files_to_upload.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"448668191","text":"#!/usr/bin/env python3\nimport sys\nfrom datetime import date\nfrom pathlib import Path\nfrom xml.dom import minidom\nfrom xml.parsers.expat import ExpatError\n\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\n\nFONT = ImageFont.truetype(\"Coder's Crux.ttf\", 16)\nBLACK = \"#999\"\n\n\nLINE_PADDING = 3\nPAGE_MARGINS = 16\nLINE_HEIGHT = 16\nCOLUMN_WIDTH = 280\n\nBOX_SIZE = 12\n\n\ndef _normalize_name(name):\n \"\"\"Return the filename in a \"human readable\" format.\"\"\"\n return Path(name).stem.replace(\"_\", \" \").capitalize()\n\n\ndef _normalize_color(color):\n \"\"\"Noita uses ARGB instead of RGBA, so we gotta swap around a bit.\"\"\"\n return \"#%s\" % (color[2:].lower())\n\n\ndef open_xml(path):\n try:\n return minidom.parse(path)\n except ExpatError as e:\n print(\"XML parsing error:\", e)\n sys.exit(1)\n except FileNotFoundError as e:\n print(\"Error opening XML:\", e)\n sys.exit(1)\n\n\ndef parse_materials_xml(xml_path):\n xml_file = open_xml(xml_path)\n\n cell_data = xml_file.getElementsByTagName(\"CellData\")\n cell_data_children = xml_file.getElementsByTagName(\"CellDataChild\")\n\n elements = cell_data + cell_data_children\n\n line_data = []\n for elem in elements:\n attrs = elem.attributes\n\n name = _normalize_name(attrs[\"name\"].value)\n color = _normalize_color(attrs[\"wang_color\"].value)\n line_data.append((name, color))\n\n return line_data\n\n\ndef parse_biomes_xml(xml_path):\n xml_file = open_xml(xml_path)\n elements = xml_file.getElementsByTagName(\"Biome\")\n\n line_data = []\n for elem in elements:\n attrs = elem.attributes\n\n name = _normalize_name(attrs[\"biome_filename\"].value)\n color = _normalize_color(attrs[\"color\"].value)\n line_data.append((name, color))\n\n return line_data\n\n\ndef render_biomes(draw, biomes, start_y):\n # Sort alphabetically\n biomes = sorted(biomes, key=lambda k: k[0].lower())\n\n # TODO: This will still break with odd sizes, the +1 is just an\n # emergency fix for this specific length.\n column_size = int(len(biomes) / 3) + 1\n biomes_by_columns = [biomes[i:i + column_size] for i in range(0, len(biomes), column_size)]\n\n for col, column in enumerate(biomes_by_columns):\n for row, data in enumerate(column):\n y = start_y + LINE_HEIGHT*row\n x = PAGE_MARGINS + COLUMN_WIDTH*col\n name, color = data\n\n render_line(draw, (x, y), color, name)\n\n # Lowest y-position\n new_y = column_size * LINE_HEIGHT\n return new_y\n\n\ndef render_line(draw, start_point, color, name):\n \"\"\"Draw the colored box and its associated line of text.\n\n Roughly like so:\n [_] #color - Biome Name\n \"\"\"\n x, y = start_point\n\n box_coords = ((x, y), (x + BOX_SIZE, y + BOX_SIZE))\n text_coords = (x + BOX_SIZE + LINE_PADDING, y+LINE_PADDING)\n\n biome_info = f\"{color} - {name}\"\n\n 
draw.rectangle(box_coords, fill=color, outline=BLACK)\n draw.text(text_coords, biome_info, BLACK, font=FONT)\n\n\ndef render_sheet(biomes, materials):\n \"\"\"Render the entire sheet, going in order from top to bottom\"\"\"\n\n # Pre-calculate the dimensions that the sheet will approximately take.\n # NOTE: The height might be a bit off but works for the current specific cases.\n width = PAGE_MARGINS*2 + COLUMN_WIDTH*3\n\n height = len(biomes)/3 * LINE_HEIGHT + PAGE_MARGINS * 2\n height += (len(materials)/3 * LINE_HEIGHT) + (PAGE_MARGINS * 8)\n height = int(height)\n\n img = Image.new(\"RGBA\", (width, height), \"#2c2c2c\")\n draw = ImageDraw.Draw(img)\n\n # Basically we keep track of the Y-coordinate after every draw, and just keep adding stuff\n y = PAGE_MARGINS\n\n draw.text((PAGE_MARGINS, y), \"BIOME COLORS (for biome_map.png)\", BLACK, font=FONT)\n y += (PAGE_MARGINS * 2)\n y = render_biomes(draw, biomes, y)\n\n y += (PAGE_MARGINS * 5)\n draw.text((PAGE_MARGINS, y), \"MATERIALS (for Wang tiles & pixel scenes)\", BLACK, font=FONT)\n y += (PAGE_MARGINS * 2)\n y = render_biomes(draw, materials, y)\n\n disclaimer = \"Generated on: %s\" % date.today().isoformat()\n disclaimer += \" | Contact: @ryyst\"\n disclaimer += \" | Font: Coder's Crux by Chequered Ink\"\n draw.text((PAGE_MARGINS, height-12), disclaimer, BLACK, font=FONT)\n\n return img\n\n\ndef main(args):\n if len(args) != 2:\n print(f\"Usage: generate_biome_cheatsheet.py _biomes_all.xml materials.xml\")\n sys.exit(1)\n\n biomes_path, materials_path = args\n\n biomes = parse_biomes_xml(biomes_path)\n materials = parse_materials_xml(materials_path)\n\n img = render_sheet(biomes, materials)\n\n output_path = \"colors.png\"\n img.save(output_path)\n\n print(\"Material palette generated to `%s`, with %s biomes and %s materials\" % (\n output_path, len(biomes), len(materials)\n ))\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"generate_biome_cheatsheet.py","file_name":"generate_biome_cheatsheet.py","file_ext":"py","file_size_in_byte":4853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"603126951","text":"from django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django import forms\nfrom django.db.models import Q\nfrom .models import CustomUser, Product\n\n\nclass CustomUserCreationForm(UserCreationForm):\n class Meta(UserCreationForm):\n model = CustomUser\n fields = ('email', 'name', 'role')\n widgets = {'role': forms.RadioSelect()}\n\n\nclass CustomUserChangeForm(UserChangeForm):\n class Meta:\n model = CustomUser\n fields = ('email', 'name', 'role')\n widgets = {'role': forms.RadioSelect()}\n\n\nclass ProductForm(forms.ModelForm):\n developers = forms.ModelMultipleChoiceField(queryset=None)\n\n class Meta:\n model = Product\n fields = [\n 'name',\n 'sprint_length',\n 'developers',\n 'description'\n ]\n\n def __init__(self, user, *args, **kwargs):\n super(ProductForm, self).__init__(*args, **kwargs)\n self.fields['developers'].queryset = CustomUser.objects.filter(productOwned__isnull=True,\n developing__isnull=True, role__exact=1).exclude(\n email=user.email) if not self.instance else CustomUser.objects.filter(\n Q(productOwned__isnull=True, role__exact=1) & (\n Q(developing__isnull=True) | Q(developing=self.instance))).exclude(email=user.email)\n self.fields['developers'].initial = self.instance and self.instance.developers.all()\n self.fields['developers'].required = False\n self.fields['name'].disabled = not self.instance\n 
self.fields['sprint_length'].disabled = not self.instance\n","sub_path":"custom_auth/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"70310752","text":"# 重用套接字地址\nimport socket\nimport sys\n\n\ndef reuse_socket_addr():\n # sock = socket.socket()\n\n # 获取旧的SO_REUSEADDR状态\n # old_state = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)\n # print(\"Old socket state: %s\" % old_state)\n\n # 开启SO_REUSEADDR\n # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # new_state = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)\n # print(\"New socket state: %s\" % new_state)\n\n local_port = 8282\n\n srv = socket.socket()\n srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n srv.bind((\"\", local_port))\n srv.listen(1)\n\n print(\"Listening on port: %s\" % local_port)\n while 1:\n # try:\n # print(\"Tamud!\")\n # except KeyboardInterrupt:\n # print(\"Terminated!\")\n # break\n try:\n print(\"before blocking\")\n connection, addr = srv.accept()\n print(\"Connected by %s:%s\" % (addr[0], addr[1]))\n except KeyboardInterrupt:\n break\n except (socket.error, msg) as e:\n print(\"%s\" % (e,))\n\n\nif __name__ == \"__main__\":\n reuse_socket_addr()\n","sub_path":"reuse_socket_addr.py","file_name":"reuse_socket_addr.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"327571333","text":"\nclass Apikey:\t\n\tapikeys = [\n\t\t{'apikey' : 'a00018e7ecbcd904eac4823c83243cca', 'available' : True},\n\t\t{'apikey' : '9f9bf3c22be7d64b41153db213da9d7c', 'available' : True},\n\t\t{'apikey' : 'e0efbb26e78d483a7877ac4fecfebd51', 'available' : True},\n\t\t{'apikey' : '366785e6c6c786bd9764f78327b8a808', 'available' : True},\n\t\t{'apikey' : '37f2ada170c54116ca4febf2f67c3e5d', 'available' : True},\n\t\t{'apikey' : 'ad024de4089b2703e56bd6be46a0f13f', 'available' : True},\n\t\t{'apikey' : '0717b67705373b790e61d2c3f320eb6a', 'available' : True},\n\t\t{'apikey' : '325360feed672a99127489286a44a9e0', 'available' : True},\n\t\t{'apikey' : '6db12642977f9e827d114bbbeebd10b8', 'available' : True},\n\t\t{'apikey' : 'a2879219d211066761c4fe53c8205a23', 'available' : True}\n\t]\n\n\tidx = 0\n\t\n\t@classmethod\n\tdef get_key(cls):\n\t\tcnt = 0\n\t\twhile not cls.apikeys[cls.idx]['available']:\n\t\t\tcls.idx = cls.idx+1\n\t\t\tif cls.idx == len(cls.apikeys)-1:\n\t\t\t\tcls.idx = 0\n\t\t\tcnt += 1\n\t\t\tprint(\"changing api key ({} / {})\".format(cnt, len(cls.apikeys)))\n\t\t\tif cnt == len(cls.apikeys):\n\t\t\t\treturn False\n\n\t\tkey_ = cls.apikeys[cls.idx]\n\t\t# cls.idx = cls.idx+1\n\t\treturn key_['apikey']\n\n\t@classmethod\n\tdef change_key(cls):\n\t\tcls.apikeys[cls.idx]['available'] = False\n\t\tk_ = Apikey.get_key()\n\t\tprint(\"API Key is changed to {}\".format(k_))\n\t\treturn k_\n\n\t@classmethod\n\tdef Make_apiKey(cls, i):\n\t\tfrom selenium import webdriver\n\t\timport time\n\n\t\tbrowser = webdriver.Chrome(executable_path=\"..\\\\driver\\\\chromedriver.exe\")\n\t\tbrowser.get(\"https://dev.elsevier.com/apikey/create\") \n\n\t\tid_ = 'ls123kr@naver.com'\n\t\tpassword_ = '#########'\n\n\t\temaillabel = browser.find_element_by_id('inputEmail')\n\t\temaillabel.send_keys(id_)\n\n\t\tpasswordlabel = browser.find_element_by_id('inputPassword')\n\t\tpasswordlabel.send_keys(password_)\n\n\t\tsubmit = 
browser.find_element_by_class_name(\"btn-primary\")\n\t\tsubmit.click()\n\n\t\ttime.sleep(2)\n\n\t\tcreateapi = browser.find_element_by_link_text(\"Create API Key\")\n\t\tcreateapi.click()\n\n\t\ttime.sleep(2)\n\n\t\tapilabel = browser.find_element_by_id(\"projectName\")\n\t\tapilabel.send_keys(\"sele\"+str(i))\n\n\t\tagreebutton = browser.find_element_by_class_name(\"checkbox-label\")\n\t\tagreebutton.click()\n\t\ttime.sleep(1)\n\n\t\ttextminingAgreedbutton = browser.find_elements_by_class_name(\"checkbox-label\")[1]\n\t\ttextminingAgreedbutton.click()\n\n\t\tbrowser.find_element_by_id(\"register\").click()\n\n\t\ttime.sleep(2)\n\n\t\tapikeys_ = []\n\t\ttrs = browser.find_element_by_class_name('table').find_elements_by_tag_name('tr')\n\t\tfor tr in trs:\n\t\t\ttd = tr.find_elements_by_tag_name(\"td\")[-1]\n\t\t\tprint(\"apikey : \", td.text)\n\t\t\t# for td in tds:\n\t\t\tapikeys_.append(td.text)\n\n\t\treturn apikeys_\n\n\n\t\t\t\n\t\n\n","sub_path":"apikey.py","file_name":"apikey.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"471446275","text":"\"\"\" location_editor.views.neighbors\n\n This module implements a view function to let the user edit the list of\n neighbours for a given location.\n\"\"\"\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\n\nfrom shared.models import Location\n\nfrom location_editor.forms import AddLocationForm\n\nfrom admin_interface import menus\n\n#############################################################################\n\ndef neighbors(request, loc_code):\n \"\"\" Respond to the \"/geo/editor/location/neighbors/XYZ/\" URL.\n\n We let the user edit the list of neighbours for the given location.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseRedirect(reverse(settings.ADMIN_HOME_VIEW))\n\n try:\n location = Location.objects.get(code=loc_code)\n except Location.DoesNotExist:\n return HttpResponseRedirect(reverse(settings.ADMIN_HOME_VIEW))\n\n neighbours = []\n for neighbour in location.neighbors.all():\n neighbours.append(neighbour)\n\n if request.method == \"GET\":\n\n # We're visiting the page for the first time -> just display it.\n\n form = AddLocationForm()\n errMsg = None\n confirm = request.GET.get(\"confirm\")\n\n elif request.method == \"POST\":\n\n # The user is submitting our form. See what the user wants to do.\n\n # Did the user click on the \"Finished\" button?\n\n if request.POST.get(\"finished\") == \"Finished\":\n return HttpResponseRedirect(\n reverse(\"location_editor.views.main\"))\n\n # Did the user click on one of our \"Delete\" buttons? We firstly\n # display the confirmation button beside the entry, and only delete the\n # entry if the user confirms.\n\n for neighbour in location.neighbors.all():\n deleteValue = request.POST.get(\"del-\" + neighbour.code)\n if deleteValue == \"Delete\":\n # The user clicked on the \"Delete\" button for the first time ->\n # redisplay the page with the confirmation buttons.\n return HttpResponseRedirect(\n reverse(\"location_editor.views.neighbors\",\n args=[loc_code]) +\n \"?confirm=\" + neighbour.code)\n elif deleteValue == \"Yes\":\n # The user clicked on our \"Yes\" confirmation button. 
Delete\n # this neighbour.\n location.neighbors.remove(neighbour)\n location.save()\n\n # Finally, tell the user's web browser to reload the page.\n\n return HttpResponseRedirect(\n reverse(\"location_editor.views.neighbors\",\n args=[loc_code]))\n elif deleteValue == \"No\":\n # The user clicked on the \"No\" confirmation button. Redisplay\n # the page without the confirmation buttons.\n return HttpResponseRedirect(\n reverse(\"location_editor.views.neighbors\",\n args=[loc_code]))\n\n # Did the user click on the \"Add\" button?\n\n if request.POST.get(\"add\") == \"Add\":\n # Respond to the user adding a new neighbour.\n form = AddLocationForm(request.POST)\n errMsg = None\n if form.is_valid():\n new_neighbour_id = form.cleaned_data['loc_id']\n\n if new_neighbour_id == None:\n errMsg = \"Please enter a location\"\n\n if errMsg == None:\n try:\n newNeighbour = Location.objects.get(\n id=new_neighbour_id)\n except Location.DoesNotExist:\n errMsg = \"No such location.\"\n\n if errMsg == None:\n filter = location.neighbors.filter(\n id=new_neighbour_id)\n if filter.count() > 0:\n errMsg = \"That location is already listed as \" \\\n + \"a neighbor!\"\n\n if errMsg == None:\n location.neighbors.add(newNeighbour)\n location.save()\n\n # Finally, tell the user's web browser to reload the\n # page.\n\n return HttpResponseRedirect(\n reverse(\"location_editor.views.neighbors\",\n args=[loc_code]))\n\n # If we get here, we're going to display the form again. Grab our\n # \"confirm\" parameter so the form can display the appropriate\n # confirmation buttons.\n\n confirm = request.POST.get(\"confirm\")\n\n # If we get here, display the form to the user.\n\n imports = ['',\n '',\n '',\n '',\n ]\n\n lookup_url = reverse(\"admin_interface.views.lookup.location\")\n\n menu_html = menus.generate(request, \"Location Editor\",\n \"location_editor\", \"neighbors\")\n\n return render_to_response(\"location_editor/templates/wrapper.html\",\n {'menu_html' : menu_html,\n 'tab' : \"neighbors\",\n 'heading' : \"Editing \" + str(location),\n 'location' : location,\n 'template_name' : \"neighbours.html\",\n 'errMsg' : errMsg,\n 'form' : form,\n 'neighbours' : neighbours,\n 'confirm' : confirm,\n 'extra_head_tags' : imports,\n 'lookup_url' : lookup_url,\n },\n context_instance=RequestContext(request))\n\n","sub_path":"apps/location_editor/views/neighbors.py","file_name":"neighbors.py","file_ext":"py","file_size_in_byte":6715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"616533708","text":"#!/usr/bin/python3\n\"\"\"\n jekyll document spider demo\n\n @author willike\n @since 2017-02-18\n @description http://mp.weixin.qq.com/s/LH8nEFfVH4_tvYWo46CF5Q 阅读验证\n\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\nimport pdfkit\n\n# jekyll 文档链接\ndocument_url = \"https://jekyllrb.com/docs/home/\"\n\n# html 模板\nhtml_template = \"\"\"\n\n\n \n \n \n \n {content}\n \n\n\"\"\"\n\ndef parse_url_to_html(url):\n \"\"\"\n 获取链接文档内容并保存\n\n :param url: 文档链接\n :return:\n \"\"\"\n response = requests.get(url)\n if response.status_code != 200:\n return\n\n soup = BeautifulSoup(response.content, 'html.parser')\n body = soup.find_all(class_=\"unit four-fifths\")[0] # 获取文档内容\n html = str(body)\n html = html_template.format(content=html)\n file_path = \"./data/demo.html\"\n with open(file_path, \"wb\") as f:\n f.write(bytes(html, \"utf_8\")) # string转byte\n\n return file_path\n\n\ndef get_url_list(url):\n \"\"\"\n 获取链接列表\n\n :param url:\n :return:\n \"\"\"\n response = 
requests.get(url)\n if response.status_code != 200:\n return\n soup = BeautifulSoup(response.content, \"html5lib\")\n aside_links = soup.find_all(\"aside\")[1] # 获取侧边栏内容\n urls = []\n for li in aside_links.find_all(\"li\"):\n # 侧边栏 所有链接\n a_url = \"https://jekyllrb.com/\" + li.a.get(\"href\")\n urls.append(a_url)\n\n return urls\n\n\ndef save_pdf(file_path):\n \"\"\"\n 把html文件保存成pdf文件\n\n :param file_path: html文件路径\n :return:\n \"\"\"\n options = {\n 'page-size': 'Letter',\n 'encoding': \"UTF-8\",\n 'custom-header': [\n ('Accept-Encoding', 'gzip')\n ]\n }\n pdfkit.from_file(file_path, \"./data/demo.pdf\", options=options)\n\n\n# run demo\n# file_name = parse_url_to_html(url=document_url)\n# save_pdf(file_name=file_name)\n","sub_path":"jekyll-document-to-pdf/jekyll_spider_demo.py","file_name":"jekyll_spider_demo.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"419518882","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom imgPro.items import ImgproItem\nclass ImgSpider(scrapy.Spider):\n name = 'img'\n # allowed_domains = ['www.xxx.com']\n start_urls = ['http://www.521609.com/daxuexiaohua/']\n\n def parse(self, response):\n #解析图片地址和图片名称\n li_list = response.xpath('//*[@id=\"content\"]/div[2]/div[2]/ul/li')\n for li in li_list:\n img_src = 'http://www.521609.com'+li.xpath('./a[1]/img/@src').extract_first()\n img_name = li.xpath('./a[1]/img/@alt').extract_first()+'.jpg'\n\n item = ImgproItem()\n item['name'] = img_name\n item['src'] = img_src\n\n yield item\n","sub_path":"7.scrapy框架02/imgPro/imgPro/spiders/img.py","file_name":"img.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"131402072","text":"'''import os #to run commands\nimport pandas as pd\t#to read .csv files\nimport nltk #used to remove unnecessary words\nfrom sklearn.feature_extraction.text import CountVectorizer # \t\nfrom sklearn.ensemble import RandomForestClassifier\nfrom KaggleWord2VecUtility import KaggleWord2VecUtility #helper class for \"cleaning data\" html and so on...\n\nif __name__='__main__':\n\t#[1] Reading the data\n\ttrain=pd.read_csv(os.path.join)\n'''\n\n\n\n#using twitter\n# pip3 install textblob\n#dependencies are tweepy(twitter api), textblob (for sentiment analysis)\n\n\n'''\nfrom textblob import TextBlob\nwiki = TextBlob(\"I am in all humility and honesty an ultimate badass\")\nwiki.tags\n\nO/P: \n[('I', u'PRP'), ('am', u'VBP'), ('in', u'IN'), ('all', u'DT'), ('humility', u'NN'), ('and', u'CC'), ('honesty', u'NN'), ('an', u'DT'), ('ultimate', u'JJ'), ('badass', u'NN')]\n\nwiki.words\n\nO/P:\n\nWordList(['I', 'am', 'in', 'all', 'humility', 'and', 'honesty', 'an', 'ultimate', 'badass'])\n\nwiki.sentiment.polarity\n\nO/P: 0.0\n\n'''\n\nimport tweepy\nfrom textblob import TextBlob\n\nconsumer_key = 'PDYuPV60ufnSeQHVLxtaD64lv'\nconsumer_secret = 'fxmtOIa1zKl9TAWKewgqDnxPWg2pyV95RtEBtC2rvoGY4lar1M'\n\naccess_token = '2785297138-2mMlXgiIQtDsie7qJg9QjaT9we1jjy07zA3Pjux'\naccess_token_secret = '0PunsCQinIcjQgHU67GIGrZD9Rkwjhp0xxJkXCrJthx5e'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth)\t\t\t#login using codes above\n\npublic_tweets = api.search('trump')\t#can be used to create, search tweets and find twitter users\n\nfor tweet in public_tweets:\n\tprint(tweet.text)\n\tanalysis = 
TextBlob(tweet.text)\n\tprint(analysis.sentiment)\t\t#prints polarity - opinion and subjectivity - how factual/subjective it is\n\n\n\n\n\n\n","sub_path":"#2.py","file_name":"#2.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"386817764","text":"# Copyright 2018 ICON Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A module for containers on the loopchain \"\"\"\n\nimport logging\nimport multiprocessing\nimport setproctitle\nfrom concurrent import futures\nfrom enum import Enum\n\nimport grpc\n\nimport loopchain.utils as util\nfrom loopchain import configure as conf\nfrom loopchain.baseservice import CommonProcess, MonitorAdapter, ObjectManager, Monitor, CommonSubprocess\nfrom loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code\nfrom loopchain.tools.grpc_helper import GRPCHelper\nfrom loopchain.utils import command_arguments\n\n\nclass ServerType(Enum):\n REST_RS = 1\n REST_PEER = 2\n GRPC = 3\n\n\nclass Container(CommonProcess, MonitorAdapter):\n\n def __init__(self,\n port,\n server_type=ServerType.GRPC,\n peer_ip=None,\n process_name=\"\",\n channel=\"\",\n start_param_set=None):\n\n CommonProcess.__init__(self)\n if server_type == ServerType.GRPC:\n # monitoring gRPC Container\n MonitorAdapter.__init__(self, channel=channel, process_name=f\"{process_name}\")\n self._port = port\n self._type = server_type\n self._peer_ip = peer_ip\n self._process_name = process_name\n self._channel = channel\n self._start_param_set = start_param_set\n self._service_stub = None\n\n def is_alive(self):\n try:\n # util.logger.spam(f\"{self._process_name} is_alive\")\n response = self._service_stub.call(\n \"Request\",\n loopchain_pb2.Message(code=message_code.Request.is_alive))\n return True if response is not None else False\n except Exception as e:\n if self._service_stub is None:\n util.logger.spam(f\"container:is_alive service_stub set now! 
ignore this exception({e})\")\n peer_service = ObjectManager().peer_service\n if peer_service is not None:\n self._service_stub = peer_service.channel_manager.get_score_container_stub(self._channel)\n return True\n logging.warning(f\"container:is_alive has exception({e})\")\n return False\n\n def re_start(self):\n Monitor().stop_wait_monitoring()\n ObjectManager().peer_service.channel_manager.stop_score_containers()\n ObjectManager().peer_service.service_stop()\n util.exit_and_msg(f\"Score Container({self._channel}) Down!\")\n\n def run(self, conn, event: multiprocessing.Event):\n logging.debug(\"Container run...\")\n\n if self._type == ServerType.GRPC:\n logging.info(f'Container run grpc port {self._port}')\n\n setproctitle.setproctitle(f\"{setproctitle.getproctitle()} {self._process_name}\")\n\n server = grpc.server(futures.ThreadPoolExecutor(conf.MAX_WORKERS, \"ContainerThread\"))\n loopchain_pb2_grpc.add_ContainerServicer_to_server(self, server)\n GRPCHelper().add_server_port(server, '[::]:' + str(self._port), conf.SSLAuthType.none)\n\n logging.info(f'Container run complete grpc port {self._port}')\n elif self._type == ServerType.REST_PEER:\n args = ['python3', '-m', 'loopchain', 'rest', '-p', str(self._port)]\n args += command_arguments.get_raw_commands_by_filter(\n command_arguments.Type.AMQPTarget,\n command_arguments.Type.AMQPKey,\n command_arguments.Type.Develop,\n command_arguments.Type.ConfigurationFilePath,\n command_arguments.Type.RadioStationTarget\n )\n server = CommonSubprocess(args)\n api_port = self._port + conf.PORT_DIFF_REST_SERVICE_CONTAINER\n server.set_proctitle(f\"{setproctitle.getproctitle()} RestServer api_port({api_port})\")\n else:\n args = ['python3', '-m', 'loopchain', 'rest-rs', '-p', str(self._port)]\n args += command_arguments.get_raw_commands_by_filter(\n command_arguments.Type.Develop,\n command_arguments.Type.ConfigurationFilePath\n )\n\n api_port = self._port + conf.PORT_DIFF_REST_SERVICE_CONTAINER\n server = CommonSubprocess(args)\n server.set_proctitle(f\"{setproctitle.getproctitle()} RestServerRS api_port({api_port})\")\n\n logging.info(f'Container run complete port {self._port}')\n\n # complete init\n event.set()\n\n if self._type == ServerType.GRPC:\n self._append_monitor()\n\n command = None\n while command != \"quit\":\n try:\n command, param = conn.recv() # Queue 에 내용이 들어올 때까지 여기서 대기 된다. 
따라서 Sleep 이 필요 없다.\n logging.debug(\"Container got: \" + str(param))\n except Exception as e:\n logging.warning(\"Container conn.recv() error: \" + str(e))\n except KeyboardInterrupt:\n pass\n\n if self._type == ServerType.GRPC:\n server.stop(0)\n else:\n server.stop()\n\n logging.info(\"Server Container Ended.\")\n","sub_path":"loopchain/container/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":5660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"413889156","text":"from application import db\nimport datetime\n\nclass LocationData(db.Model):\n description = db.Column(db.String(256), unique = True)\n latitude = db.Column(db.Float(16), unique = True)\n longitude = db.Column(db.Float(16), unique = True)\n registered = db.Column(db.String(16), unique = False)\n \n def __init__(self, description, latitude, longitude):\n self.description = description\n self.latitude = latitude\n self.longitude = longitude\n\n def __repr__(self):\n return '' % self.description % self.latitude % self.longitude","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"138854999","text":"from torch.autograd import Function\nfrom .lincg import LinearCG\nfrom .lanczos_quadrature import StochasticLQ\nfrom .trace import trace_components\nimport math\n\n\ndef _default_matmul_closure_factor(mat):\n return mat\n\n\ndef _default_derivative_quadratic_form_factory(mat):\n def closure(left_vectors, right_vectors):\n if left_vectors.ndimension() == 1:\n left_factor = left_vectors.unsqueeze(0).contiguous()\n right_factor = right_vectors.unsqueeze(0).contiguous()\n else:\n left_factor = left_vectors.contiguous()\n right_factor = right_vectors.contiguous()\n left_factor.unsqueeze_(2)\n right_factor.unsqueeze_(1)\n res = (left_factor * right_factor).sum(dim=0).squeeze_()\n return res,\n return closure\n\n\ndef inv_matmul_factory(matmul_closure_factory=_default_matmul_closure_factor,\n derivative_quadratic_form_factory=_default_derivative_quadratic_form_factory):\n class InvMatmul(Function):\n def __init__(self, *args):\n self.args = args\n\n def forward(self, *args):\n closure_args = self.args + args[:-1]\n rhs = args[-1]\n res = LinearCG().solve(matmul_closure_factory(*closure_args), rhs)\n self.save_for_backward(*(list(args) + [res]))\n return res\n\n def backward(self, grad_output):\n if derivative_quadratic_form_factory is None:\n raise NotImplementedError\n args = self.saved_tensors[:-2]\n closure_args = self.args + args\n res = self.saved_tensors[-1]\n\n arg_grads = [None] * len(args)\n rhs_grad = None\n\n # input_1 gradient\n if any(self.needs_input_grad[:-1]):\n lhs_matrix_grad = LinearCG().solve(matmul_closure_factory(*closure_args), grad_output)\n lhs_matrix_grad = lhs_matrix_grad.mul_(-1)\n if res.ndimension() == 1:\n res = res.unsqueeze(1)\n if lhs_matrix_grad.ndimension() == 1:\n lhs_matrix_grad = lhs_matrix_grad.unsqueeze(1)\n\n arg_grads = list(derivative_quadratic_form_factory(*args)(lhs_matrix_grad.t(), res.t()))\n\n # input_2 gradient\n if self.needs_input_grad[-1]:\n rhs_grad = LinearCG().solve(matmul_closure_factory(*closure_args), grad_output)\n\n return tuple(arg_grads + [rhs_grad])\n\n return InvMatmul\n\n\ndef matmul_factory(matmul_closure_factory=_default_matmul_closure_factor,\n derivative_quadratic_form_factory=_default_derivative_quadratic_form_factory):\n class Matmul(Function):\n def 
__init__(self, *args):\n self.args = args\n\n def forward(self, *args):\n closure_args = self.args + args[:-1]\n rhs = args[-1]\n res = matmul_closure_factory(*closure_args)(rhs)\n self.save_for_backward(*args)\n return res\n\n def backward(self, grad_output):\n if derivative_quadratic_form_factory is None:\n raise NotImplementedError\n args = self.saved_tensors[:-1]\n rhs = self.saved_tensors[-1]\n closure_args = self.args + args\n\n arg_grads = [None] * len(args)\n rhs_grad = None\n\n # input_1 gradient\n if any(self.needs_input_grad[:-1]):\n if rhs.ndimension() == 1:\n rhs = rhs.unsqueeze(1)\n if grad_output.ndimension() == 1:\n grad_output_matrix = grad_output.unsqueeze(1)\n else:\n grad_output_matrix = grad_output\n\n arg_grads = list(derivative_quadratic_form_factory(*args)(grad_output_matrix.t(), rhs.t()))\n\n # input_2 gradient\n if self.needs_input_grad[-1]:\n rhs_grad = matmul_closure_factory(*closure_args)(grad_output)\n\n return tuple(arg_grads + [rhs_grad])\n\n return Matmul\n\n\ndef trace_logdet_quad_form_factory(matmul_closure_factory=_default_matmul_closure_factor,\n derivative_quadratic_form_factory=_default_derivative_quadratic_form_factory):\n class TraceLogDetQuadForm(Function):\n def forward(self, mu_diff, chol_covar1, *covar2_args):\n covar2_matmul_closure = matmul_closure_factory(*covar2_args)\n\n # log |K2|\n slq = StochasticLQ(num_random_probes=10, cls=type(covar2_args[0]))\n log_det_covar2, = slq.evaluate(covar2_matmul_closure, len(mu_diff), [lambda x: x.log()])\n\n # Tr(K2^{-1}K1)\n def matmul_closure(sample_matrix):\n rhs_vectors = chol_covar1.t().contiguous().matmul(chol_covar1.matmul(sample_matrix))\n return LinearCG().solve(covar2_matmul_closure, rhs_vectors)\n\n sample_matrix, mat_inv_vectors = trace_components(None, matmul_closure, size=len(mu_diff),\n tensor_cls=type(chol_covar1))\n trace = (sample_matrix * mat_inv_vectors).sum()\n\n # Inverse quad form\n mat_inv_y = LinearCG().solve(covar2_matmul_closure, mu_diff)\n inv_quad_form = mat_inv_y.dot(mu_diff)\n\n res = log_det_covar2 + trace + inv_quad_form\n\n self.save_for_backward(*([mu_diff] + [chol_covar1] + list(covar2_args)))\n self.covar2_matmul_closure = covar2_matmul_closure\n self.mat_inv_y = mat_inv_y\n\n return mu_diff.new().resize_(1).fill_(res)\n\n def backward(self, grad_output):\n if derivative_quadratic_form_factory is None:\n raise NotImplementedError\n grad_output_value = grad_output.squeeze()[0]\n\n args = self.saved_tensors\n\n mu_diff = args[0]\n chol_covar1 = args[1]\n covar2_args = args[2:]\n\n mat_inv_y = self.mat_inv_y\n covar2_matmul_closure = self.covar2_matmul_closure\n\n grad_mu_diff = None\n grad_cholesky_factor = None\n grad_covar2_args = [None] * len(covar2_args)\n\n if self.needs_input_grad[0]:\n # Need gradient with respect to mu_diff\n grad_mu_diff = mat_inv_y.mul(2 * grad_output_value)\n\n if self.needs_input_grad[1]:\n # Compute gradient with respect to the Cholesky factor L\n grad_cholesky_factor = 2 * LinearCG().solve(matmul_closure_factory(*covar2_args), chol_covar1)\n grad_cholesky_factor.mul_(grad_output_value)\n\n if any(self.needs_input_grad[2:]):\n # Compute gradient with respect to covar2\n for i in range(len(covar2_args)):\n if self.needs_input_grad[i + 2]:\n grad_covar2_args[i] = covar2_args[i].new().resize_as_(covar2_args[i]).zero_()\n\n quad_part = derivative_quadratic_form_factory(*covar2_args)(mat_inv_y, mat_inv_y)\n\n def right_matmul_closure(sample_matrix):\n rhs_vectors = chol_covar1.t().contiguous().mm(chol_covar1.mm(sample_matrix))\n return 
sample_matrix - LinearCG().solve(covar2_matmul_closure, rhs_vectors)\n\n def left_matmul_closure(sample_matrix):\n return LinearCG().solve(covar2_matmul_closure, sample_matrix)\n\n left_vectors, right_vectors = trace_components(left_matmul_closure, right_matmul_closure,\n size=len(mu_diff), tensor_cls=type(mat_inv_y))\n\n grad_covar2_fn = derivative_quadratic_form_factory(*covar2_args)\n grad_covar2_args = list(grad_covar2_fn(left_vectors.t(), right_vectors.t()))\n\n for i in range(len(covar2_args)):\n if grad_covar2_args[i] is not None:\n grad_covar2_args[i].add_(-quad_part[i])\n grad_covar2_args[i].mul_(grad_output_value)\n\n return tuple([grad_mu_diff] + [grad_cholesky_factor] + grad_covar2_args)\n\n return TraceLogDetQuadForm\n\n\ndef exact_gp_mll_factory(matmul_closure_factory=_default_matmul_closure_factor,\n derivative_quadratic_form_factory=_default_derivative_quadratic_form_factory):\n class ExactGPMLL(Function):\n def forward(self, *args):\n closure_args = args[:-1]\n labels = args[-1]\n\n matmul_closure = matmul_closure_factory(*closure_args)\n mat_inv_labels = LinearCG().solve(matmul_closure, labels)\n # Inverse quad form\n res = mat_inv_labels.dot(labels)\n # Log determinant\n slq = StochasticLQ(num_random_probes=10, cls=type(closure_args[0]))\n logdet, = slq.evaluate(matmul_closure, len(labels), [lambda x: x.log()])\n\n res += logdet\n res += math.log(2 * math.pi) * len(labels)\n res *= -0.5\n\n self.mat_inv_labels = mat_inv_labels\n self.matmul_closure = matmul_closure\n self.save_for_backward(*args)\n return labels.new().resize_(1).fill_(res)\n\n def backward(self, grad_output):\n if derivative_quadratic_form_factory is None:\n raise NotImplementedError\n\n closure_args = self.saved_tensors[:-1]\n labels = self.saved_tensors[-1]\n mat_inv_labels = self.mat_inv_labels\n grad_output_value = grad_output.squeeze()[0]\n\n matmul_closure = self.matmul_closure\n closure_arg_grads = [None] * len(closure_args)\n labels_grad = None\n\n # input_1 gradient\n if any(self.needs_input_grad[:-1]):\n for i in range(len(closure_args)):\n if self.needs_input_grad[i]:\n closure_arg_grads[i] = closure_args[i].new().resize_as_(closure_args[i]).zero_()\n else:\n closure_arg_grads[i] = None\n\n quad_form_part = derivative_quadratic_form_factory(*closure_args)(mat_inv_labels, mat_inv_labels)\n\n def left_matmul_closure(sample_matrix):\n return LinearCG().solve(matmul_closure, sample_matrix)\n left_vectors, right_vectors = trace_components(left_matmul_closure, None, size=len(labels),\n tensor_cls=type(mat_inv_labels))\n closure_arg_grads = list(derivative_quadratic_form_factory(*closure_args)(left_vectors.t(),\n right_vectors.t()))\n for i in range(len(closure_args)):\n if self.needs_input_grad[i]:\n closure_arg_grads[i] = quad_form_part[i].add_(-closure_arg_grads[i])\n closure_arg_grads[i].mul_(0.5 * grad_output_value)\n\n # input_2 gradient\n if self.needs_input_grad[-1]:\n # Need gradient with respect to labels\n labels_grad = mat_inv_labels.mul_(-grad_output_value)\n\n return tuple(closure_arg_grads + [labels_grad])\n\n return ExactGPMLL\n","sub_path":"gpytorch/utils/function_factory.py","file_name":"function_factory.py","file_ext":"py","file_size_in_byte":11226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"246066914","text":"import fnmatch\nimport os, sys\nfrom lxml import etree\nfrom .release import release_publication\n\nSNAPSHOT_TAG_SUFFIX = \"SNAPSHOT\"\n\n\ndef maven_get_version(workspace=None):\n # read current version\n workspace 
= workspace or os.environ.get('GITHUB_WORKSPACE')\n pom_path = os.path.join(workspace, 'pom.xml')\n pom_doc = etree.parse(pom_path)\n r = pom_doc.xpath('/pom:project/pom:version',\n namespaces={'pom': 'http://maven.apache.org/POM/4.0.0'})\n version = r[0].text\n print(f'Yo yo maven gets version {version} from the pom', file=sys.stderr)\n return version\n\n\ndef maven_upload_assets(repo_name, tag_name, release):\n \"\"\"\n Upload packages produced by maven\n\n \"\"\"\n print(f'Yo yo maven upload assets for {repo_name} and tag {tag_name}', file=sys.stderr)\n # upload assets\n assets = ['*-bin.tar.gz', '*-bin.zip', '*.jar']\n for dirname, subdirs, files in os.walk(os.environ.get('GITHUB_WORKSPACE')):\n if dirname.endswith('target'):\n for extension in assets:\n for filename in fnmatch.filter(files, extension):\n with open(os.path.join(dirname, filename), 'rb') as f_asset:\n release.upload_asset('application/tar+gzip',\n filename,\n f_asset)\n\n\ndef main():\n release_publication(SNAPSHOT_TAG_SUFFIX, maven_get_version, maven_upload_assets)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pds_github_util/release/maven_release.py","file_name":"maven_release.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"270621342","text":"from datetime import date,timedelta\nfrom time import strptime,sleep\nfrom configparser import ConfigParser as conpar\nimport sys\nimport getopt\nimport pickle\nimport subprocess\nfrom subs_tools import *\nfrom global_config import *\n\n\ndef do_plotting():\n d_s = get_date('23092013')\n wts,cals,calr,lifts,days = get_stats(d_s,14)\n print(wts)\n #make_plot([],wts,'Weight')\n #Calculate n day average\n n_av = 4\n wts_n_av = []\n for i,wt in enumerate(wts):\n print(wt)\n if i < n_av:\n wts_n_av.append(wt)\n else:\n wts_n_av.append(sum(wts[i-n_av:i])/n_av)\n print(wts_n_av)\n make_plot([],wts_n_av,'Weight')\n\ndef make_plot(x_vec,y_vec,title):\n gnuplot = subprocess.Popen([\"/usr/bin/gnuplot\", \"-persist\"], \n stdin=subprocess.PIPE)\n titlestr = \"set title '\" + title+\"'\\n\"\n gnuplot.stdin.write(titlestr.encode('UTF-8'))\n gnuplot.stdin.write(b\"plot '-' notitle\\n\")\n for i in range(len(y_vec)):\n instr = \"%f\\n\" % (y_vec[i])\n gnuplot.stdin.write(instr.encode('UTF_8'))\n gnuplot.stdin.write(b\"exit\\n\")\n gnuplot.stdin.flush()\n gnuplot.stdin.close()\ndef record_day(indate=False):\n if not indate:\n indate = get_date(indate)\n current_day = tmday(indate)\n try:\n write_day(current_day)\n except UserWarning:\n sys.exit(\"Aborting\")\n print(\"%s day recorded\\n\" % (current_day.daytype))\n current_day.stats()\n\ndef mod_day(inday):\n print(\"%s of week %s fetched, %s\" \n % (inday.date.strftime(\"%A\"),inday.wk,inday.date.strftime(\"%d/%m/%Y\")))\n domod = True\n while domod == True:\n print(\"What would you like to modify?\")\n print(\"1: Day data. I.e. date, day type\")\n print(\"2: Body data. I.e. weight, calories\")\n print(\"3: Lift data.\")\n while True:\n modinput = input(\"Enter 1, 2 or 3: \")\n if modinput.lower() in ('1','one'):\n inday.date = get_date()\n inday.rec_daytype()\n break\n elif modinput.lower() in ('2','two'):\n inday.rec_body()\n break\n elif modinput.lower() in ('3','three'):\n inday.rec_lifts()\n break\n print(\"Unrecognized input, try again.\")\n while True:\n check = input(\"Would you like to modify more? 
y/n \")\n if check in yeslist:\n break\n elif check in nolist:\n domod = False\n break\n print(\"Unrecognized input, try again.\")\n print(\"Modification complete, attempting to save new data.\")\n try:\n write_day(inday)\n except UserWarning:\n sys.exit(\"Aborting\")\n\n\nclass tmday(object):\n def __init__(self,indate):\n#Get default data from config\n self.height = default_height\n self.birthdate = default_birthdate\n self.age = self.calc_age(self.birthdate)\n self.actx = default_actx\n self.date = indate\n print(\"Recording data for %s.\\n\" % (self.date))\n self.wk = self.date.isocalendar()[1]\n self.rec_body()\n self.rec_daytype()\n if self.daytype != \"Rest\":\n self.rec_lifts()\n def stats(self):\n print(\"------------------------------------\")\n print(\"Statistics for \\033[91m%s\\033[0m of week \\033[92m%d\\033[0m\\n\"\\\n % (self.date.strftime(\"%A\"),self.wk))\n print(\"Date: %s\" %(self.date.strftime(\"%d/%m/%y\")))\n print(\"Bodyweight: %6.2f kilos\" % (self.weight))\n print(\"Calories consumed: %6.2f kcal\" % (self.calories))\n print(\"Maintenance calories: %6.2f kcal\" %\n (self.maintenance))\n print(\"Daily caloric balance: %6.2f\" % (self.calories - self.maintenance))\n print(\"This day was a %s day\" % (self.daytype))\n if self.daytype != \"Rest\":\n print(\"\\nLifts performed:\")\n for i,el in enumerate(self.lifts):\n ex=el[len(self.daytype)+1:]\n ex = ex[0].upper()+ex[1:]\n print(\"%d: %s of %d sets, %d reps at %5.2f kilos.\"%\n (i+1,ex,self.lifts[el][0],self.lifts[el][1],self.lifts[el][2]))\n print(\"------------------------------------\")\n def rec_body(self):\n self.weight = float(input(\"Enter bodyweight in kilos: \"))\n self.calories = int(input(\"Enter consumed Calories (kcal): \"))\n self.maintenance = self.calc_maint(self.weight,self.height,self.age,self.actx)\n def rec_daytype(self):\n out = input(\"Enter type of Texas Method day.\\n(V)olume day, (R)ecovery day, (I)ntensity day or (N)on workout day. \")\n while True:\n if out in (\"V\",\"v\",\"Volume\",\"volume\"):\n self.daytype = \"Volume\"\n break\n if out in (\"R\",\"r\",\"Recovery\",\"recovery\"):\n self.daytype = \"Recovery\"\n break\n if out in (\"I\",\"i\",\"Intensity\",\"intensity\"):\n self.daytype = \"Intensity\"\n break\n if out in (\"N\",\"n\",\"non\",\"Non\",\"Non workout\", \"non workout\"):\n self.daytype = \"Rest\"\n break\n print(\"Unrecognized option, try again\")\n def rec_lifts(self):\n self.lifts = {}\n isDone = False\n while isDone == False:\n while True:\n lift = input(\"Enter excercise performed. (Enter (D)one if finished) \")\n if lift in (\"d\",\"D\",\"Done\",\"done\"):\n isDone = True\n break\n srs = self.daytype.lower()+\" \"+lift.lower()\n self.liftset =set(l for l in default_lifts)\n if srs in self.liftset:\n print(\"Lift found\")\n liftname = srs[0].upper()+srs[1:]\n while True:\n defcheck = input(\"Use default set/rep scheme?\\n(%d sets of %d reps.) \"\n %(default_lifts[srs][0],default_lifts[srs][1]))\n if defcheck in yeslist:\n while True:\n try:\n weight = float(input(\"Enter weight lifted in kilos: \"))\n except ValueError:\n print(\"Error! Input must be a number\")\n continue\n break\n print(\"%s of %d sets, %d reps at %5.2f kilos recorded.\"\n %(liftname,default_lifts[srs][0],default_lifts[srs][1],weight))\n self.lifts[liftname]=[default_lifts[srs][0],default_lifts[srs][1],weight]\n break\n elif defcheck in nolist:\n while True:\n try:\n sinput = int(input(\"Enter number of sets performed: \"))\n except ValueError:\n print(\"Error! 
Input must be an integer.\")\n continue\n break\n while True:\n try:\n rinput = int(input(\"Enter number of reps performed: \"))\n except ValueError:\n print(\"Error! Input must be an integer\")\n continue\n break\n while True:\n try:\n winput = float(input(\"Enter weight lifted: \"))\n except ValueError:\n print(\"Error! Input must be a number\")\n continue\n break\n print(\"%s of %d sets, %d reps at %5.2f kilos recorded.\"\n % (liftname,sinput,rinput,winput))\n self.lifts[liftname] = [sinput,rinput,winput]\n break\n print(\"Unrecognized input, try again\")\n break \n print(\"Lift not in default set of lifts, try again\")\n def calc_age(self,bdate):\n today = date.today()\n birthday = bdate.replace(year=today.year)\n if birthday > today:\n return today.year - bdate.year - 1\n else:\n return today.year - bdate.year \n def calc_maint(self,wt,ht,ag,mult):\n return mult*(66 + 13.7*wt + 5*ht - 4.7*ag)\n","sub_path":"subs_main.py","file_name":"subs_main.py","file_ext":"py","file_size_in_byte":8627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"235544352","text":"\"\"\"careertest URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url, include\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n url('cjb/', include('cjb.urls')),\n url('pjs/', include('pjs.urls')),\n url('zjj/', include('zjj.urls')),\n url('fxj/', include('fxj.urls')),\n url('qsj/', include('qsj.urls'))\n]\n","sub_path":"h_project_of_course/django/careertest/careertest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"324870380","text":"import nltk\nfrom nltk import pos_tag\n\n\n\n# POS tag pattern extraction\ndef pattern(text):\n patt = []\n sentence = nltk.word_tokenize(text)\n sent = pos_tag(sentence)\n force_tags = {'declare': 'VB', 'define': 'VB','write': 'VB', 'list': 'VB' , 'creating': 'NN', 'stand': 'VB', 'oriented': 'NN',\n 'overriding': 'NN', 'object': 'NN', 'generate': 'VB', 'check': 'VB', 'following': 'VB','name': 'VB','provide': 'VB',\n 'compare': 'VB','contrast': 'VB','suppose': 'VB','use': 'VB','select': 'VB','state': 'VB','java': 'NN','correct': 'VB',\n 'assign': 'VB','add': 'VB','print': 'VB','implement': 'VB','output': 'VB','arrange': 'VB','modify': 'VB'}\n new_tagged_words = [(word, force_tags.get(word, tag)) for word, tag in sent]\n for i in new_tagged_words:\n patt.append('<'+i[1]+'>')\n patter = ''.join(patt)\n return patter\n\n# preprocessed input\nq = \"compare contrast array vector used java\"\nprint(\"POS tag Pattern\")\nprint(pattern(q))","sub_path":"RESEMPL/RuleBased/Feature Extraction.py","file_name":"Feature Extraction.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"232121039","text":"import time\nfrom datetime import datetime, timedelta\nfrom .form import Form\nfrom chronos.features.time_bank_transaction_feature import TimeBankTransactionFeature\n\n\nclass CreateTransactionForm(Form):\n\n fields = ('time', 'type')\n\n def __init__(self, user, data=None):\n super(CreateTransactionForm, self).__init__(data)\n self.user = user\n\n def validate(self):\n time = self.data.get('time')\n type = self.data.get('type')\n if not type:\n self.set_error('type', 'Required')\n if not time or not time.strip():\n self.set_error('time', 'Required')\n return\n seconds = self.time_string_to_seconds(time)\n if seconds < 0:\n self.set_error('time', 'Invalid time')\n return\n if seconds == 0:\n self.set_error('time', 'Cannot be zero')\n\n def time_string_to_seconds(self, string):\n if not string:\n return 0\n string = string.strip()\n patterns = ['%Ss', '%Mm', '%Mm %Ss', '%Hh', '%Hh %Mm', '%Hh %Mm %Ss']\n for pattern in patterns:\n seconds = self.parse_time_string(string, pattern)\n if seconds >= 0:\n break;\n return seconds\n\n def parse_time_string(self, string, pattern):\n try:\n time_struct = time.strptime(string, pattern)\n timestamp = time.mktime(time_struct)\n date = datetime.fromtimestamp(timestamp)\n delta = timedelta(hours=date.hour, minutes=date.minute, seconds=date.second)\n return delta.total_seconds()\n except:\n return -1\n\n def create_transaction(self):\n seconds = self.time_string_to_seconds(self.data.get('time'))\n if self.data.get('type') == 'debit':\n seconds *= -1\n feature = TimeBankTransactionFeature(self.user)\n feature.create_transaction(self.user, seconds)\n","sub_path":"chronos/web/forms/create_transaction_form.py","file_name":"create_transaction_form.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"339784029","text":"#!/usr/bin/python3\n\nimport socket\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time\n\nimport requests\nimport polyglot\nfrom polyglot.mapping import Embedding\nimport requests, tweepy, re\nimport time\n\nhostName = \"\"\nhostPort = 2000\n\n# Define categories\nCATEGORIES = [[\"bostäder\", \"bostad\", \"byggande\", \"bygger\"], [\"polis\", \"försvar\", \"brott\", \"kriminalitet\"],\n [\"sjukvård\", \"hälsa\", \"sjukhus\", \"välfärd\", \"välfärden\"]]\nCATEGORY_NAMES = [\"Bostäder\", \"Polisen\", \"Sjukvården\"]\n\n\ndef load_from_config(config_file=\"settings.config\"):\n with open(config_file) as f:\n settings = {item[0].strip(): item[1].strip() for item in [l.split(\"=\") for l in f.readlines()]}\n return settings\n\n\ndef download_language_model():\n from polyglot.downloader import downloader\n downloader.download(\"embeddings2.sv\")\n\n\ndef categorize_tweets(currentTwitterAccount, n_max_tweets=5, settings=None):\n if not settings:\n settings = load_from_config()\n\n subscription_key = settings[\"subscription_key\"]\n api_url = \"https://westcentralus.api.cognitive.microsoft.com/text/analytics/v2.0/\"\n key_phrase_api_url = api_url + \"keyPhrases\"\n language_api_url = api_url + \"languages\"\n\n embeddings = Embedding.load(settings[\"model_location\"])\n\n consumer_key = settings[\"consumer_key\"]\n consumer_secret = settings[\"consumer_secret\"]\n access_token = settings[\"access_token\"]\n access_token_secret = settings[\"access_token_secret\"]\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = 
tweepy.API(auth)\n\n # Fetch swedish tweets\n\n def language_check(string):\n headers = {\"Ocp-Apim-Subscription-Key\": subscription_key}\n response = requests.post(language_api_url, headers=headers, json={\"documents\": [{\"id\": 1, \"text\": string}]})\n if response.ok:\n return response.json()[\"documents\"][0][\"detectedLanguages\"][0][\"iso6391Name\"]\n else:\n if response.status_code == 429:\n time.sleep(1)\n return language_check(string)\n response.raise_for_status()\n\n documents = {\"documents\": []}\n tweets_raw = []\n i = 0\n for tweet in tweepy.Cursor(api.user_timeline, id=currentTwitterAccount, tweet_mode=\"extended\").items(n_max_tweets):\n # removing the http link at the end of the text\n result = re.sub(r\"http\\S+\", \"\", tweet.full_text)\n if language_check(result) == \"sv\":\n documents['documents'].append({'id': i, 'language': 'sv', 'text': result})\n tweets_raw.append((result, tweet.created_at))\n i += 1\n\n ### Extract key words\n\n headers = {\"Ocp-Apim-Subscription-Key\": subscription_key}\n response = requests.post(key_phrase_api_url, headers=headers, json=documents)\n key_phrases = response.json()\n\n # Parse key words\n key_words = [[y for y in x.values()][0] for x in key_phrases[\"documents\"]]\n key_words = [[y.split(\" \") for y in x] for x in key_words]\n key_words = [[y.strip() for sublist in l for y in sublist] for l in key_words]\n\n ### Determine closest category for the sets of key words\n\n def embedding_distances(word, category): # Adapter to handle missing words for embedding model\n try:\n return embeddings.distances(word, category)\n except:\n return [1e16] # If word is not present, return big integer..\n\n def topic(word): # Determine category score for word\n topic_list = [embedding_distances(word.lower(), category) for category in\n CATEGORIES] # compute distances to categories\n topic_list = [min(l) for l in topic_list] # compute average of each sublist\n min_value = min(topic_list)\n return topic_list.index(min_value), min_value\n\n topic_dists = [[topic(word) for word in l] for l in key_words]\n\n def cluster_topics(topic_dist):\n topic_dict = {}\n for t in topic_dist:\n if t[0] in topic_dict:\n topic_dict[t[0]] = (min(topic_dict[t[0]][0], t[1]), topic_dict[t[0]][1] + 1)\n else:\n topic_dict[t[0]] = (t[1], 1)\n topics = [(key, value[0]) for key, value in topic_dict.items()]\n values = [x[1] for x in topics]\n return topics[values.index(min(values))]\n\n categorized_tweets = [{\"text\": tweets_raw[i][0], \"category\": CATEGORY_NAMES[cluster_topics(topic_dists[i])[0]],\n \"time\": str(tweets_raw[i][1])} for i in range(len(topic_dists))]\n return categorized_tweets\n\n\nclass HttpServer(BaseHTTPRequestHandler):\n\n #\tGET is for clients geting the predi\n def do_GET(self):\n temp = categorize_tweets(currentTwitterAccount)\n self.send_response(200)\n self.wfile.write(bytes(str(temp), \"latin1\"))\n\n #\tPOST is for submitting data.\n def do_POST(self):\n print(\"incomming http: \", self.path)\n\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n self.send_response(200)\n\n client.close()\n\nif __name__ == '__main__':\n currentTwitterAccount = \"shekarabi\"\n\n myServer = HTTPServer((hostName, hostPort), HttpServer)\n print(time.asctime(), \"Server Starts - %s:%s\" % (hostName, hostPort))\n\n try:\n myServer.serve_forever()\n except KeyboardInterrupt:\n pass\n\n myServer.server_close()\n print(time.asctime(), \"Server Stops - %s:%s\" % 
(hostName, hostPort))\n","sub_path":"nlp/PyServer/PyServer.py","file_name":"PyServer.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"70573523","text":"\ndef threeSum(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n res = []\n nums.sort()\n length = len(nums)\n for i in range(length-2):\n if(nums[i]>0):break\n if (i>0) and (nums[i]==nums[i-1]):continue\n l = i+1\n r = length-1\n while l<r:\n s = nums[i]+nums[l]+nums[r]\n if s<0:\n l += 1\n elif s>0:\n r -= 1\n else:\n res.append([nums[i],nums[l],nums[r]])\n while (l<r) and (nums[l]==nums[l+1]): l += 1\n while (l<r) and (nums[r]==nums[r-1]): r -= 1\n l += 1\n r -= 1\n return res\n <td>{}</td>\n <td>{}</td>\n <td>{}</td>\n <td>{}</td>\n <td>{}</td>\n </tr>\\n \"\"\".format(\n self.user.strip(),\n self.tile.strip(),\n self.host_name.strip(),\n self.ip.strip(),\n status))\n\nthreads = [IsDeviceAlive() for x in range(1000)]\n\nfor t in threads:\n t.start()\n\nfor t in threads:\n t.join()\n\nf.close()\n\nenv = Environment(loader=PackageLoader('wrapper', 'templates'))\nenv.filters['content'] = content\nenv.filters['timestamp'] = timestamp\ntemplate = env.get_template('base.html')\n\nreport_name = str(uuid.uuid4()) + \".html\"\nwith open(report_name, \"w\") as f:\n f.write(template.render(status_file=\"temp\", source_file=\"lvsw_host_excel\"))\n\nprint(\"Report {} generated\".format(report_name))\n","sub_path":"IsAlive/lsvw_is_alive.py","file_name":"lsvw_is_alive.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"542682282","text":"# Funções utilizadas no trabalho 1 (T1)\r\n\r\nimport numpy as np\r\n\r\ndef custo_reglin_regularizada(theta, X, y, _lambda):\r\n # Quantidade de exemplos\r\n m = len(X)\r\n theta = np.matrix(theta)\r\n\r\n # não considera theta0 para o cálculo\r\n theta_j = theta[:,1:]\r\n regularizacao = (_lambda /(2 * m)) * np.sum(theta_j.dot(theta_j.T)) \r\n\r\n erro = X.dot(theta.T) - y\r\n\r\n # Computa a função de custo J\r\n J = (np.sum(np.power(erro, 2)))/ (2 * m) \r\n \r\n return J + regularizacao\r\n\r\ndef gd_reglin_regularizada(theta, X, y, _lambda):\r\n m = len(X)\r\n theta = np.matrix(theta)\r\n X = np.matrix(X)\r\n y = np.matrix(y)\r\n\r\n erro = (X.dot(theta.T)) - y\r\n \r\n gradient = X.T.dot(erro) / m\r\n\r\n theta_j = theta[:,1:]\r\n regularizacao = (_lambda / m) * theta_j\r\n # insere zero como termo de regularização para theta0\r\n regularizacao = np.insert(regularizacao, 0, 0, axis=1)\r\n\r\n return gradient + regularizacao.T\r\n\r\ndef minimizar_funcao(theta, X, y, _lambda):\r\n return opt.fmin_tnc(func = custo_reglin_regularizada,\r\n x0 = theta,\r\n fprime = gd_reglin_regularizada,\r\n args = (X, y, _lambda))","sub_path":"Aprendizado de Máquinas/Trabalho 2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"529756243","text":"n = input()\r\narr = [int(x) for x in input().split()]\r\n\r\nleft = [0]\r\nfor val in arr[:-1]:\r\n left.append((val + left[-1]) // 2)\r\n \r\nright = [0]\r\nfor val in reversed(arr[1:]):\r\n right.append((val + right[-1]) // 2)\r\nright.reverse()\r\n\r\nbest = 0\r\n\r\nfor a, b, c in zip(arr, left, right):\r\n best = max(best, a + b + c)\r\n \r\nprint(best)","sub_path":"october circuits 2020/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"88289683","text":"# Copyright (C) 2019 GreenWaves Technologies\n# All rights reserved.\n\n# This 
software may be modified and distributed under the terms\n# of the BSD license. See the LICENSE file for details.\n\nimport json\nimport logging\nimport os\nimport pickle\nfrom pathlib import Path\n\nfrom generation.memory_device_info import MemoryDeviceInfos\nfrom graph.graph_identity import GraphIdentity\nfrom graph.matches.matches import get_fusion\nfrom graph.nngraph import NNGraph\nfrom importer.importer import create_graph\nfrom quantization.qtype import QType\nfrom quantization.quantization_record import QuantizationRecord\nfrom utils.node_id import NodeId\n\nLOG = logging.getLogger('nntool.'+__name__)\n\nclass StateEncoder(json.JSONEncoder):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n# pylint: disable=no-self-use, method-hidden\n def default(self, o):\n if isinstance(o, NodeId):\n return {'__type': 'NodeId', 'val': o.to_json_str()}\n if isinstance(o, QuantizationRecord):\n return o.to_dict()\n if isinstance(o, QType):\n return {'__type': 'QType', 'bits': o.bits, 'q': o.q, 'signed': o.signed}\n if isinstance(o, MemoryDeviceInfos):\n return {'__type': 'MemoryDeviceInfos', 'infos': o.todict()}\n # Let the base class default method raise the TypeError\n return json.JSONEncoder.default(self, o)\n\nclass StateDecoder(json.JSONDecoder):\n def __init__(self, *args, **kwargs):\n json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)\n\n# pylint: disable=no-self-use, method-hidden\n def object_hook(self, obj):\n if '__type' in obj:\n if obj['__type'] == 'QType':\n return QType(bits=obj['bits'], q=obj['q'], signed=obj['signed'])\n if obj['__type'] == 'MemoryDeviceInfos':\n return MemoryDeviceInfos.fromdict(obj['infos'])\n if obj['__type'] == 'NodeId':\n return NodeId.from_json_str(obj['val'])\n if obj['__type'].startswith('QuantizationRecord'):\n return QuantizationRecord.from_dict(obj)\n\n if 'qstats' in obj:\n # fix qstats int dictionary index. 
JSON converts to string.\n obj['qstats'] = {int(k):v for k, v in obj['qstats'].items()}\n return obj\n\nSTATE_EXTENSION = \".json\"\nARRS_EXTENSION = \".nnparam\"\n\ndef dump_state(G: NNGraph,\n include_parameters=True,\n state_path=None,\n extra=None):\n\n parameters = get_parameters(G) if include_parameters else {}\n\n if state_path is not None:\n graph_base, _ = os.path.splitext(state_path)\n if os.path.isdir(graph_base):\n graph_base = os.path.join(graph_base,\n os.path.basename(G.graph_identity.filename))\n else:\n graph_base, _ = os.path.splitext(G.graph_identity.filename)\n\n info_state = {\n 'identity': G.graph_identity.identity,\n 'info': convert_keys_to_str(G.info),\n 'load_parameters': include_parameters,\n 'name': G.name,\n 'extra': extra\n }\n\n state_filename = graph_base + STATE_EXTENSION\n LOG.info(\"dumping graph state to %s\", state_filename)\n with open(state_filename, \"w\") as json_fp:\n json.dump(info_state, json_fp, indent=2, cls=StateEncoder)\n\n if include_parameters:\n pickle_filename = graph_base + ARRS_EXTENSION\n LOG.info(\"dumping tensors to %s\", pickle_filename)\n with open(pickle_filename, \"wb\") as param_fp:\n pickle.dump(parameters, param_fp)\n\ndef convert_keys_to_str(info):\n if isinstance(info, list):\n return [convert_keys_to_str(elem) for elem in info]\n if isinstance(info, dict):\n if info and isinstance(list(info.keys())[0], NodeId):\n return {'NodeId_' + k.to_json_str():convert_keys_to_str(v) for k, v in info.items()}\n else:\n return {k:convert_keys_to_str(v) for k, v in info.items()}\n return info\n\ndef convert_str_to_keys(info):\n if isinstance(info, list):\n return [convert_keys_to_str(elem) for elem in info]\n if isinstance(info, dict):\n return {(NodeId.from_json_str(k[7:]) if k.startswith('NodeId_') else k): \\\n convert_str_to_keys(v) for k, v in info.items()}\n return info\n\ndef get_parameters(G):\n parameters = {}\n for _, pnode, _, fnode in G.nodes_iterator():\n anode = pnode if not fnode else fnode\n parameters[NodeId(pnode, fnode)] = anode.get_parameters()\n return parameters\n\ndef set_parameters(G, parameters):\n for _, pnode, _, fnode in G.nodes_iterator():\n anode = pnode if not fnode else fnode\n anode.set_parameters(parameters[NodeId(pnode, fnode)])\n\ndef load_state(graph_file: str, value_cache=None, return_extra=False):\n graph_base, _ = os.path.splitext(graph_file)\n state_filename = graph_base + STATE_EXTENSION\n state_file = Path(state_filename)\n\n LOG.info(\"loading graph state from %s\", state_filename)\n if not state_file.is_file():\n raise ValueError(\"state file not found\")\n with state_file.open('r') as json_fp:\n info_state = json.load(json_fp, cls=StateDecoder)\n\n info_state['info'] = convert_str_to_keys(info_state['info'])\n\n if info_state['load_parameters']:\n pickle_filename = graph_base + ARRS_EXTENSION\n LOG.info(\"loading tensors from %s\", pickle_filename)\n arrs_file = Path(pickle_filename)\n if not arrs_file.is_file():\n raise ValueError(\"arrays file not found\")\n with arrs_file.open('rb') as arrs_fp:\n parameters = pickle.load(arrs_fp)\n else:\n parameters = None\n\n # Here load the orignal graph and replay the transforms that were done to it\n opts = {\n 'load_tensors': False,\n }\n # Retrieve the identity of the saved state\n identity = GraphIdentity(None)\n identity.identity = info_state['identity']\n\n LOG.info(\"loading graph from %s\", identity.filename)\n G = create_graph(identity.filename, opts=opts)\n if 'name' in info_state:\n G.name = info_state['name']\n G.add_dimensions()\n\n if 
identity.is_adjusted:\n # If weights were saved then don't reshaoe them since it was already done\n # before they were saved\n LOG.info(\"adjusting dimensions\")\n G.adjust_order(reshape_weights=not info_state['load_parameters'])\n G.add_dimensions()\n\n if identity.is_fused:\n LOG.info(\"fusing nodes\")\n # replay the fusions that were carried out\n for fusion_name in identity.fusions:\n fusion = get_fusion(fusion_name)\n fusion.match(G)\n G.add_dimensions()\n\n set_parameters(G, parameters)\n # Update the identity to match the saved graph\n G.info = info_state['info']\n G.graph_identity = identity\n G.value_cache = value_cache\n if return_extra:\n return G, info_state['extra']\n return G\n","sub_path":"tools/nntool/utils/new_param_state.py","file_name":"new_param_state.py","file_ext":"py","file_size_in_byte":6916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"225683997","text":"# -*- coding: UTF-8 -*-\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nos.environ['KERAS_BACKEND'] = 'tensorflow'\r\nimport keras.backend as K\r\nK.set_image_dim_ordering('tf')\r\n\r\nfrom nltk.tokenize import TweetTokenizer\r\nimport datetime\r\nimport lightgbm as lgb\r\nfrom scipy import stats\r\nfrom scipy.sparse import hstack, csr_matrix\r\nfrom sklearn.model_selection import train_test_split, cross_val_score\r\n\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.util import ngrams\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom sklearn import cross_validation\r\npd.set_option('max_colwidth',400)\r\n\r\nimport re\r\n\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, GRU, CuDNNGRU, CuDNNLSTM, BatchNormalization\r\nfrom keras.layers import Bidirectional, GlobalMaxPool1D, MaxPooling1D, Add, Flatten\r\nfrom keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D\r\nfrom keras.models import Model, load_model\r\nfrom keras import initializers, regularizers, constraints, optimizers, layers, callbacks\r\nfrom keras.engine import InputSpec, Layer\r\nfrom keras.optimizers import Adam\r\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, Callback, EarlyStopping\r\n\r\n\r\ndef build_model1(lr=0.0, lr_d=0.0, units=0, spatial_dr=0.0, kernel_size1=3, kernel_size2=2, dense_units=128, dr=0.1, conv_size=32):\r\n file_path = \"best_model.hdf5\"\r\n check_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\r\n save_best_only = True, mode = \"min\")\r\n early_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 3)\r\n \r\n inp = Input(shape = (max_len,))\r\n x = Embedding(19479, embed_size, weights = [embedding_matrix], trainable = False)(inp)\r\n x1 = SpatialDropout1D(spatial_dr)(x)\r\n\r\n x_gru = Bidirectional(CuDNNGRU(units, return_sequences = True))(x1)\r\n x1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(x_gru)\r\n avg_pool1_gru = GlobalAveragePooling1D()(x1)\r\n max_pool1_gru = GlobalMaxPooling1D()(x1)\r\n \r\n x3 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(x_gru)\r\n 
avg_pool3_gru = GlobalAveragePooling1D()(x3)\r\n max_pool3_gru = GlobalMaxPooling1D()(x3)\r\n \r\n x_lstm = Bidirectional(CuDNNLSTM(units, return_sequences = True))(x1)\r\n x1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(x_lstm)\r\n avg_pool1_lstm = GlobalAveragePooling1D()(x1)\r\n max_pool1_lstm = GlobalMaxPooling1D()(x1)\r\n \r\n x3 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(x_lstm)\r\n avg_pool3_lstm = GlobalAveragePooling1D()(x3)\r\n max_pool3_lstm = GlobalMaxPooling1D()(x3)\r\n \r\n \r\n x = concatenate([avg_pool1_gru, max_pool1_gru, avg_pool3_gru, max_pool3_gru,\r\n avg_pool1_lstm, max_pool1_lstm, avg_pool3_lstm, max_pool3_lstm])\r\n x = BatchNormalization()(x)\r\n x = Dropout(dr)(Dense(dense_units, activation='relu') (x))\r\n x = BatchNormalization()(x)\r\n x = Dropout(dr)(Dense(int(dense_units / 2), activation='relu') (x))\r\n x = Dense(5, activation = \"sigmoid\")(x)\r\n model = Model(inputs = inp, outputs = x)\r\n model.compile(loss = \"binary_crossentropy\", optimizer = Adam(lr = lr, decay = lr_d), metrics = [\"accuracy\"])\r\n history = model.fit(X_train, y_ohe, batch_size = 128, epochs = 20, validation_split=0.1, \r\n verbose = 1, callbacks = [check_point, early_stop])\r\n model = load_model(file_path)\r\n return model\r\n\r\ndef build_model2(lr=0.0, lr_d=0.0, units=0, spatial_dr=0.0, kernel_size1=3, kernel_size2=2, dense_units=128, dr=0.1, conv_size=32):\r\n file_path = \"best_model.hdf5\"\r\n check_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\r\n save_best_only = True, mode = \"min\")\r\n early_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 3)\r\n\r\n inp = Input(shape = (max_len,))\r\n x = Embedding(19479, embed_size, weights = [embedding_matrix], trainable = False)(inp)\r\n x1 = SpatialDropout1D(spatial_dr)(x)\r\n\r\n x_gru = Bidirectional(CuDNNGRU(units, return_sequences = True))(x1)\r\n x_lstm = Bidirectional(CuDNNLSTM(units, return_sequences = True))(x1)\r\n \r\n x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(x_gru)\r\n avg_pool1_gru = GlobalAveragePooling1D()(x_conv1)\r\n max_pool1_gru = GlobalMaxPooling1D()(x_conv1)\r\n \r\n x_conv2 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(x_gru)\r\n avg_pool2_gru = GlobalAveragePooling1D()(x_conv2)\r\n max_pool2_gru = GlobalMaxPooling1D()(x_conv2)\r\n \r\n \r\n x_conv3 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(x_lstm)\r\n avg_pool1_lstm = GlobalAveragePooling1D()(x_conv3)\r\n max_pool1_lstm = GlobalMaxPooling1D()(x_conv3)\r\n \r\n x_conv4 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(x_lstm)\r\n avg_pool2_lstm = GlobalAveragePooling1D()(x_conv4)\r\n max_pool2_lstm = GlobalMaxPooling1D()(x_conv4)\r\n \r\n \r\n x = concatenate([avg_pool1_gru, max_pool1_gru, avg_pool2_gru, max_pool2_gru,\r\n avg_pool1_lstm, max_pool1_lstm, avg_pool2_lstm, max_pool2_lstm])\r\n x = BatchNormalization()(x)\r\n x = Dropout(dr)(Dense(dense_units, activation='relu') (x))\r\n x = BatchNormalization()(x)\r\n x = Dropout(dr)(Dense(int(dense_units / 2), activation='relu') (x))\r\n x = Dense(5, activation = \"sigmoid\")(x)\r\n model = Model(inputs = inp, outputs = x)\r\n model.compile(loss = \"binary_crossentropy\", optimizer = Adam(lr = lr, decay = lr_d), metrics = [\"accuracy\"])\r\n history = 
model.fit(X_train, y_ohe, batch_size = 128, epochs = 20, validation_split=0.1, \r\n verbose = 1, callbacks = [check_point, early_stop])\r\n model = load_model(file_path)\r\n return model\r\n\r\n\r\ntrain = pd.read_csv('data/train.tsv', sep=\"\\t\")\r\ntest = pd.read_csv('data/test.tsv', sep=\"\\t\")\r\nsub = pd.read_csv('data/sampleSubmission.csv', sep=\",\")\r\n\r\ntokenizer = TweetTokenizer()\r\nvectorizer = TfidfVectorizer(ngram_range=(1, 2), tokenizer=tokenizer.tokenize)\r\nfull_text = list(train['Phrase'].values) + list(test['Phrase'].values)\r\nvectorizer.fit(full_text)\r\n# train_vectorized = vectorizer.transform(train['Phrase'])\r\n# test_vectorized = vectorizer.transform(test['Phrase'])\r\ny = train['Sentiment']\r\n\r\ntk = Tokenizer(lower = True, filters='')\r\ntk.fit_on_texts(full_text)\r\n\r\ntrain_tokenized = tk.texts_to_sequences(train['Phrase'])\r\ntest_tokenized = tk.texts_to_sequences(test['Phrase'])\r\n\r\nmax_len = 50\r\nX_train = pad_sequences(train_tokenized, maxlen = max_len)\r\nX_test = pad_sequences(test_tokenized, maxlen = max_len)\r\n\r\nembedding_path = \"data/crawl-300d-2M.vec\"\r\n\r\nembed_size = 300\r\nmax_features = 30000\r\n\r\ndef get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')\r\nembedding_index = dict(get_coefs(*o.strip().split(\" \")) for o in open(embedding_path))\r\n\r\nword_index = tk.word_index\r\nnb_words = min(max_features, len(word_index))\r\nembedding_matrix = np.zeros((nb_words + 1, embed_size))\r\nfor word, i in word_index.items():\r\n if i >= max_features: continue\r\n embedding_vector = embedding_index.get(word)\r\n if embedding_vector is not None: embedding_matrix[i] = embedding_vector\r\n\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nohe = OneHotEncoder(sparse=False)\r\ny_ohe = ohe.fit_transform(y.values.reshape(-1, 1))\r\n\r\n\r\nmodel1 = build_model1(lr = 1e-3, lr_d = 1e-10, units = 64, spatial_dr = 0.3, kernel_size1=3, kernel_size2=2, dense_units=32, dr=0.1, conv_size=32)\r\nprint('model1 done')\r\nmodel2 = build_model1(lr = 1e-3, lr_d = 1e-10, units = 128, spatial_dr = 0.5, kernel_size1=3, kernel_size2=2, dense_units=64, dr=0.2, conv_size=32)\r\nprint('model2 done')\r\nmodel3 = build_model2(lr = 1e-4, lr_d = 0, units = 64, spatial_dr = 0.5, kernel_size1=4, kernel_size2=3, dense_units=32, dr=0.1, conv_size=32)\r\nprint('model3 done')\r\nmodel4 = build_model2(lr = 1e-3, lr_d = 0, units = 64, spatial_dr = 0.5, kernel_size1=3, kernel_size2=3, dense_units=64, dr=0.3, conv_size=32)\r\nprint('model4 done')\r\nmodel5 = build_model2(lr = 1e-3, lr_d = 1e-7, units = 64, spatial_dr = 0.3, kernel_size1=3, kernel_size2=3, dense_units=64, dr=0.4, conv_size=64)\r\nprint('model5 done')\r\n\r\n\r\npred1 = model1.predict(X_test, batch_size = 1024, verbose = 1)\r\npred = pred1\r\npred2 = model2.predict(X_test, batch_size = 1024, verbose = 1)\r\npred += pred2\r\npred3 = model3.predict(X_test, batch_size = 1024, verbose = 1)\r\npred += pred3\r\npred4 = model4.predict(X_test, batch_size = 1024, verbose = 1)\r\npred += pred4\r\npred5 = model5.predict(X_test, batch_size = 1024, verbose = 1)\r\npred += pred5\r\nprint('predict done')\r\n\r\npredictions = np.round(np.argmax(pred, axis=1)).astype(int)\r\nsub['Sentiment'] = predictions\r\nsub.to_csv(\"blend.csv\", index=False)","sub_path":"gru-in-keras.py","file_name":"gru-in-keras.py","file_ext":"py","file_size_in_byte":9377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"208421952","text":"from bs4 import BeautifulSoup\r\nimport 
requests\r\nimport html2text\r\nimport pandas as pd\r\n\r\n#create states to loop through, put a dash instead of space to work for the urls\r\nstates = [\"Alabama\",\"Alaska\",\"Arizona\",\"Arkansas\",\"California\",\"Colorado\",\r\n          \"Connecticut\",\"Delaware\",\"Florida\",\"Georgia\",\"Hawaii\",\"Idaho\",\"Illinois\",\r\n          \"Indiana\",\"Iowa\",\"Kansas\",\"Kentucky\",\"Louisiana\",\"Maine\",\"Maryland\",\r\n          \"Massachusetts\",\"Michigan\",\"Minnesota\",\"Mississippi\",\"Missouri\",\"Montana\",\r\n          \"Nebraska\",\"Nevada\",\"New-Hampshire\",\"New-Jersey\",\"New-Mexico\",\"New-York\",\r\n          \"North-Carolina\",\"North-Dakota\",\"Ohio\",\"Oklahoma\",\"Oregon\",\"Pennsylvania\",\r\n          \"Rhode-Island\",\"South-Carolina\",\"South-Dakota\",\"Tennessee\",\"Texas\",\"Utah\",\r\n          \"Vermont\",\"Virginia\",\"Washington\",\"West-Virginia\",\"Wisconsin\",\"Wyoming\",\"District-of-Columbia\"]\r\n\r\n#make the states lowercase\r\nstates = [x.lower() for x in states]\r\n\r\n#create a range for collapsibles to then use for format\r\ncollapsibles = [x for x in range(14)]\r\ncollapsibles = collapsibles[1:]\r\n\r\n#create the column names\r\ntitles = ['State',\r\n'Voter registration deadlines',\r\n'Election day registration',\r\n'Voter registration rules',\r\n'How to register to vote',\r\n'Absentee ballot application deadlines',\r\n'Absentee ballot rules',\r\n'How to get an absentee ballot',\r\n'Once you get your absentee ballot',\r\n'Early voting starts',\r\n'Early voting ends',\r\n'In-person voter ID requirements',\r\n'Absentee voter ID requirements',\r\n'Voted absentee ballots are due']\r\n\r\ndf = pd.DataFrame()\r\n\r\nfor i in states:\r\n    r = requests.get(\"https://www.vote.org/state/{0}/\".format(i))\r\n    #soup = BeautifulSoup(r.text.encode('utf-8').strip(), 'html.parser')\r\n    soup = BeautifulSoup(r.text.encode(\"ascii\", errors=\"ignore\"), 'html.parser')\r\n    df2 = pd.DataFrame({'State':i},index=[0])\r\n    for x in collapsibles:\r\n        voter_info = html2text.html2text(str(soup.find(id=\"collapsible-{0}\".format(x))))\r\n        df2['{0}'.format(titles[x])] = voter_info #append all the collapsibles\r\n    offsite_links = [a['href'] for a in soup.find_all('a', href=True)]\r\n    link_names = [a.text for a in soup.find_all('a', href=True)]\r\n    offsite_links = offsite_links[38:44]\r\n    link_names = link_names[38:44]\r\n    # use a separate loop variable so the state name above is not shadowed\r\n    for j in range(6):\r\n        df2['offsite_link{0}'.format(j)] = offsite_links[j]\r\n        df2['offsite_link_desc{0}'.format(j)] = link_names[j]\r\n        #df2['offsite_desc'] = link_names[j]\r\n    #df3 = pd.DataFrame([ x.split('**') for x in df2['Voter registration deadlines'].tolist() ])\r\n    #df4 = pd.DataFrame([ x.split('**') for x in df2.iloc[:,12].tolist() ])\r\n    df = df.append(df2, ignore_index = True)\r\n    #df = pd.concat([df, df3,df4], axis=1)\r\n#df = df.transpose()\r\n#print(df)\r\ndf.to_csv(\"voter_reg_states_info.csv\")","sub_path":"voter_reg_info.py","file_name":"voter_reg_info.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"197726770","text":"import base64\nfrom os import path\n\nfrom docusign_esign import EnvelopesApi, RecipientViewRequest, Document, Signer, EnvelopeDefinition, SignHere, Tabs, \\\n    Recipients\nfrom flask import session, url_for, request\n\nfrom ..consts import authentication_method, demo_docs_path, pattern, signer_client_id\nfrom ..docusign import create_api_client\nfrom ..ds_config import DS_CONFIG\n\n\nclass Eg001Controller:\n    @staticmethod\n    def get_args():\n        \"\"\"Get request and session 
arguments\"\"\"\n # More data validation would be a good idea here\n # Strip anything other than characters listed\n # 1. Parse request arguments\n signer_email = pattern.sub(\"\", request.form.get(\"signer_email\"))\n signer_name = pattern.sub(\"\", request.form.get(\"signer_name\"))\n envelope_args = {\n \"signer_email\": signer_email,\n \"signer_name\": signer_name,\n \"signer_client_id\": signer_client_id,\n \"ds_return_url\": url_for(\"ds.ds_return\", _external=True),\n }\n args = {\n \"account_id\": session[\"ds_account_id\"],\n \"base_path\": session[\"ds_base_path\"],\n \"access_token\": session[\"ds_access_token\"],\n \"envelope_args\": envelope_args\n }\n return args\n\n @classmethod\n def worker(cls, args):\n \"\"\"\n 1. Create the envelope request object\n 2. Send the envelope\n 3. Create the Recipient View request object\n 4. Obtain the recipient_view_url for the signing ceremony\n \"\"\"\n envelope_args = args[\"envelope_args\"]\n # 1. Create the envelope request object\n envelope_definition = cls.make_envelope(envelope_args)\n\n # 2. call Envelopes::create API method\n # Exceptions will be caught by the calling function\n api_client = create_api_client(base_path=args[\"base_path\"], access_token=args[\"access_token\"])\n\n envelope_api = EnvelopesApi(api_client)\n results = envelope_api.create_envelope(account_id=args[\"account_id\"], envelope_definition=envelope_definition)\n\n envelope_id = results.envelope_id\n\n # 3. Create the Recipient View request object\n recipient_view_request = RecipientViewRequest(\n authentication_method=authentication_method,\n client_user_id=envelope_args[\"signer_client_id\"],\n recipient_id=\"1\",\n return_url=envelope_args[\"ds_return_url\"],\n user_name=envelope_args[\"signer_name\"],\n email=envelope_args[\"signer_email\"]\n )\n # 4. 
Obtain the recipient_view_url for the signing ceremony\n # Exceptions will be caught by the calling function\n results = envelope_api.create_recipient_view(\n account_id=args[\"account_id\"],\n envelope_id=envelope_id,\n recipient_view_request=recipient_view_request\n )\n\n return {\"envelope_id\": envelope_id, \"redirect_url\": results.url}\n\n @classmethod\n def make_envelope(cls, args):\n \"\"\"\n Creates envelope\n args -- parameters for the envelope:\n signer_email, signer_name, signer_client_id\n returns an envelope definition\n \"\"\"\n\n # document 1 (pdf) has tag /sn1/\n #\n # The envelope has one recipient.\n # recipient 1 - signer\n with open(path.join(demo_docs_path, DS_CONFIG[\"doc_pdf\"]), \"rb\") as file:\n content_bytes = file.read()\n base64_file_content = base64.b64encode(content_bytes).decode(\"ascii\")\n\n # Create the document model\n document = Document( # create the DocuSign document object\n document_base64=base64_file_content,\n name=\"Example document\", # can be different from actual file name\n file_extension=\"pdf\", # many different document types are accepted\n document_id=1 # a label used to reference the doc\n )\n\n # Create the signer recipient model\n signer = Signer(\n # The signer\n email=args[\"signer_email\"],\n name=args[\"signer_name\"],\n recipient_id=\"1\",\n routing_order=\"1\",\n # Setting the client_user_id marks the signer as embedded\n client_user_id=args[\"signer_client_id\"]\n )\n\n # Create a sign_here tab (field on the document)\n sign_here = SignHere(\n # DocuSign SignHere field/tab\n anchor_string=\"/sn1/\",\n anchor_units=\"pixels\",\n anchor_y_offset=\"10\",\n anchor_x_offset=\"20\"\n )\n\n # Add the tabs model (including the sign_here tab) to the signer\n # The Tabs object wants arrays of the different field/tab types\n signer.tabs = Tabs(sign_here_tabs=[sign_here])\n\n # Next, create the top level envelope definition and populate it.\n envelope_definition = EnvelopeDefinition(\n email_subject=\"Please sign this document sent from the Python SDK\",\n documents=[document],\n # The Recipients object wants arrays for each recipient type\n recipients=Recipients(signers=[signer]),\n status=\"sent\" # requests that the envelope be created and sent.\n )\n\n return envelope_definition\n","sub_path":"app/eg001_embedded_signing/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"231287749","text":"import logging\nimport formencode\n\nfrom formencode import htmlfill\n\nfrom simplesite.lib import helpers as h\n\nfrom pylons import request, response, session, tmpl_context as c, url\nfrom pylons.controllers.util import abort, redirect\nfrom pylons.decorators import validate\n\nfrom simplesite.model import Video, Tag, Association\nfrom simplesite.lib.base import BaseController, render\nfrom simplesite.model.meta import Session\n\n\nlog = logging.getLogger(__name__)\n\nclass NewVideoForm(formencode.Schema):\n\tallow_extra_fields = True\n\tfilter_extra_fields = True\n\ttitle = formencode.validators.String(not_empty=True)\n\turl = formencode.validators.String(not_empty=True)\n\nclass VideoController(BaseController):\n\n\tdef __before__(self):\n\t\tself.video_q = Session.query(Video)\n\n\tdef list(self):\n\t\tc.videos = self.video_q\n\t\treturn render('video/list.html')\n\n\tdef show(self, id=None):\n\t\tif id is None:\n\t\t\tabort(404)\n\t\tc.video = self.video_q.get(int(id))\n\t\tif c.video is 
None:\n\t\t\tabort(404)\n\t\treturn render('video/show.html')\n\n\tdef new(self):\n\t\ttags = Session.query(Tag).all()\n\t\treturn render('video/new.html', {'tags': tags})\n\n\t@validate(schema=NewVideoForm(), form='new')\n\tdef create(self):\n\t\tvideo = Video()\n\t\tfor k, v in self.form_result.items():\n\t\t\tsetattr(video, k, v)\n\n\t\ttag_list = request.params.getall('tag_id')\n\t\tfor tag_id in tag_list:\n\t\t\ttag = Session.query(Tag).filter_by(id=tag_id).first()\n\t\t\tvideo.tags.append(tag)\n\n\t\tSession.add(video)\n\t\tSession.commit()\n\t\treturn redirect(url(controller='video', action='list'))\n\n\tdef edit(self, id=None):\n\t\tif id is None:\n\t\t\tabort(404)\n\t\tvideo = self.video_q.filter_by(id=id).first()\n\t\tif video is None:\n\t\t\tabort(404)\n\t\tvalues = {\n\t\t\t'title': video.title,\n\t\t\t'url': video.url\n\t\t}\n\t\tc.title = video.title\n\t\treturn htmlfill.render(render('video/edit.html'), values)\n\n\t@validate(schema=NewVideoForm(), form='edit')\n\tdef save(self, id=None):\n\t\tvideo = self.video_q.filter_by(id=id).first()\n\t\tif video is None:\n\t\t\tabort(404)\n\t\tfor k, v in self.form_result.items():\n\t\t\tif getattr(video, k) != v:\n\t\t\t\tsetattr(video, k, v)\n\t\tSession.commit()\n\n\t\treturn redirect(url(controller='video', action='show', id=video.id))\n\n\tdef delete(self, id=None):\n\t\tif id is None:\n\t\t\tabort(404)\n\t\tvideo = self.video_q.filter_by(id=id).first()\n\t\tif video is None:\n\t\t\tabort(404)\n\t\tSession.delete(video)\n\t\tSession.commit()\n\t\treturn render('video/deleted.html')\n\n\n\n","sub_path":"SimpleSite/simplesite/controllers/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"476663858","text":"import unittest\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\nfrom numpy.testing import assert_array_equal\nfrom pandas.util.testing import assert_series_equal, assert_frame_equal\n\nfrom ram.strategy.intraday_reversion.src.intraday_return_simulator import *\n\n\nclass TestIntradayReturnSimulator(unittest.TestCase):\n\n def setUp(self):\n data = pd.DataFrame([])\n data['Ticker'] = ['SPY'] * 5\n data['Date'] = [dt.date(2010, 1, i) for i in range(1, 6)]\n data['signal'] = [0, 0, 1, 0, 1]\n self.data = data\n data2 = pd.DataFrame([[0, 0, 0], [1, -1, 0.01],\n [2, -2, 0.01], [-3., 3., 0.01]],\n index=[dt.time(9, 31), dt.time(9, 32),\n dt.time(9, 33), dt.time(9, 34)],\n columns=[dt.date(2010, 1, 1),\n dt.date(2010, 1, 2),\n dt.date(2010, 1, 3)])\n data2.index.name = 'Time'\n data2.columns.name = 'Date'\n self.data2 = data2\n\n def Xtest_get_returns(self):\n irs = IntradayReturnSimulator()\n returns_df = pd.DataFrame()\n returns_df['Date'] = [\n dt.date(2010, 1, 1), dt.date(2010, 1, 2), dt.date(2010, 1, 3),\n dt.date(2010, 1, 4), dt.date(2010, 1, 5)] * 2\n returns_df['Ticker'] = 'SPY'\n returns_df['Return'] = [10, 10, -10, 10, 10] + [5, -5, 5, -5, 5]\n returns_df['perc_take'] = [0.004] * 5 + [0.008] * 5\n returns_df['perc_stop'] = [0.002] * 10\n returns_df['signal'] = 1\n returns_df2 = returns_df.copy()\n returns_df2.Return *= -3\n returns_df2.signal = -1\n returns_df = returns_df.append(returns_df2).reset_index(True)\n irs._return_data = {}\n irs._return_data['SPY'] = returns_df\n #\n signals = self.data.copy()\n signals['perc_take'] = [0.004, 0.008, 0.004, 0.004, 0.008]\n signals['perc_stop'] = 0.002\n signals['signal'] = [0, -1, 1, 0, -1]\n result = irs.get_returns(signals)\n benchmark = 
pd.Series([0, 15, -10, 0, -15.],\n index=[dt.date(2010, 1, 1), dt.date(2010, 1, 2),\n dt.date(2010, 1, 3), dt.date(2010, 1, 4),\n dt.date(2010, 1, 5)])\n benchmark.index.name = 'Date'\n assert_series_equal(result[0], benchmark)\n\n def test_get_returns_from_signals(self):\n irs = IntradayReturnSimulator()\n longs = pd.Series([10, 10, -10, 10],\n index=[dt.date(2010, 1, i) for i in range(2, 6)])\n shorts = pd.Series([-20, -20, 20, -20],\n index=[dt.date(2010, 1, i) for i in range(2, 6)])\n signals = pd.DataFrame({\n 'Ticker': ['SPY'] * 6,\n 'Date': [dt.date(2010, 1, i) for i in range(1, 7)],\n 'signal': [1, 1, -1, 1, -1, 1]\n })\n result = irs._get_returns_from_signals(signals, longs, shorts)\n benchmark = pd.Series(\n [0, 10, -20, -10, -20, 0.], name='SPY',\n index=[dt.date(2010, 1, i) for i in range(1, 7)])\n benchmark.index.name = 'Date'\n assert_series_equal(result, benchmark)\n\n def test_get_responses(self):\n irs = IntradayReturnSimulator()\n # Add test bar data\n slippage = pd.Series([0.001, 0.001, 0.001],\n index=[dt.date(2010, 1, i) for i in range(1, 4)])\n tcost = slippage.copy()\n irs._hlc_rets_data['SPY'] = (self.data2, self.data2, self.data2)\n irs._cost_data['SPY'] = (slippage, tcost)\n result = irs.get_responses('SPY', 1, .3)\n benchmark = pd.DataFrame(index=[dt.date(2010, 1, 1),\n dt.date(2010, 1, 2),\n dt.date(2010, 1, 3)])\n benchmark['Ticker'] = 'SPY'\n benchmark['response'] = [1, -1, 0]\n benchmark.response = benchmark.response.astype(int)\n result.response = result.response.astype(int)\n assert_frame_equal(result, benchmark)\n\n def Xtest_preprocess_returns(self):\n irs = IntradayReturnSimulator()\n irs.preprocess_returns([0.004, 0.008], 0.002, 'SPY')\n\n def test_get_ticker_stats(self):\n returns = pd.DataFrame(index=[dt.date(2009, 12, 31),\n dt.date(2010, 1, 1),\n dt.date(2010, 1, 2),\n dt.date(2010, 1, 3),\n dt.date(2010, 1, 4)])\n returns['SPY'] = [0, 10, 10, -10, -10]\n returns['VXX'] = [0, np.nan, 10, 0, 0]\n returns['IWM'] = [0, 1, 2, 0, np.nan]\n irs = IntradayReturnSimulator()\n result = irs._get_ticker_stats(returns)\n self.assertAlmostEqual(result['win_percent_SPY'], 1/2.)\n self.assertAlmostEqual(result['win_percent_IWM'], 1)\n self.assertAlmostEqual(result['win_percent_VXX'], 1.0)\n self.assertAlmostEqual(result['participation_SPY'], 1.0)\n self.assertAlmostEqual(result['participation_IWM'], 2/3.)\n self.assertAlmostEqual(result['participation_VXX'], 1/3.)\n self.assertAlmostEqual(result['total_return_SPY'], 0)\n self.assertAlmostEqual(result['total_return_IWM'], 3)\n self.assertAlmostEqual(result['total_return_VXX'], 10)\n\n def tearDown(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"ram/strategy/intraday_reversion/tests/test_intraday_return_simulator.py","file_name":"test_intraday_return_simulator.py","file_ext":"py","file_size_in_byte":5425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"261278379","text":"import logging\n\n\nFORMAT = '%(asctime)s :: %(levelname)s :: %(name)s :: Line No %(lineno)d'\\\n ':: %(message)s'\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.ERROR)\n\nfh = logging.FileHandler('mymod_records.log')\nfh.setLevel(logging.ERROR)\n\nformatter = logging.Formatter(FORMAT)\nfh.setFormatter(formatter)\n\nlogger.addHandler(fh)\n\nlogger.error(\"This is a critical message!. 
Don't ignore it\")\n","sub_path":"D13/logging/marv/mymod.py","file_name":"mymod.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"391323502","text":"import os\nfrom flask import Flask, request, redirect, url_for, send_from_directory\nfrom werkzeug.utils import secure_filename\nimport zipfile\nimport json\n\nUPLOAD_FOLDER = 'uploads'\nALLOWED_EXTENSIONS = set(['zip'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\ndef unzipfile(filepath):\n    zf = zipfile.ZipFile(filepath, 'r')\n    zf.extractall(app.config['UPLOAD_FOLDER'])\n    for name in zf.namelist():\n        if name.rsplit('.', 1)[1] == 'shp':\n            return name\n\ndef shp2geojson(filename):\n    import subprocess\n    # HEROKU\n    subprocess.call(\"ogr2ogr -t_srs EPSG:4326 -f GeoJSON uploads/\"+filename+\".geojson \" + os.path.join(app.config['UPLOAD_FOLDER'], filename), shell=True)\n    # LOCAL DEV\n    # subprocess.call(\"/Library/Frameworks/GDAL.framework/Versions/Current/Programs/ogr2ogr -t_srs EPSG:4326 -f GeoJSON uploads/\"+filename+\".geojson \" + os.path.join(app.config['UPLOAD_FOLDER'], filename), shell=True)\n    return \"uploads/\"+filename+\".geojson\"\n\ndef open_geojson(geojson_path):\n\n    json_data=open(geojson_path)\n    data = json.load(json_data)\n    json_data.close()\n    return data\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n    if request.method == 'POST':\n        file = request.files['file']\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            shapefile_path = unzipfile('uploads/'+filename)\n            # srid = request.form['srid']\n            geojson_path = shp2geojson(shapefile_path)\n            data = open_geojson(geojson_path)\n            return json.dumps(data)\n\n    return '''\n    <!doctype html>\n    <title>Upload new File</title>\n    <h1>Upload new File</h1>\n    <form action=\"\" method=post enctype=multipart/form-data>\n      <p><input type=file name=file>\n         <input type=submit value=Upload>\n    </form>\n    '''\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n    return send_from_directory(app.config['UPLOAD_FOLDER'],\n                               filename)\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"550825818","text":"import os\n\ndef decrypt(file_name):\n    path = os.path.abspath(file_name)\n    my_file = open(path,'r')\n    for lines in my_file:\n        actual_line = my_file.readline() \n        a = actual_line.split() \n        print('\\n')\n        for x in a:\n            print(x[::2]+ ' ', end = \"\")\n        \ndecrypt(\"duplicated_chars.txt\")\n","sub_path":"week-03/day-2/doubled.py","file_name":"doubled.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"50040614","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass, field, asdict\nimport datetime\nimport typing as t\nimport uuid\n\nimport sqlalchemy\nfrom sqlalchemy.dialects.postgresql import JSONB, DATE, TIMESTAMP, UUID\nfrom sqlalchemy.ext.hybrid import ExprComparator, hybrid_property\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.schema import Table\n\n\nmetadata = sqlalchemy.MetaData()\n\ndeployment_table = Table(\n    \"deployments\",\n    metadata,\n    sqlalchemy.Column(\"id\", UUID(as_uuid=True), primary_key=True),\n    sqlalchemy.Column(\"uri\", sqlalchemy.String, index=False),\n    sqlalchemy.Column(\"name\", sqlalchemy.String, index=False, nullable=True),\n    sqlalchemy.Column(\"colour\", sqlalchemy.String, index=False, nullable=True),\n    sqlalchemy.Column(\"api_key\", sqlalchemy.String, index=False),\n)\n\ndatapoint_table = Table(\n    \"datapoints\",\n    metadata,\n    sqlalchemy.Column(\"id\", sqlalchemy.Integer, primary_key=True),\n    sqlalchemy.Column(\"sensor_name\", sqlalchemy.String, index=True),\n    sqlalchemy.Column(\"collected_at\", TIMESTAMP, index=True),\n    sqlalchemy.Column(\"deployment_id\", UUID(as_uuid=True), index=True),\n    sqlalchemy.Column(\"data\", JSONB),\n)\n\ndaily_summary_view = Table(\n    \"daily_summary\",\n    metadata,\n    sqlalchemy.Column(\"sensor_name\", sqlalchemy.String),\n    sqlalchemy.Column(\"data\", JSONB),\n    sqlalchemy.Column(\"count\", sqlalchemy.Integer),\n    info={\"is_view\": True},\n)\n\n\nclass DateEqualComparator(ExprComparator):\n    def __init__(self, fallback_expression, raw_expression):\n        # Do not try and find update expression from parent\n        super().__init__(None, fallback_expression, None)\n        self.raw_expression = raw_expression\n\n    def __eq__(self, other):\n        \"\"\" Returns True iff on the same day as other \"\"\"\n        other_date = sqlalchemy.cast(other, DATE)\n        return sqlalchemy.and_(\n            self.raw_expression >= other_date, self.raw_expression < other_date + 1,\n        )\n\n    def operate(self, op, *other, **kwargs):\n        other = [sqlalchemy.cast(date, DATE) for date in other]\n        return op(self.expression, *other, **kwargs)\n\n    def reverse_operate(self, op, other, **kwargs):\n        other = [sqlalchemy.cast(date, DATE) for date in other]\n        return op(other, self.expression, **kwargs)\n\n\n@dataclass\nclass DataPoint:\n    sensor_name: str\n    data: t.Any\n    deployment_id: uuid.UUID\n    id: t.Optional[int] = None\n    collected_at: datetime.datetime = field(default_factory=datetime.datetime.now)\n\n    @classmethod\n    def from_sql_result(cls, result) -> DataPoint:\n        return cls(**result._asdict())\n\n    def _asdict(self) -> t.Dict[str, t.Any]:\n        data = asdict(self)\n        if data[\"id\"] is None:\n            
del data[\"id\"]\n        return data\n\n    @hybrid_property\n    def collected_on_date(self):\n        return self.collected_at.date()\n\n    @collected_on_date.comparator  # type: ignore\n    def collected_on_date(cls):\n        return DateEqualComparator(\n            fallback_expression=sqlalchemy.cast(datapoint_table.c.collected_at, DATE),\n            raw_expression=datapoint_table.c.collected_at,\n        )\n\n\n@dataclass\nclass Deployment:\n    id: t.Optional[uuid.UUID]\n    uri: str\n    name: t.Optional[str]\n    colour: t.Optional[str]\n    api_key: t.Optional[str]\n\n    @classmethod\n    def from_sql_result(cls, result) -> Deployment:\n        return cls(**result._asdict())\n\n    def _asdict(self) -> t.Dict[str, t.Any]:\n        data = asdict(self)\n        return data\n\n\ndef main() -> None:\n    engine = sqlalchemy.create_engine(\n        \"postgresql+psycopg2://apd@localhost/apd\", echo=True\n    )\n    sm = sessionmaker(engine)\n    Session = sm()\n    if False:\n        metadata.create_all(engine)\n    print(Session.query(DataPoint).all())\n    pass\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"src/apd/aggregation/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"440796765","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n@Filename: handwriting.py\n@Author: yew1eb\n@Date: 2015/12/23 0023\n\"\"\"\n\n\nimport numpy as np\n\n\n\nfrom numpy import *\nimport csv\n\ndef load_data():\n    train_data = np.loadtxt('d:\\\\dataset\\\\digits\\\\train.csv', dtype=np.uint8,delimiter=',', skiprows=1)\n    test_data = np.loadtxt('d:\\\\dataset\\\\digits\\\\test.csv', dtype=np.uint8,delimiter=',', skiprows=1)\n    label = train_data[:,:1]\n    data = np.where(train_data[:, 1:]!=0, 1, 0)# normalize the data\n    test = np.where(test_data !=0, 1, 0)\n    return data, label, test\n\n\n# result is the list of predictions\n# csvName is the name of the csv file that stores the results\ndef save_result(result,csvName):\n    with open('d:\\\\dataset\\\\digits\\\\'+csvName,'wb') as myFile:\n        myWriter=csv.writer(myFile)\n        for i in result:\n            tmp=[]\n            tmp.append(i)\n            myWriter.writerow(tmp)\n\n\n# use scikit-learn's kNN package\nfrom sklearn.neighbors import KNeighborsClassifier\ndef knnClassify(train_data,train_label,test_data):\n    knnClf=KNeighborsClassifier()\n    knnClf.fit(train_data,ravel(train_label))\n    test_label=knnClf.predict(test_data)\n    save_result(test_label,'sklearn_knn_result.csv')\n    return test_label\n\n# use scikit-learn's SVM package\nfrom sklearn import svm\ndef svcClassify(train_data,train_label,test_data):\n    svcClf=svm.SVC(C=5.0) #default:C=1.0,kernel = 'rbf'. you can try kernel:‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’\n    svcClf.fit(train_data,ravel(train_label))\n    test_label=svcClf.predict(test_data)\n    save_result(test_label,'sklearn_SVC_C=5.0_Result.csv')\n    return test_label\n\n# use scikit-learn's naive Bayes packages, GaussianNB and MultinomialNB\nfrom sklearn.naive_bayes import GaussianNB # NB for Gaussian-distributed data\ndef GaussianNBClassify(train_data,train_label,test_data):\n    nbClf=GaussianNB()\n    nbClf.fit(train_data,ravel(train_label))\n    test_label=nbClf.predict(test_data)\n    save_result(test_label,'sklearn_GaussianNB_Result.csv')\n    return test_label\n\nfrom sklearn.naive_bayes import MultinomialNB # NB for multinomially distributed data\ndef MultinomialNBClassify(train_data,train_label,test_data):\n    nbClf=MultinomialNB(alpha=0.1) #default alpha=1.0,Setting alpha = 1 is called Laplace smoothing, while alpha < 1 is called Lidstone smoothing.\n    nbClf.fit(train_data,ravel(train_label))\n    test_label=nbClf.predict(test_data)\n    save_result(test_label,'sklearn_MultinomialNB_alpha=0.1_Result.csv')\n    return test_label\n\n\ndef digitRecognition():\n    train_data,train_label, test_data=load_data()\n\n    # try the different algorithms\n    result1=knnClassify(train_data,train_label,test_data)\n    #result2=svcClassify(train_data,train_label,test_data)\n    #result3=GaussianNBClassify(train_data,train_label,test_data)\n    #result4=MultinomialNBClassify(train_data,train_label,test_data)\n\n\nif __name__ == '__main__':\n    digitRecognition()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Kaggle-digit-recognizer/knn/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"382617483","text":"import unittest\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import testing as pdt\n\nfrom ...routines import matrix_balancing_1d, matrix_balancing_2d, matrix_bucket_rounding, aggregate_matrix\n\nclass TestMatrixBucketRounding(unittest.TestCase):\n\n    def test_small(self):\n        \"\"\" Test bucket rounding routine on a small matrix to various levels of rounding precision. \"\"\"\n        a = np.random.uniform(0, 1000, (5, 5))\n        for decimal in range(-2, 6):\n            a_rnd = matrix_bucket_rounding(a, decimal)\n            self._compare_matrix_sums(a_rnd, a, decimal)\n            self._compare_matrix_values(a_rnd, a, decimal)\n\n    def test_return_type(self):\n        \"\"\" Test that bucket rounding returns an integer or float dtype, where appropriate. \"\"\"\n        a = np.random.uniform(0, 1000, (5, 5))\n\n        # first test, float return\n        b = matrix_bucket_rounding(a, decimals=2)\n        self.assertEqual(b.dtype, a.dtype, \"dtype of bucket rounded matrix is not equal to dtype of input matrix\")\n        # second test, int return\n        b = matrix_bucket_rounding(a, decimals=0)\n        self.assertEqual(b.dtype, np.dtype('int32'), \"dtype of bucket rounded matrix is not integer\")\n\n    def test_large(self):\n        \"\"\" Test bucket rounding routine on a large matrix to various levels of rounding precision. \"\"\"\n        a = np.random.uniform(0, 1000, (1000, 1000))\n        for decimal in [-2, 0, 5]:\n            a_rnd = matrix_bucket_rounding(a, decimal)\n            self._compare_matrix_sums(a_rnd, a, decimal)\n            self._compare_matrix_values(a_rnd, a, decimal)\n\n    def test_pandas_import(self):\n        \"\"\"Test return type and values if matrix is passed as a Pandas DataFrame. 
\"\"\"\n a = np.random.uniform(0, 1000, (5, 5))\n df = pd.DataFrame(a)\n decimals = 3\n df_rnd = matrix_bucket_rounding(df, decimals=decimals)\n self._compare_matrix_sums(df.values, df_rnd.values, decimals)\n self._compare_matrix_values(df.values, df_rnd.values, decimals)\n self.assertEqual(type(df_rnd), pd.DataFrame, \"dtype of returned matrix is a Pandas DataFrame\")\n\n def _compare_matrix_sums(self, a, b, decimal):\n max_error = 0.5*(10.0 ** (-decimal))\n a_sum = np.sum(a)\n b_sum = np.sum(b)\n self.assertLessEqual(a_sum, b_sum + max_error, \"Bucket rounded matrix is not within a small margin of error\")\n self.assertGreaterEqual(a_sum, b_sum - max_error, \"Bucket rounded matrix is not within a small margin of error\")\n\n def _compare_matrix_values(self, a, b, decimal):\n max_error = 10.0 ** (-decimal)\n np.testing.assert_allclose(a, b, atol=max_error, rtol=0.0, \n err_msg=\"Bucket rounded matrix values are not within %f\" % (max_error))\n\nclass TestAggregateMatrix(unittest.TestCase):\n\n def setUp(self):\n self._square_symatrix = np.array([\n [4, 9, 1, 1, 9, 0, 4, 4, 9],\n [10, 0, 0, 10, 2, 8, 0, 10, 9],\n [8, 9, 9, 6, 7, 7, 4, 1, 8],\n [9, 8, 1, 10, 6, 7, 2, 1, 2],\n [3, 3, 10, 5, 3, 9, 7, 9, 4],\n [1, 5, 1, 1, 7, 4, 2, 9, 0],\n [4, 3, 4, 1, 5, 9, 3, 7, 5],\n [2, 3, 7, 2, 2, 10, 2, 3, 5],\n [5, 10, 4, 9, 1, 5, 4, 4, 7]\n ])\n self._numeric_index = pd.Index([10, 11, 12, 20, 21, 22, 30, 31, 32])\n self._square_symatrix = pd.DataFrame(self._square_symatrix, index=self._numeric_index, columns=self._numeric_index)\n self._tall_symatrix = self._square_symatrix.stack()\n\n self._square_dmatrix = np.array([\n [1, 3, 2, 9],\n [5, 0, 4, 7],\n [10, 5, 4, 0],\n [6, 1, 8, 9],\n [10, 5, 0, 3],\n [4, 8, 8, 6],\n [1, 0, 4, 1],\n [2, 3, 4, 6],\n [3, 0, 0, 7],\n ])\n self._text_index = pd.Index(['A1', 'A2', 'B1', 'B2'])\n self._square_dmatrix = pd.DataFrame(self._square_dmatrix, index=self._numeric_index, columns=self._text_index)\n self._tall_dmatrix = self._square_dmatrix.stack()\n\n self._grouper1 = pd.Series({\n 10: 1,\n 11: 1,\n 12: 1,\n 20: 2,\n 21: 2,\n 22: 2,\n 30: 3,\n 31: 3,\n 32: 3\n })\n\n self._grouper2 = pd.Series({\n 'A1': 'A',\n 'A2': 'A',\n 'B1': 'B',\n 'B2': 'B'\n })\n\n def test_square_symatrix(self):\n expected_result = pd.DataFrame([\n [50, 50, 49],\n [41, 52, 36],\n [42, 44, 40]\n ], index=[1, 2, 3], columns=[1, 2, 3])\n\n test1 = aggregate_matrix(self._square_symatrix, row_groups=self._grouper1, col_groups=self._grouper1)\n pdt.assert_frame_equal(expected_result, test1, check_dtype=False)\n\n test2 = aggregate_matrix(self._square_symatrix, groups=self._grouper1)\n pdt.assert_frame_equal(expected_result, test2, check_dtype=False)\n\n test3 = aggregate_matrix(self._square_symatrix, groups=self._grouper1.values)\n pdt.assert_frame_equal(expected_result, test3, check_dtype=False)\n\n def test_square_dmatrix(self):\n expected_result = pd.DataFrame([\n [24, 26],\n [34, 34],\n [9, 22],\n ], index=[1, 2, 3], columns=['A', 'B'])\n\n test1 = aggregate_matrix(self._square_dmatrix, row_groups=self._grouper1, col_groups=self._grouper2)\n pdt.assert_frame_equal(expected_result, test1, check_dtype=False)\n\n test2 = aggregate_matrix(self._square_dmatrix, row_groups=self._grouper1.values,\n col_groups=self._grouper2.values)\n pdt.assert_frame_equal(expected_result, test2, check_dtype=False)\n\n def test_tall_symatrix(self):\n expected_result = pd.DataFrame([\n [1, 1, 50],\n [1, 2, 50],\n [1, 3, 49],\n [2, 1, 41],\n [2, 2, 52],\n [2, 3, 36],\n [3, 1, 42],\n [3, 2, 44],\n [3, 3, 40],\n ], 
columns=['row', 'col', 'val']).set_index(['row', 'col'])['val']\n\n tall_row_grouper = self._grouper1.reindex(self._tall_symatrix.index, level=0)\n tall_col_grouper = self._grouper1.reindex(self._tall_symatrix.index, level=1)\n\n test1 = aggregate_matrix(self._tall_symatrix, row_groups=tall_row_grouper, col_groups=tall_col_grouper)\n pdt.assert_series_equal(expected_result, test1, check_dtype=False, check_names=False)\n\n test2 = aggregate_matrix(self._tall_symatrix, row_groups=self._grouper1, col_groups=self._grouper1)\n pdt.assert_series_equal(expected_result, test2, check_dtype=False, check_names=False)\n\n test3 = aggregate_matrix(self._tall_symatrix,\n row_groups=tall_row_grouper.values, col_groups=tall_col_grouper.values)\n pdt.assert_series_equal(expected_result, test3, check_dtype=False, check_names=False)\n\n test4 = aggregate_matrix(self._tall_symatrix, groups=self._grouper1)\n pdt.assert_series_equal(expected_result, test4, check_dtype=False, check_names=False)\n\n def test_tall_dmatrix(self):\n expected_result = pd.DataFrame([\n [1, 'A', 24],\n [1, 'B', 26],\n [2, 'A', 34],\n [2, 'B', 34],\n [3, 'A', 9],\n [3, 'B', 22]\n ], columns=['row', 'col', 'val']).set_index(['row', 'col'])['val']\n\n tall_row_grouper = self._grouper1.reindex(self._tall_dmatrix.index, level=0)\n tall_col_grouper = self._grouper2.reindex(self._tall_dmatrix.index, level=1)\n\n test1 = aggregate_matrix(self._tall_dmatrix, row_groups=tall_row_grouper, col_groups=tall_col_grouper)\n pdt.assert_series_equal(expected_result, test1, check_dtype=False, check_names=False)\n\n test2 = aggregate_matrix(self._tall_dmatrix, row_groups=self._grouper1, col_groups=self._grouper2)\n pdt.assert_series_equal(expected_result, test2, check_dtype=False, check_names=False)\n\n test3 = aggregate_matrix(self._tall_dmatrix,\n row_groups=tall_row_grouper.values, col_groups=tall_col_grouper.values)\n pdt.assert_series_equal(expected_result, test3, check_dtype=False, check_names=False)\n\nclass TestMatrixBalancing(unittest.TestCase):\n def setUp(self):\n self._square_matrix = np.random.uniform(0, 1000, (5, 5))\n self._1darray = np.random.uniform(0, 1000, 5)\n \n def test_1d_balance(self):\n axes = [0, 1]\n for ax in axes:\n test = matrix_balancing_1d(self._square_matrix, self._1darray, ax)\n self.assertAlmostEqual(test.sum(), self._1darray.sum(), places=5)\n pdt.assert_series_equal(pd.Series(np.sum(test, ax)), pd.Series(self._1darray))\n\n def test_2d_balance_matched_total(self):\n row = self._1darray\n column = np.roll(self._1darray, 2)\n\n test = matrix_balancing_2d(self._square_matrix, row, column, rel_error=0.000001)\n pdt.assert_series_equal(pd.Series(np.sum(test[0], 1)), pd.Series(row), check_less_precise=True)\n pdt.assert_series_equal(pd.Series(np.sum(test[0], 0)), pd.Series(column), check_less_precise=True)\n\n def test_2d_balance_average_total(self):\n row = self._1darray\n column = np.roll(np.sqrt(row), 2)\n\n test = matrix_balancing_2d(self._square_matrix, row, column, rel_error=0.000001, totals_to_use='average')\n self.assertAlmostEqual(test[0].sum().sum(), (row.sum() + column.sum())/2, places=5)\n\n def test_2d_balance_row_total(self):\n row = self._1darray\n column = np.sqrt(row)\n\n test = matrix_balancing_2d(self._square_matrix, row, column, rel_error=0.000001, totals_to_use='rows')\n pdt.assert_series_equal(pd.Series(np.sum(test[0], 1)), pd.Series(row), check_less_precise=True)\n\n def test_2d_balance_col_total(self):\n row = self._1darray\n column = np.sqrt(row)\n\n test = matrix_balancing_2d(self._square_matrix, 
row, column, rel_error=0.000001, totals_to_use='columns')\n pdt.assert_series_equal(pd.Series(np.sum(test[0], 0)), pd.Series(column), check_less_precise=True)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"balsa/test/routines/test_matrices.py","file_name":"test_matrices.py","file_ext":"py","file_size_in_byte":10182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"593110389","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nfrom app import app, wrapper_div_id\nfrom apps import testing, playground, positives, deaths, homepage, fourohfour\n\n# this initial layout is an empty div with our wrapper_div_id from the app\n# also notice the dcc.Location object. This tracks what page we're on.\n# We can also use it to do fancy stuff like track filter values if we want.\napp.layout = html.Div([\n dcc.Location(id='url', refresh=False),\n html.Div(id=wrapper_div_id)\n])\n\n\n@app.callback(\n dash.dependencies.Output(wrapper_div_id, 'children'),\n [dash.dependencies.Input('url', 'pathname')]\n)\ndef display_page(pathname):\n if pathname == '/':\n return homepage.layout\n elif pathname == '/testing':\n return testing.layout\n elif pathname == '/positives':\n return positives.layout\n elif pathname == '/deaths':\n return deaths.layout\n elif pathname == '/playground':\n return playground.layout\n else:\n return fourohfour.layout\n\n\nif __name__ == '__main__':\n # app.run_server(debug=True)\n app.run_server(debug=False)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"243636059","text":"\nfrom itertools import islice\nfrom os import name\nimport time,random\nimport memory_profiler as mem_profile\n\nnames = [\"A\",\"B\",\"C\",\"D\",\"e\",\"f\"]\nmajors = [\"egg\",\"monday\",\"Com\",\"thursday\",\"bot\"]\n\n\nprint(f'Memory (Before) do func: {mem_profile.memory_usage()}Mb')\n\n@mem_profile.profile\ndef people_list(num_people):\n result = []\n for i in range(num_people):\n person = {\n \"id\":i,\n \"name\":random.choice(names),\n \"major\":random.choice(majors)\n\n }\n\n\n result.append(person)\n return result \n\n\n# @mem_profile.profile\ndef people_generator(num_people):\n for i in range(num_people):\n person = {\n \"id\":i,\n \"name\":random.choice(names),\n \"major\":random.choice(majors)\n }\n\n yield person \n@mem_profile.profile\ndef my_func():\n a = [1] * (10 ** 6)\n b = [2] * (2 * 10 ** 7)\n del b\n return a\n\n\n\nt1=time.time()\npeople_list(10000)\nt2=time.time()\n\n# t1=time.time()\n# people_generator(10000000)\n# t2=time.time()\n# my_func()\n\n\nprint('Memory (After): {} MB'.format(mem_profile.memory_usage()))\nprint ('Took {} seconds'.format(t2-t1))\n\"\"\"\nThis is for test\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\"\"\"\n\n\ndef show (num):\n print(\"Starting\")\n while num > 0:\n yield num\n num -=1\n\n\n\neven_nums = (x for x in range(100000) if x %2 ==0)\n# print(next(even_nums))\nprint(list(even_nums))\n\n\n# lst = [ part for part in islice(even_nums,3)]\n \n# print(lst)\n# lst.append(next(even_nums))\n# lst.append(next(even_nums))\n# lst.append(next(even_nums))\n# print(lst)\n\ndef \n\n\"\"\"\n\n\n","sub_path":"generator-test.py","file_name":"generator-test.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479400845","text":"#!/usr/bin/env 
python3\n# Register the subdirectories inside the Pictures directory into the Pictures table.\n# (Directories that are already registered are not registered twice.)\n# Redirect the SQL that is displayed to a file, save it, and execute it.\n'''\nUSE user\nCREATE TABLE `Pictures` (\n  `ID` int(11) NOT NULL AUTO_INCREMENT,\n  `TITLE` varchar(50) NOT NULL,\n  `CREATOR` varchar(50) NOT NULL,\n  `PATH` varchar(500) NOT NULL,\n  `MARK` varchar(10) DEFAULT NULL,\n  `INFO` varchar(100) DEFAULT NULL,\n  `FAV` char(1) DEFAULT '0',\n  `COUNT` int(8) DEFAULT '0',\n  PRIMARY KEY (`ID`)\n) ENGINE=InnoDB AUTO_INCREMENT=279 DEFAULT CHARSET=utf8;\n'''\n\n\nfrom Py365Lib import MySQL, Common, Text, FileSystem as fs\n\n# Search the subdirectories and register them in the Pictures table.\ndef insertFolders(parentDir) :\n    dirs = fs.listDirectories(parentDir)\n    for dir in dirs :\n        # creator name\n        parts = Text.split(\"/\", dir)\n        creator = parts[len(parts) - 1]\n        # work titles\n        subdirs = fs.listDirectories(dir)\n        if len(subdirs) == 0 :\n            p = Text.split(' ', creator)\n            creator = p[0]\n            insertData(dir, creator)\n        else :\n            for subdir in subdirs :\n                insertData(subdir, creator)\n    return\n\n\n\n# Insert the data into MySQL.\ndef insertData(dir, creator) :\n    global mark\n    global cmd\n    parts = Text.split('/', dir)\n    title = parts[len(parts) - 1]\n    cmd = f\"INSERT INTO Pictures(TITLE, CREATOR, `PATH`, MARK) VALUES('{title}', '{creator}', '{dir}', '{mark}');\"\n    print(cmd)\n    return\n\n\n## Program start\nif Common.count_args() == 0 :\n    Common.stop(1, \"Please specify the parent directory.\", Common.ESC_FG_YELLOW)\n\ncmd = \"\"\nmark = 'NONE'\n\nparentDir = Common.args()[0]\nif Common.count_args() > 1 :\n    mark = Common.args()[1]\ninsertFolders(parentDir)\n","sub_path":"MySQL/Ins_Pictures.py","file_name":"Ins_Pictures.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"316659951","text":"# _*_ coding: utf-8 _*_\n\"\"\"\nUpdate the OTS cache\n\"\"\"\nfrom src.head import *\n\n\nclass NewThirdEntPlugin06:\n\n    def __init__(self):\n        pass\n\n    def process(self, data_instance):\n        \"\"\"\n        Interface function\n        :param data_instance:\n        :return:\n        \"\"\"\n        # Check whether this is real-time stock price update data\n        if self.__is_stock_price_data(data_instance):\n            self.__update_ots_catch_price(data_instance)\n            return\n\n        # Check whether this is NEEQ (New Third Board) financial data\n        if self.__is_financial_third_ent(data_instance):\n            return\n\n        if data_instance.dict[\"invalid\"]:\n            return\n\n        if data_instance.dict[\"deletedata\"] == 1 and len(data_instance.dict[\"eid\"]) > 0:\n            eid = data_instance.dict[\"eid\"]\n            tags, basic_info_dict, neeq_info_dict = self.__get_ots_cache_tags(eid)\n            try:\n                if basic_info_dict is None:\n                    logging.debug(\"No CACHE in OTS for eid {0}\".format(eid))\n                else:\n                    is_existing = False\n                    for i in tags:\n                        if i[\"name\"] == \"新三板\" or i[\"name\"] == u\"新三板\":\n                            is_existing = True\n                            i[\"code\"] = 0\n                            break\n                    if not is_existing:\n                        tags.append({\"code\": 0, \"name\": \"新三板\"})\n                    basic_info_dict[\"tags\"] = tags\n                    self.__update_ots_cache_tags(eid, basic_info_dict)\n                    if not neeq_info_dict:\n                        RedisHub.rpush(\"OTS\", \"com.qxb.dbcache.update\", json.dumps({\"eid\": eid, \"gs\": True, \"qy\": False}))\n            except Exception as e:\n                logging.exception(\"Update OTSCache error\")\n        elif data_instance.dict[\"deletedata\"] == 0 and len(data_instance.dict[\"eid\"]) > 0:\n            eid = data_instance.dict[\"eid\"]\n            tags, basic_info_dict, neeq_info_dict = self.__get_ots_cache_tags(eid)\n            try:\n                if basic_info_dict is None:\n                    logging.debug(\"No CACHE in OTS for eid {0}\".format(eid))\n                else:\n                    is_existing = False\n                    for i in tags:\n                        if i[\"name\"] == \"新三板\" or i[\"name\"] == u\"新三板\":\n                            is_existing = True\n                            i[\"code\"] = 1\n                            break\n                    if not is_existing:\n                        tags.append({\"code\": 1, \"name\": \"新三板\"})\n                    basic_info_dict[\"tags\"] = tags\n                    self.__update_ots_cache_tags(eid, basic_info_dict)\n                    if not neeq_info_dict:\n                        RedisHub.rpush(\"OTS\", \"com.qxb.dbcache.update\", json.dumps({\"eid\": eid, \"gs\": True, \"qy\": False}))\n            except Exception as e:\n                logging.exception(\"Update OTSCache error\")\n        return\n\n    def __update_ots_catch_price(self, data_instance):\n        \"\"\"\n        Update the stock-price-related information in OTS\n        :param data_instance:\n        :return:\n        \"\"\"\n        neeq_info = self.__get_ots_catch_neeq_info(data_instance.dict[\"eid\"])\n        if neeq_info:\n            neeq_info[\"tclose\"] = data_instance.dict[\"today_price\"]\n            neeq_info[\"zsz\"] = data_instance.dict[\"zsz\"]\n            neeq_info[\"ltsz\"] = data_instance.dict[\"ltsz\"]\n            neeq_info[\"pb\"] = data_instance.dict[\"pb\"]\n            neeq_info[\"pe\"] = data_instance.dict[\"pe\"]\n            neeq_info[\"volumn\"] = data_instance.dict[\"volumn\"]\n            neeq_info[\"turnover\"] = data_instance.dict[\"turnover\"]\n            neeq_info[\"chg\"] = data_instance.dict[\"chg\"]\n            neeq_info[\"pchg\"] = data_instance.dict[\"pchg\"]\n            neeq_info[\"datetime\"] = data_instance.dict[\"datetime\"]\n\n            try:\n                primary_key = {\"pk_eid\": data_instance.dict[\"eid\"]}\n                columns_to_update = {\"put\": {\"neeq_info\": json.dumps(neeq_info)}}\n                response = AliCacheTableStore.update_row(CONFIG_OTS_CACHE[\"table_name\"], primary_key, columns_to_update)\n                logging.debug(\"__update_ots_catch_price --> {0}\".format(response))\n            except Exception as e:\n                logging.exception(\"Error when OTS update tags\")\n        else:\n            RedisHub.rpush(\"OTS\", \"com.qxb.dbcache.update\", json.dumps({\"eid\": data_instance.dict[\"eid\"], \"gs\": True, \"qy\": False}))\n        return\n\n    def __get_ots_catch_neeq_info(self, eid):\n        \"\"\"\n        Get the neeq_info data from OTS\n        :param eid:\n        :return:\n        \"\"\"\n        neeq_info = dict()\n        try:\n            primary_key = {\"pk_eid\": eid}\n            columns_to_get = [\"eid\", \"neeq_info\"]\n            ots_response = AliCacheTableStore.get_row(CONFIG_OTS_CACHE[\"table_name\"], primary_key, columns_to_get)\n            if ots_response is not None and len(ots_response) == 3:\n                ots_data = ots_response[2]\n                if ots_data and \"eid\" in ots_data.keys() and \"neeq_info\" in ots_data.keys():\n                    neeq_info = json.loads(ots_data[\"neeq_info\"])\n        except Exception as e:\n            logging.exception(\"__get_ots_catch_neeq_info error!\")\n        return neeq_info\n\n    def __is_stock_price_data(self, data_instance):\n        \"\"\"\n        Determine whether the data is a real-time stock price update\n        :param data_instance:\n        :return:\n        \"\"\"\n        status = False\n        try:\n            if \"eid\" in data_instance.dict.keys() and \"today_price\" in data_instance.dict.keys():\n                status = True\n        except Exception as e:\n            logging.exception(\"__is_stock_price_data error!\")\n        return status\n\n    def __is_financial_third_ent(self, data_instance):\n        \"\"\"\n        Determine whether the data is NEEQ financial data\n        :param data_instance:\n        :return:\n        \"\"\"\n        status = False\n        try:\n            if \"finance_analysis\" in data_instance.dict.keys() and \"symbol\" in data_instance.dict.keys():\n                status = True\n        except Exception as e:\n            logging.exception(\"__is_financial_third_ent error!\")\n        return status\n\n    def __get_ots_cache_tags(self, eid):\n        \"\"\"\n        Get the OTS cache for the given eid\n        :param eid:\n        :return:\n        \"\"\"\n        tags = None\n        basic_info_dict = None\n        neeq_info_dict = None\n        try:\n            primary_key = {\"pk_eid\": eid}\n            columns_to_get = [\"eid\", \"basic_info\", \"neeq_info\"]\n            ots_response = AliCacheTableStore.get_row(CONFIG_OTS_CACHE[\"table_name\"], primary_key, columns_to_get)\n            if ots_response is not None and len(ots_response) == 3:\n                ots_data = ots_response[2]\n                if ots_data and len(ots_data) > 0 and \"eid\" in ots_data.keys() and \"basic_info\" in ots_data.keys():\n                    tags = []\n                    basic_info_dict = json.loads(ots_data[\"basic_info\"])\n                    if \"tags\" in basic_info_dict.keys():\n                        tags = basic_info_dict[\"tags\"]\n                    if \"neeq_info\" in ots_data.keys() and ots_data[\"neeq_info\"]:\n                        neeq_info_dict = json.loads(ots_data[\"neeq_info\"])\n        except Exception as e:\n            logging.exception(\"Error when checking OTS cache\")\n        return tags, basic_info_dict, neeq_info_dict\n\n    def __update_ots_cache_tags(self, eid, new_basic_info_dict):\n        \"\"\"\n        Update the OTS cache\n        :param eid:\n        :param new_basic_info_dict:\n        :return:\n        \"\"\"\n        try:\n            primary_key = {\"pk_eid\": eid}\n            columns_to_update = {\"put\": {\"basic_info\": json.dumps(new_basic_info_dict)}}\n            response = AliCacheTableStore.update_row(CONFIG_OTS_CACHE[\"table_name\"], primary_key, columns_to_update)\n            logging.debug(response)\n        except Exception as e:\n            logging.exception(\"Error when OTS update tags\")\n\n    def __call__(self):\n        pass\n","sub_path":"source/src/plugins/NewThirdEnt/NewThirdEntPlugin06.py","file_name":"NewThirdEntPlugin06.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"556727492","text":"#!/usr/bin/env python3\n\nimport re\nimport os\nimport sys\nimport datetime\n\nfrom sys import stderr\nfrom datetime import date\n\nusage = \"Usage: make_pkgbuild.py <template file> <rust makefile>\"\nrelease_number_regex = r\"CFG_RELEASE_NUM[ ]*=[ ]*(?P<value>.*)\"\nrelease_label_regex = r\"CFG_RELEASE_LABEL[ ]*=[ ]*(?P<value>.*)\"\n\ndef main():\n\t# Parse command-line args\n\tif len(sys.argv) != 3:\n\t\tprint(usage, file=stderr)\n\t\tsys.exit(1)\n\n\ttemplate_file = sys.argv[1]\n\trust_makefile = sys.argv[2]\n\n\t# Extract the version information from the Rust makefile\n\twith open(rust_makefile, \"r\") as f:\n\t\trust_mk_contents = f.readlines()\n\n\tr1 = re.compile(release_number_regex)\n\tr2 = re.compile(release_label_regex)\n\n\tversion_number = None\n\tversion_label = None\n\n\tfor line in rust_mk_contents:\n\t\tif version_number is not None and version_label is not None:\n\t\t\tbreak\n\n\t\tm1 = r1.match(line)\n\t\tif m1:\n\t\t\tversion_number = m1.group(\"value\")\n\t\t\tcontinue\n\n\t\tm2 = r2.match(line)\n\t\tif m2:\n\t\t\tversion_label = m2.group(\"value\").replace(\"-\", \"_\")\n\n\tdatestring = date.today().strftime(\"%Y.%m.%d\")\n\n\tversion = version_number + version_label + \"_\" + datestring\n\n\t# Write the PKGBUILD to stdout\n\twith open(template_file, \"r\") as f:\n\t\tpkgbuild = f.read()\n\n\tpkgbuild = pkgbuild.replace(\"{VERSION}\", version)\n\tsys.stdout.write(pkgbuild)\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"make_pkgbuild.py","file_name":"make_pkgbuild.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"73388429","text":"\n# A classic space-for-time trade-off. Done with four nested loops and binary search in the innermost loop, the time complexity would be O(n^3 log n).\nclass Solution:\n    def helper(self,A,B,C,D):\n        hashtable = {}\n        for a in A:\n            for b in B:\n                if a + b in hashtable:\n                    hashtable[a + b] += 1\n                else:\n                    hashtable[a + b] = 1\n        count = 0\n        for c in C:\n            for d in D:\n                if -c - d in hashtable:\n                    count += hashtable[-c - d]\n        return count\n\ns = Solution()\nA = [ 1, 2]\nB = [-2,-1]\nC = [-1, 2]\nD = [ 0, 2]\nprint(s.helper(A,B,C,D))","sub_path":"leetcode/454.py","file_name":"454.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"54444273","text":"import torch\nimport os\nimport numpy as np\nimport 
random\nfrom torch.utils.data import Dataset,DataLoader\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport pandas as pd\n\n\nclass Data_ganerator(Dataset):\n def __init__(self,txt_file):\n with open(txt_file, 'r') as f:\n self.all_triplets = f.readlines()\n self.all_triplets = self.all_triplets[0:]\n self.transform1 = transforms.Compose([transforms.ToTensor()])\n\n def __getitem__(self, item):\n triplet = self.all_triplets[item].split(' ')\n image = triplet[0]\n face = self.load_image(image)\n return face\n\n\n def __len__(self):\n\n return len(self.all_triplets)\n\n def load_image(self,image_dir):\n image = Image.open(image_dir).convert('RGB')\n if image.size != (224, 224):\n image = image.resize((224, 224), resample=Image.BILINEAR)\n image = self.transform1(image)\n print('1111111111111111111',image)\n return image\n\n\n\nif __name__ == '__main__':\n dataset = Data_ganerator('/home/tione/notebook/untitled9_face_decoder/val.txt')\n loader = DataLoader(dataset,batch_size=2,shuffle=True,drop_last=True,num_workers=2)\n for step, (face) in enumerate(loader):\n print(face.shape)\n \n\n\n\n\n\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"646588534","text":"import cv2\nimport numpy as np\n\n# built-in module\nimport sys\nimport os\nimport shutil\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef detectEdge(file):\n\timg = cv2.imread(file);\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\tthrs1 = 1000\n\tthrs2 = 3000\n\tedge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5)\n\treturn edge\n\ndef main(directory):\n\tif directory[-1] == '/':\n\t\tdirectory = directory[:-1]\n\tfiles = [f for f in listdir(directory) if isfile(join(directory,f)) and f != '.DS_Store']\n\tresultdir = directory+\"_result\"\n\tif not os.path.isdir(resultdir):\n\t\tos.mkdir(resultdir)\n\tfor file in files:\n\t\tinputfile = join(directory,file)\n\t\tprint(inputfile)\n\t\tedge = detectEdge(inputfile)\n\t\tresultfile = join(resultdir,file)\n\t\tprint(resultfile)\n\t\tcv2.imwrite(resultfile, edge)\n\n\n\nif __name__ == '__main__':\n try:\n fn = sys.argv[1]\n except:\n fn = 0\n\n main(fn)","sub_path":"canny.py","file_name":"canny.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"8039809","text":"import time\n\nfrom domain.neural_network.neural_network_results_saver import NeuralNetworkResultsSaver\nfrom domain.neural_network.training_data_provider import TrainingDataProvider\nfrom opencv_client.neural_network.neural_network_trainer import NeuralNetworkTrainer\n\n\nclass OpenCvNnTrainer():\n def __init__(self):\n self.trainingDataProvider = TrainingDataProvider()\n self.neuralNetworkTrainer = NeuralNetworkTrainer()\n self.neuralNetworkResultUploader = NeuralNetworkResultsSaver()\n\n def train_open_cv(self, request_id, people_with_image_paths):\n start_data = time.time()\n training_data = self.trainingDataProvider.get_training_data_for_neural_network(request_id,\n people_with_image_paths)\n end_data = time.time()\n data_preparation_time = end_data - start_data\n training_times = self.neuralNetworkTrainer.create_all_face_recognizers(request_id, training_data)\n self.neuralNetworkResultUploader.save_result_files(request_id, training_times, 
data_preparation_time)\n","sub_path":"FaceRecognition/domain/neural_network/opencv_nn_trainer.py","file_name":"opencv_nn_trainer.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"306561506","text":"from __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('video', '0002_snippet'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='asset',\n name='durationms',\n field=models.PositiveIntegerField(null=True),\n ),\n ]\n","sub_path":"video/migrations/0003_asset_durationms.py","file_name":"0003_asset_durationms.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"401340251","text":"\n\nfrom xai.brain.wordbase.verbs._sizzle import _SIZZLE\n\n#calss header\nclass _SIZZLES(_SIZZLE, ):\n\tdef __init__(self,): \n\t\t_SIZZLE.__init__(self)\n\t\tself.name = \"SIZZLES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"sizzle\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_sizzles.py","file_name":"_sizzles.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"502086518","text":"#!/usr/bin/env python\n# \n# tournament.py -- implementation of a Swiss-system tournament\n#\n\nimport psycopg2\n\n\ndef connect():\n \"\"\"Connect to the PostgreSQL database. Returns a database connection.\"\"\"\n return psycopg2.connect(\"dbname=tournament\")\n\n\ndef deleteMatches():\n # \"\"\"Remove all the match records from the database.\"\"\"\n # DB = connect()\n # cur = DB.cursor()\n #\n # #Delete data from the matches table, but keep the table\n # cur.execute(\"delete from matches;\")\n # DB.commit()\n # DB.close()\n DB = connect()\n cur = DB.cursor()\n cur.execute(\"UPDATE players SET wins = 0, matches = 0\")\n\n DB.commit()\n DB.close\n\n\ndef deletePlayers():\n \"\"\"Remove all the player records from the database.\"\"\"\n DB = connect()\n cur = DB.cursor()\n\n #Delete data from the matches table, but keep the table\n cur.execute(\"delete from players;\")\n DB.commit()\n DB.close()\n\n\ndef countPlayers():\n \"\"\"Returns the number of players currently registered.\"\"\"\n DB = connect()\n cur = DB.cursor()\n\n #Aggregate all the players from the player table\n cur.execute(\"SELECT count(*) FROM players\")\n count = cur.fetchone()[0]\n DB.close()\n\n return count\n\n\ndef registerPlayer(name):\n \"\"\"Adds a player to the tournament database.\n \n The database assigns a unique serial id number for the player. 
(This\n should be handled by your SQL database schema, not in your Python code.)\n \n Args:\n name: the player's full name (need not be unique).\n \"\"\"\n DB = connect()\n cur = DB.cursor()\n\n cur.execute(\"INSERT INTO players (player_name, wins, matches) VALUES (%s, %s, %s)\", (name, 0 , 0))\n\n DB.commit()\n DB.close()\n\n\ndef playerStandings():\n \"\"\"Returns a list of the players and their win records, sorted by wins.\n\n The first entry in the list should be the player in first place, or a player\n tied for first place if there is currently a tie.\n\n Returns:\n A list of tuples, each of which contains (id, name, wins, matches):\n id: the player's unique id (assigned by the database)\n name: the player's full name (as registered)\n wins: the number of matches the player has won\n matches: the number of matches the player has played\n \"\"\"\n DB = connect()\n cur = DB.cursor()\n cur.execute(\"SELECT * from players order by wins desc\")\n standings = cur.fetchall()\n\n DB.close()\n\n return standings\n\n\ndef reportMatch(winner, loser):\n \"\"\"Records the outcome of a single match between two players.\n\n Args:\n winner: the id number of the player who won\n loser: the id number of the player who lost\n \"\"\"\n DB = connect()\n cur = DB.cursor()\n cur.execute(\"UPDATE players SET wins = wins+1, matches = matches+1 WHERE player_id = %s\", (winner,))\n cur.execute(\"UPDATE players SET matches = matches+1 WHERE player_id = %s\", (loser,))\n\n DB.commit()\n DB.close\n \n \ndef swissPairings():\n \"\"\"Returns a list of pairs of players for the next round of a match.\n \n Assuming that there are an even number of players registered, each player\n appears exactly once in the pairings. Each player is paired with another\n player with an equal or nearly-equal win record, that is, a player adjacent\n to him or her in the standings.\n \n Returns:\n A list of tuples, each of which contains (id1, name1, id2, name2)\n id1: the first player's unique id\n name1: the first player's name\n id2: the second player's unique id\n name2: the second player's name\n \"\"\"\n\n #Giving up on a cool self join query, based on forum feedback\n # QUERY = '''\n # SELECT DISTINCT on (a.player_id)\n # a.player_id, a.player_name, b.player_id, b.player_name\n # FROM players as a, players as b\n # WHERE a.wins = b.wins\n # AND a.matches = b.matches\n # AND a.player_id < b.player_id\n # ORDER BY a.player_id;\n # '''\n standings = playerStandings()\n pairings = []\n while standings:\n (i1, n1, w1, m1) = standings[0]\n (i2, n2, w2, m2) = standings[1]\n pairings.append((i1, n1, i2, n2))\n del standings[0:2]\n\n\n\n return pairings\n\n\n\n","sub_path":"vagrant/tournament/tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"447286297","text":"import os \n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nimport numpy as np\nfrom keras.models import *\nfrom keras.layers import Input, merge, Conv2D, MaxPooling2D, UpSampling2D, Dropout, Cropping2D\nfrom keras.optimizers import *\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard\nfrom keras import backend as keras \nfrom time import time\n\nfrom generator import generateENETRandom as generateRandom\nfrom generator import countFolderImages\n\nimport click\nimport datetime as dt\n\n\nclass myUnet(object):\n\n\n def __init__(self, img_rows, img_cols):\n\n self.img_rows = img_rows\n self.img_cols = img_cols\n\n def 
get_unet(self):\n inputs = Input((self.img_rows, self.img_cols,2))\n conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\n print (\"conv1 shape:\",conv1.shape)\n conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\n print (\"conv1 shape:\",conv1.shape)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n print (\"pool1 shape:\",pool1.shape)\n\n conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\n print (\"conv2 shape:\",conv2.shape)\n conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\n print (\"conv2 shape:\",conv2.shape)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n print (\"pool2 shape:\",pool2.shape)\n\n conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\n print (\"conv3 shape:\",conv3.shape)\n conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\n print (\"conv3 shape:\",conv3.shape)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n print (\"pool3 shape:\",pool3.shape)\n\n conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\n conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\n drop4 = Dropout(0.5)(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n\n conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\n conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n drop5 = Dropout(0.5)(conv5)\n\n up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))\n merge6 = merge([drop4,up6], mode = 'concat', concat_axis = 3)\n conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\n conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\n\n up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))\n merge7 = merge([conv3,up7], mode = 'concat', concat_axis = 3)\n conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\n conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)\n\n up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\n merge8 = merge([conv2,up8], mode = 'concat', concat_axis = 3)\n conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\n conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)\n\n up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))\n merge9 = merge([conv1,up9], mode = 'concat', concat_axis = 3)\n conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)\n conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\n conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)\n\n model = Model(input = inputs, 
output = conv10)\n\n model.compile(optimizer = Adam(lr = 1e-4), loss = 'binary_crossentropy' , metrics = ['accuracy'])\n return model\n\n\n def train(self, epochs, period, folders_limit, main, frame_pre, frame_ext, t):\n\n TensorBoard(log_dir='..\\\\graphs\\\\' + 'graph_'+t, histogram_freq=0, write_graph=True, write_images=True)\n tbCallBack = TensorBoard(log_dir='..\\\\graphs\\\\' + 'graph_'+t, histogram_freq=0, write_graph=True, write_images=True)\n\n model = self.get_unet()\n\n checkpoint_parent = '..\\\\checkpoints\\\\'\n checkpoint = checkpoint_parent + 'enet_' + t + '.hdf5'\n best_checkpoint = checkpoint_parent + 'best_enet_' + t + '.hdf5'\n model_checkpoint = ModelCheckpoint(checkpoint, monitor='loss', save_best_only=False, verbose=1, mode='auto', period=period)\n mc_best = ModelCheckpoint(best_checkpoint, monitor='loss', save_best_only=True, verbose=1, mode='auto' , period=period)\n\n model.fit_generator(generateRandom(folders_limit, main, frame_pre, frame_ext), steps_per_epoch=30, epochs=epochs, verbose=1, callbacks=[model_checkpoint, tbCallBack, mc_best])\n\ndef get_unet():\n myunet = myUnet(224, 224)\n return myunet.get_unet()\n\n@click.command()\n@click.option('--name', default=str(dt.date.today()), help='Name of the experiment', show_default=True)\n@click.option('--src', default='..\\\\data\\\\tabletennis\\\\frames\\\\', help='Source of data', show_default=True)\n@click.option('--folders', default=1, help='Number of folders to train', show_default=True)\n@click.option('--epochs', default=2005, help='Number of epochs', show_default=True)\n@click.option('--period', default=200, help='Saving after period', show_default=True)\ndef main(name, src, folders, epochs, period):\n print(name, src, folders, epochs, period)\n countFolderImages(folders, src)\n myunet = myUnet(224, 224)\n myunet.train(epochs, period, folders, src, '', '.jpg', name + '____' + str(time()))\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/enet.py","file_name":"enet.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"30622970","text":"\nimport sqlite3\n#from omdb_controller import Controller\nconn=sqlite3.connect(\"omdb1.db\")\nc=conn.cursor()\n\n# sql=\"SELECT * FROM movies WHERE title=?\"\n# my_cur= c.execute(sql, (\"Steam\",))\n# my_result=my_cur.fetchone()\n#print (my_result)\n\n#print (Controller().search_db('Steam'))\n\n#strin=\"Robert Downey Jr., Terrence Howard, Jeff Bridges, Gwyneth Paltrow\"\n# lst=strin.split(',')\n# for name in lst:\n# first_name=name.split()[0]\n# last_name=name.split()[1]\n# suffix=name.split()[2]\n# #print(lst)\n\n# sql = \"\"\"SELECT movies.movie_id, actors.actor_id\n# FROM movies, actors\n# WHERE movies.title=? 
AND actors.first_name=?\n# AND actors.last_name=?\"\"\"\n\n# sql1=\"\"\"SELECT * FROM movies WHERE title LIKE ?;\"\"\"\n\n# m='top'\n\n# t=c.execute(sql1, ('%'+m+'%',))\n\n# p=c.execute(sql, ('Friends', \"Lisa\", 'Kudrow'))\n\nsql2=\"SELECT * FROM movies_actors WHERE movie_id='1';\"\np=c.execute(sql2)\nrow=p.fetchall()\nactors_list=[]\nfor pair in row:\n actors_list.append(str(pair[0]))\n\nsql3=\"SELECT * FROM actors WHERE actor_id=(?);\"\nactors_names=[]\nfor actor in actors_list:\n v=c.execute(sql3, actor)\n row1=v.fetchone()[1:]\n actors_names.append(row1)\n\nsql3=\"SELECT * FROM movies_directors WHERE movie_id=?;\"\ndirect_cur=c.execute(sql3, '5')\ndd=direct_cur.fetchone()\ndir_id=str (dd[0])\nsql4=\"SELECT * FROM directors WHERE director_id=(?);\"\ndir_id_cur=c.execute(sql4, (dir_id,))\ndirect_names=dir_id_cur.fetchone()[1:]\n\n\nprint(actors_names)\n\nc.close()\n\n","sub_path":"omdb/outline/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"137359031","text":"import datetime\n\nimport numpy as np\n\nfrom recomendations.vectorizing.faculty_name import faculty_name_vector, load_default_faculty_name_vectorizers\nfrom recomendations.vectorizing.subscriptions import load_default_subscription_vectorizers, subscriptions_vector\n\n_education_count_vectorizer = None\n_education_pca_vectorizer = None\n_subscriptions_lda_vectorizer = None\n_subscriptions_tfidf_vectorizer = None\n_subscriptions_dictionary = None\n\n\ndef user_vector(user, subscriptions):\n arr = []\n arr.extend(_sex_user_vec_part(user))\n arr.extend(_bdate_user_vec_part(user))\n arr.extend(_education_user_vec_part(user))\n arr.extend(_subscriptions_user_vec_part(subscriptions))\n return np.array(arr)\n\n\ndef _sex_user_vec_part(user):\n return [int(user['sex']) == 1 if 'sex' in user else 0,\n int(user['sex']) == 2 if 'sex' in user else 0]\n\n\ndef _bdate_user_vec_part(user):\n if 'bdate' in user:\n bdate_parts = user['bdate'].split('.')\n bdate_year = bdate_parts[-1] if len(bdate_parts) == 3 else None\n current_year = datetime.datetime.now().year\n age = current_year - bdate_year\n else:\n age = None\n\n is_age_between_10_and_15 = int(10 <= age <= 15) if age is not None else 0\n is_age_between_16_and_20 = int(16 <= age <= 15) if age is not None else 0\n is_age_between_21_and_25 = int(21 <= age <= 25) if age is not None else 0\n is_age_between_26_and_30 = int(26 <= age <= 30) if age is not None else 0\n is_age_between_31_and_35 = int(31 <= age <= 35) if age is not None else 0\n is_age_between_36_and_inf = int(36 <= age) if age is not None else 0\n\n return [is_age_between_10_and_15, is_age_between_16_and_20, is_age_between_21_and_25,\n is_age_between_26_and_30, is_age_between_31_and_35, is_age_between_36_and_inf]\n\n\ndef _education_user_vec_part(user):\n global _education_count_vectorizer, _education_pca_vectorizer\n if _education_count_vectorizer is None and _education_pca_vectorizer is None:\n _education_count_vectorizer, _education_pca_vectorizer = load_default_faculty_name_vectorizers()\n\n return faculty_name_vector(user.get('university_name', ''), user.get('faculty_name', ''),\n _education_count_vectorizer, _education_pca_vectorizer)\n\n\ndef _subscriptions_user_vec_part(subscriptions):\n global _subscriptions_lda_vectorizer, _subscriptions_tfidf_vectorizer, _subscriptions_dictionary\n if _subscriptions_lda_vectorizer is None and _subscriptions_tfidf_vectorizer is None and _subscriptions_dictionary 
is None:\n _subscriptions_lda_vectorizer, _subscriptions_tfidf_vectorizer, _subscriptions_dictionary = load_default_subscription_vectorizers()\n\n sub_descriptions = ['{} {}'.format(sub.get('name', ''), sub.get('description', '')) for sub in subscriptions]\n\n return subscriptions_vector(sub_descriptions,\n _subscriptions_lda_vectorizer, _subscriptions_tfidf_vectorizer, _subscriptions_dictionary)\n","sub_path":"recomendations/vectorizing/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"574158130","text":"from javalang import tokenizer\nfrom javalang import parse\nimport javalang\n\nimport subprocess\nimport random\nimport intervals as I\nimport collections\nimport sys\nimport os\n\nfrom core import *\n\ndef gen_ugly_v2(file_path, output_dir, file_name=''):\n n_modif = 1\n\n java_source = open_file(file_path)\n whitespace, tokens = tokenize_with_white_space(java_source, relative=True)\n\n suitable_for_intertion = range(0, len(whitespace))\n\n def token_ok_for_deletion(token):\n if isinstance(token, tokenizer.Separator):\n return True\n if isinstance(token, tokenizer.Operator):\n return True\n return False\n\n suitable_for_deletion = [\n pos\n for pos in range(0, len(whitespace) - 1)\n if (whitespace[pos][0] > 1 \n or whitespace[pos][0]\n or token_ok_for_deletion(pos)\n or token_ok_for_deletion(pos+1))\n ]\n\n for modif_id in range(n_modif):\n operation = random.choice([1, -1])\n index = random.choice([1, -1])\n if operation > 0:\n pos = random.choice(suitable_for_intertion)\n original_whitespace = whitespace[pos]\n else:\n original_whitespace = (-1,-1)\n while original_whitespace[index] + operation < 0:\n pos = random.choice(suitable_for_deletion)\n original_whitespace = whitespace[pos]\n\n new_whitespace = list(original_whitespace)\n\n new_whitespace[index] += operation\n\n whitespace[pos] = tuple(new_whitespace)\n\n new_java_source = reformat(whitespace, tokens, relative = True)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if file_name == '':\n file_name = file_path.split(\"/\")[-1]\n \n return save_file(output_dir, file_name, new_java_source)\n\n\ndef mix_files_v2(file_A_path, file_B_path, output_file, from_line, to_line=-1):\n java_source_A = open_file(file_A_path)\n whitespace_A, tokens_A = tokenize_with_white_space(java_source_A, relative=True)\n\n java_source_B = open_file(file_B_path)\n whitespace_B, tokens_B = tokenize_with_white_space(java_source_B, relative=True)\n\n if to_line == -1:\n to_line = from_line\n\n from_token = len(tokens_A)\n to_token = 0\n\n for pos, token in enumerate(tokens_A):\n if token.line < from_line:\n from_token = pos\n if token.line <= to_line:\n to_token = pos\n from_token += 1\n\n tokens = tokens_A\n whitespace = whitespace_A[:from_token] + whitespace_B[from_token:to_token+1] + whitespace_A[to_token+1:]\n\n new_java_source = reformat(whitespace, tokens, relative = True)\n \n output_dir = output_file.split('/')[:-1]\n file_name = output_file.split('/')[-1]\n\n return save_file(output_dir, file_name, new_java_source) \n\ndef gen_ugly(file_path, output_dir, modification_number = (1,0,0,0,0)):\n \"\"\"\n Gen an ugly vertsion of of .java file\n \"\"\"\n insertions_sample_size_space = modification_number[0]\n insertions_sample_size_tab = modification_number[1]\n insertions_sample_size_newline = modification_number[2]\n insertions_sample_size = insertions_sample_size_space + insertions_sample_size_tab + 
insertions_sample_size_newline\n deletions_sample_size_space = modification_number[3]\n deletions_sample_size_newline = modification_number[4]\n deletions_sample_size = deletions_sample_size_space + deletions_sample_size_newline\n # deletions_sample_size = modification_number - insertions_sample_size\n with open(file_path) as f:\n file_lines = f.readlines()\n file_content = \"\".join(file_lines)\n\n tokens = tokenizer.tokenize(file_content)\n tokens = [ t for t in tokens]\n # print(\"\\n\".join([ str(t) for t in tokens]))\n\n\n # Take a sample of locations suitable for insertions\n insertions_sample = random.sample( tokens, min(insertions_sample_size, len(tokens)) )\n\n insertions = dict();\n\n insertions_chars = ([' '] * insertions_sample_size_space);\n insertions_chars.extend(['\\t'] * insertions_sample_size_tab)\n insertions_chars.extend(['\\n'] * insertions_sample_size_newline)\n random.shuffle(insertions_chars)\n\n for element, char in zip(insertions_sample, insertions_chars):\n insertions[element.position] = char\n\n # Select every locations suitable for deletions (i.e. before or after a separator/operator)\n deletions_spots = list()\n suitable_for_deletions = [tokenizer.Separator, tokenizer.Operator]\n for index in range(0, len(tokens)-1):\n if ( type(tokens[index]) in suitable_for_deletions):\n prev_token_position = tokens[index-1].position;\n tokens_position = tokens[index].position;\n next_token_position = tokens[index+1].position;\n end_of_prev_token = (prev_token_position[0], prev_token_position[1] + len(tokens[index-1].value))\n end_of_token = (tokens_position[0], tokens_position[1] + len(tokens[index].value))\n if (end_of_prev_token != tokens_position):\n #print(\"prev : \", tokens[index-1].value , tokens[index].value, tokens[index+1].value, tokens[index].position)\n deletions_spots.append((end_of_prev_token, tokens_position))\n if (end_of_token != next_token_position):\n #print(\"next : \", tokens[index-1].value , tokens[index].value, tokens[index+1].value, tokens[index].position)\n deletions_spots.append((end_of_token, next_token_position))\n deletions_spots = list(set(deletions_spots))\n\n # Take a sample of locations suitable for deletions\n deletions_sample = random.sample( deletions_spots, min(deletions_sample_size, len(deletions_spots)) )\n\n deletions = dict()\n for deletion_intervals in deletions_spots:\n #print(deletion_intervals)\n from_char = deletion_intervals[0]\n to_char = deletion_intervals[1]\n while from_char[0] <= to_char[0]:\n if from_char[0] == to_char[0]:\n interval = I.closedopen(from_char[1], to_char[1] )\n else:\n interval = I.closedopen(from_char[1], I.inf )\n if ( from_char[0] not in deletions):\n deletions[from_char[0]] = list()\n deletions[from_char[0]].append(interval)\n from_char=(from_char[0]+1, 0)\n\n\n deletions_spots_chars = dict()\n line_num = 1\n for line in file_lines:\n char_num = 1\n for char in line:\n if ( line_num in deletions ):\n for intervals in deletions[line_num]:\n if char_num in intervals:\n if (char not in deletions_spots_chars):\n deletions_spots_chars[char] = []\n deletions_spots_chars[char].append((line_num, char_num))\n char_num = char_num + 1\n line_num = line_num + 1\n\n\n deletions = []\n if (' ' in deletions_spots_chars):\n deletions.extend(random.sample(deletions_spots_chars[' '], deletions_sample_size_space))\n if ('\\n' in deletions_spots_chars):\n deletions.extend(random.sample(deletions_spots_chars['\\n'], deletions_sample_size_newline))\n\n # print(insertions)\n # print(deletions)\n\n if not 
os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n output_path = os.path.join(output_dir, f'./{file_path.split(\"/\")[-1]}')\n\n # Write the output file\n with open(output_path, \"w\") as output_file_object:\n line_num = 1\n for line in file_lines:\n char_num = 1\n for char in line:\n skip = False\n if ((line_num, char_num) in deletions):\n skip = True\n if ((line_num, char_num) in insertions):\n output_file_object.write(insertions[(line_num, char_num)])\n if ( not skip ):\n output_file_object.write(char)\n char_num = char_num + 1\n line_num = line_num + 1\n return tuple(set(deletions) | set(insertions.keys()))\n\n\ndef mix_sources(source_A, source_B, from_line, to_line=-1):\n \"\"\"Put a little bit of B into A\n \"\"\"\n if to_line == -1:\n to_line = from_line\n\n file_A_lines = [ line + '\\n' for line in source_A.split('\\n') ]\n file_B_lines = [ line + '\\n' for line in source_B.split('\\n') ]\n\n tokens_A = tokenizer.tokenize(source_A)\n tokens_B = tokenizer.tokenize(source_B)\n\n tokens = zip(tokens_A, tokens_B)\n lines = range(from_line, to_line)\n\n output_source = \"\"\n\n first_part = ''.join(file_A_lines[:(from_line-1)])\n output_source += first_part\n from_token = None\n first_token_of_A = None\n to_token = None\n last_token_of_A = None\n for token_A, token_B in tokens:\n if token_A.position[0] >= from_line and token_A.position[0] <= to_line:\n if 'form_token' not in locals():\n form_token = token_B\n first_token_of_A = token_A\n to_token = token_B\n last_token_of_A = token_A\n # print(first_token_of_A,last_token_of_A)\n if last_token_of_A:\n if first_token_of_A.position[0] != from_line:\n output_source += ''.join(file_A_lines[(from_line-1):(first_token_of_A.position[0]-1)])\n output_source += \" \"*(first_token_of_A.position[1]-1)\n output_source += source_B[(len(''.join(file_B_lines[:(form_token.position[0]-1)])) + form_token.position[1] - 1):(len(''.join(file_B_lines[:(to_token.position[0]-1)])) + to_token.position[1] + len(to_token.value) - 1)]\n output_source += '\\n'\n if last_token_of_A.position[0] != to_line:\n output_source += ''.join(file_A_lines[(last_token_of_A.position[0]):(to_line)])\n output_source += ''.join(file_A_lines[(to_line):])\n else:\n output_source += ''.join(file_A_lines[(from_line-1):])\n\n return output_source\n\n# The tokens should be the same\n# Patch parts of B into A,\ndef mix_files(file_A_path, file_B_path, output_file, from_line, to_line=-1):\n \"\"\"Put a little bit of B into A\n \"\"\"\n if to_line == -1:\n to_line = from_line\n\n with open(file_A_path) as f:\n file_A_lines = f.readlines()\n\n try:\n with open(file_B_path) as f:\n file_B_lines = f.readlines()\n except FileNotFoundError:\n with open(output_file, \"w\") as output_file_object:\n output_file_object.write(\"\".join(file_A_lines))\n return output_file\n\n file_A_content = \"\".join(file_A_lines)\n file_B_content = \"\".join(file_B_lines)\n\n output_source = mix_sources(file_A_content, file_B_content, from_line, to_line=to_line)\n\n output_dir = \"/\".join(output_file.split(\"/\")[:-1])\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n with open(output_file, \"w\") as output_file_object:\n output_file_object.write(output_source)\n\n return output_file\n\n\ndef reformat(whitespace, tokens, tabulations=False, relative=True):\n \"\"\"\n Given the sequence of whitespaces and javat token reformat the java source code\n :return: the java source code\n \"\"\"\n result = ''\n position = 0\n for ws, t in zip(whitespace, tokens):\n if ws[0] > 0:\n if relative:\n 
position = max(position + ws[1], 0)\n if tabulations:\n result += str(t.value) + \"\\n\" * ws[0] + \"\\t\" * position\n else:\n result += str(t.value) + \"\\n\" * ws[0] + \" \" * position\n else:\n if tabulations:\n result += str(t.value) + \"\\n\" * ws[0] + \"\\t\" * ws[1]\n else:\n result += str(t.value) + \"\\n\" * ws[0] + \" \" * ws[1]\n else:\n result += str(t.value) + \" \" * ws[1]\n return result\n\n\ndef tokenize_with_white_space(file_content, relative=True, new_line_at_the_end_of_file=True):\n \"\"\"\n Tokenize the java source code\n :param file_content: the java source code\n :return: (whitespace, tokens)\n \"\"\"\n position_last_line = 1;\n tokens = tokenizer.tokenize(file_content, parse_comments=True)\n tokens = [ t for t in tokens]\n whitespace = list()\n for index in range(0, len(tokens)-1):\n tokens_position = tokens[index].position;\n next_token_position = tokens[index+1].position;\n end_of_token = (tokens_position[0], tokens_position[1] + len(tokens[index].value))\n if end_of_token == next_token_position:\n whitespace.append((0,0))\n else :\n if ( end_of_token[0] == next_token_position[0] ):\n # same line\n whitespace.append(( 0, next_token_position[1] - end_of_token[1]))\n else:\n # new line\n if relative:\n whitespace.append(( next_token_position[0] - end_of_token[0] - tokens[index].value.count('\\n'), next_token_position[1] - position_last_line))\n position_last_line = next_token_position[1] \n else:\n whitespace.append(( next_token_position[0] - end_of_token[0] - tokens[index].value.count('\\n'), next_token_position[1])) \n if new_line_at_the_end_of_file:\n whitespace.append((1,0))\n else:\n if file_content[-1] == '\\n':\n if file_content[-2] == '\\n':\n whitespace.append((2,0))\n else:\n whitespace.append((1,0))\n else:\n whitespace.append((0,0))\n # rewritten = reformat(whitespace, tokens)\n # print(rewritten)\n # return rewritten\n return whitespace, tokens\n\n\ndef get_char_pos_from_lines(file_path, from_line, to_line=-1):\n \"\"\"\n Tokenize the java source code\n :param file_content: the java source code\n :return: (whitespace, tokens)\n \"\"\"\n if to_line == -1:\n to_line = from_line\n file_lines = None\n with open(file_path) as f:\n file_lines = f.readlines()\n if file_lines:\n from_char = len(''.join(file_lines[:(from_line-1)]))\n to_char = from_char + len(''.join(file_lines[(from_line-1):to_line]))\n return (from_char, to_char)\n else:\n return (-1, -1)\n\n\ndef check_well_formed(file_path):\n \"\"\"\n Check if javalang can parse the file\n :param file_path: the java file dir\n \"\"\"\n with open(file_path) as f:\n file_content = f.read()\n try:\n tree = parse.parse(file_content)\n return True\n except javalang.parser.JavaSyntaxError as error:\n print(error)\n return False\n except:\n pass\n\n\ndef get_bad_formated(dir):\n \"\"\"\n Get all the bad formated files from a dir\n :param dir: dir to check recursively\n :return: list of path to java files\n \"\"\"\n bad_formated_files = []\n for folder in os.walk(dir):\n for file_name in folder[2]:\n file_path = os.path.join(folder[0], file_name)\n if file_path.endswith('.java'):\n if ( not check_well_formed(file_path) ):\n bad_formated_files.append(file_path)\n return bad_formated_files\n\n\ndef compute_diff_size(file_A, file_B):\n \"\"\"\n Check the diff size between file A and B\n :return: the size of the diff\n \"\"\"\n cmd = 'diff {} {}'.format(file_A, file_B)\n process = subprocess.Popen(cmd.split(\" \"), stdout=subprocess.PIPE)\n output = process.communicate()[0]\n return output.count(b'\\n>') + 
output.count(b'\\n<')\n\n\nif __name__ == \"__main__\":\n if (sys.argv[1] == \"char_pos\"):\n print(get_char_pos_from_lines(sys.argv[2], int(sys.argv[3])))\n elif (sys.argv[1] == \"ugly\"):\n print(gen_ugly( sys.argv[2], sys.argv[3] ))\n elif (sys.argv[1] == \"tokenize_ws\"):\n whitespace, tokens = tokenize_with_white_space(open_file(sys.argv[2]))\n #print(reformat(whitespace, tokens))\n print(\"\\n\".join([str(e) for e in zip(whitespace, tokens)]))\n elif (sys.argv[1] == \"mix\"):\n mix_files(sys.argv[2], sys.argv[3], sys.argv[4], 62, 64)\n elif (sys.argv[1] == \"diff\"):\n print(compute_diff_size(sys.argv[2], sys.argv[3]))\n","sub_path":"python/java_lang_utils.py","file_name":"java_lang_utils.py","file_ext":"py","file_size_in_byte":15952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"34618991","text":"import re\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nimport json\nimport xlwt\n\n\ndef download_img(item_dict):\n for k, v in item_dict.items():\n imgnum = 0\n print('itemimgurl', v[5])\n for imgurl in v[5]:\n if imgnum == 0:\n imgreq = requests.get(imgurl, verify=False)\n if imgreq.status_code < 400:\n with open(basepath + auctionname + '\\\\' + str(k) + '.jpg', 'wb') as f:\n f.write(imgreq.content)\n imgnum += 1\n else:\n imgreq = requests.get(imgurl, verify=False)\n if imgreq.status_code < 400:\n with open(basepath + auctionname + '\\\\' + str(k) + '_' + str(imgnum) + '.jpg', 'wb') as f:\n f.write(imgreq.content)\n imgnum += 1\n\ndef save_excel(item_dict):\n\n excel_init_file = xlwt.Workbook(encoding='utf-8')\n table = excel_init_file.add_sheet('auction', cell_overwrite_ok=True)\n row_num = 0\n for k, v in item_dict.items():\n table.write(row_num, 0, k)\n table.write(row_num, 1, str(v[0]))\n table.write(row_num, 2, str(v[1]))\n table.write(row_num, 3, str(v[2]))\n table.write(row_num, 4, str(v[3]))\n table.write(row_num, 5, str(v[4]))\n row_num += 1\n excel_init_file.save(basepath + auctionname + '\\\\'+'auction.xls')\n\n\ndef del_file(path):\n for i in os.listdir(path):\n path_file = os.path.join(basepath + auctionname, i)\n if os.path.isfile(path_file):\n os.remove(path_file)\n else:\n del_file(path_file)\n\ndef create_folder():\n if os.path.isdir(basepath + auctionname):\n if len(os.listdir(basepath + auctionname)) != 0:\n del_file(basepath + auctionname + '\\\\')\n print('文件删除完成!')\n else:\n os.makedirs(basepath + auctionname )\n print('文件夹已创建!')\n\n\n\n\n\nbasepath = 'C:\\\\auctions\\\\igavelauctions\\\\'\nauctionname = 'a5wa'\n\ncreate_folder()\n\nkeyword = 'a5wa'\nurl = 'http://bid.igavelauctions.com/service/data.ashx?AjaxMode=SearchPage'\n\nitem_dict = {}\n\ndata1 = {\n 'format':'rawjson',\n 'nodata':'1',\n 'PageNum':'1',\n 'PerPage':'400',\n 'Log':'0',\n 'UseSKU':'0',\n 'Featured':'-1',\n 'FT_Use':'1',\n 'FT_DelimWord':'OR',\n 'FT_Inflect':'0',\n 'FT_Prefix':'1',\n 'FT_Description':'0',\n 'CheckInvoiceAuction':'0',\n 'Category_ID':'0',\n 'Category_Sub':'0',\n 'Category_Status':'-1',\n 'Group_ID':'0',\n 'ItemStatus':'4',\n 'BidOn':'-1',\n 'ShowItemsInWatchList':'1',\n 'ShowItemsBidOn':'1',\n 'HasIP':'-1',\n 'ItemMode':'1',\n 'Event_ID':'-1',\n 'Donor_ID':'0',\n 'ItemType':'-1',\n 'IsHighBidder':'-1',\n 'Orientation_X':'List',\n 'Sort':'1',\n 'SortToC':'0',\n 'HideImages':'0',\n 'OnlyItemsInWatchList':'0',\n 'OnlyItemsInRecentView':'0',\n 'OnlyItemsBidOn':'0',\n 'Search':keyword,\n 'DaysBack':'10',\n 'DonorData':'none',\n 'ItemStatusVal':'1',\n 'ClosedEarly':'0',\n\n}\n\nreq = 
requests.get(url,data=data1)\ntotal_num = json.loads(req.text)['Data']['Table'][0]['TotalRecords']\nprint('total_num',total_num)\n\ndata2 = {\n 'format':'rawjson',\n 'nodata':'1',\n 'PageNum':'1',\n 'PerPage':total_num,\n 'Log':'0',\n 'UseSKU':'0',\n 'Featured':'-1',\n 'FT_Use':'1',\n 'FT_DelimWord':'OR',\n 'FT_Inflect':'0',\n 'FT_Prefix':'1',\n 'FT_Description':'0',\n 'CheckInvoiceAuction':'0',\n 'Category_ID':'0',\n 'Category_Sub':'0',\n 'Category_Status':'-1',\n 'Group_ID':'0',\n 'ItemStatus':'4',\n 'BidOn':'-1',\n 'ShowItemsInWatchList':'1',\n 'ShowItemsBidOn':'1',\n 'HasIP':'-1',\n 'ItemMode':'1',\n 'Event_ID':'-1',\n 'Donor_ID':'0',\n 'ItemType':'-1',\n 'IsHighBidder':'-1',\n 'Orientation_X':'List',\n 'Sort':'1',\n 'SortToC':'0',\n 'HideImages':'0',\n 'OnlyItemsInWatchList':'0',\n 'OnlyItemsInRecentView':'0',\n 'OnlyItemsBidOn':'0',\n 'Search':keyword,\n 'DaysBack':'10',\n 'DonorData':'none',\n 'ItemStatusVal':'1',\n 'ClosedEarly':'0',\n}\n\nreq = requests.get(url,data=data1)\nitem_list = json.loads(req.text)['Data']['Table']\n# print(item_list)\nprint('拍品数量',len(item_list))\n\nfor item in item_list:\n # print(item)\n #拍品编号\n lot_num=item['Lot']\n print('lot_num',lot_num)\n #title\n title = item['Title']\n print('title',title)\n # startprice\n startprice=item['Bid']\n print('startprice',startprice)\n #item_url\n item_url = 'http://bid.igavelauctions.com/Bidding.taf?_function=detail&Auction_uid1='+ str(lot_num).strip()\n print('item_url',item_url)\n req1 = requests.get(item_url)\n soup = BeautifulSoup(req1.text,'html5lib')\n # print(soup)\n item_elm = soup.find_all('span',{'id':re.compile(r'Item_Description_.*')})[0]\n # print(item_elm)\n item_text = item_elm.text\n # print(item_text)\n\n #估价\n est = re.findall('Estimate(.*)',item_text)\n # print('est==>',est)\n if est:\n est = est[0].strip()\n # print(est)\n if est:\n len_est = len(est.split('-'))\n if len_est > 1:\n est_low = est.split('-')[0]\n est_high = est.split('-')[1]\n else:\n est_low = est.split('-')[0]\n est_high = est_low\n else:\n est_low = ''\n est_high = ''\n else:\n est_low = ''\n est_high = ''\n print(est_low)\n print(est_high)\n\n #描述\n\n des = re.findall('Description(.*)', item_text)\n # print('des==>', des)\n if des:\n des = des[0].strip()\n # print(des)\n else:\n des = ''\n\n provenance = re.findall('Provenance(.*)', item_text)\n # print('provenance==>', provenance)\n if provenance:\n provenance = provenance[0].strip()\n else:\n provenance = ''\n # print(provenance)\n\n measurements = re.findall('Measurements(.*)', item_text)\n # print('measurements==>', measurements)\n if measurements:\n measurements = measurements[0].strip()\n else:\n measurements = ''\n # print(measurements)\n\n weight = re.findall('Weight(.*)', item_text)\n # print('weight==>', weight)\n if weight:\n weight = weight[0].strip()\n else:\n weight = ''\n # print(weight)\n\n des_sum = ((('provenance:' + provenance) if provenance else '')+ ' ' + (('measurements:' + measurements) if measurements else '')+ ' ' + (('weight:' + weight) if weight else '')+ ' ' + (des if des else '')).strip()\n print(des_sum)\n # except:\n # continue\n\n #img_url\n img_url_list = []\n img_elm_list = soup.select_one('#imglist')\n if img_elm_list:\n img_elm_list = img_elm_list.select('a')\n for img in img_elm_list:\n img_url = img.get('href')\n img_url_list.append(img_url)\n print(img_url_list)\n else:\n img_url = soup.select_one('a.MagicThumb').get('href')\n img_url_list.append(img_url)\n\n\n\n #写入字典\n item_dict[lot_num] = 
[title,des_sum,startprice,est_low,est_high,img_url_list]\n # print(item_dict)\n\nsave_excel(item_dict)\ndownload_img(item_dict)\n\n\n\n","sub_path":"tools/Crawler/爬虫www.igavelauctions.com.py","file_name":"爬虫www.igavelauctions.com.py","file_ext":"py","file_size_in_byte":7135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"311596668","text":"from pathlib import Path\nimport scipy.misc\nimport numpy as np\nimport os\nimport nibabel as nib\nfrom skimage.transform import resize\n\nclass PreProcessing:\n def __init__(self, hu_min=-512, hu_max=512, source=None, destination=None, resize_shape=(240, 240)):\n self.hu_min = hu_min\n self.hu_max = hu_max\n self.source = source\n self.destination = destination\n self.resize_shape = (resize_shape[0],resize_shape[1])\n\n def set_source(self, source):\n self.source = source\n\n def set_destination(self, destination):\n self.destination = destination\n\n def load_volume(self, case_id):\n case_id = str(case_id).zfill(5)\n img_name = ''\n for file in os.listdir(self.source):\n if str(case_id) in file and 'imaging' in file:\n img_name = file\n img = nib.load(os.path.join(self.source, img_name))\n return img\n\n def load_case(self, case_id):\n case_id = str(case_id).zfill(5)\n img_name = ''\n mask_name = ''\n for file in os.listdir(self.source):\n if str(case_id) in file and 'imaging' in file:\n img_name = file\n elif str(case_id) in file and 'segmentation' in file:\n mask_name = file\n img = nib.load(os.path.join(self.source, img_name))\n mask = nib.load(os.path.join(self.source, mask_name))\n return img, mask\n\n def hu_to_grayscale(self, volume):\n # Clip at max and min values if specified\n if self.hu_min is not None or self.hu_max is not None:\n volume = np.clip(volume, self.hu_min, self.hu_max)\n\n # Scale to values between 0 and 1\n mxval = np.max(volume)\n mnval = np.min(volume)\n im_volume = (volume - mnval) / max(mxval - mnval, 1e-3)\n\n # Return values scaled to 0-255 range, but *not cast to uint8*\n # Repeat three times to make compatible with color overlay\n im_volume = 255 * im_volume\n return im_volume\n\n @staticmethod\n def create_dir(dir):\n dir_path = Path(dir)\n if not dir_path.exists():\n dir_path.mkdir()\n return dir_path\n\n def preprocess (self, num_patients = 10, starting_patient=0):\n if self.source is None and self.destination is None:\n raise ValueError(\"Please make sure to set source and destination\")\n in_path = Path(self.source)\n if not in_path.exists():\n raise ValueError(\"Source directory doesn't exist\")\n\n _ = self.create_dir(self.destination)\n image_path = self.destination+'Image'\n image_path = self.create_dir(image_path)\n mask_path = self.destination+'Mask'\n mask_path = self.create_dir(mask_path)\n\n for i in range(starting_patient, starting_patient+num_patients):\n # Load segmentation and volume\n vol, seg = self.load_case(i)\n vol = vol.get_data()\n seg = seg.get_data()\n seg = seg.astype(np.int32)\n\n # Convert to a visual format\n vol_ims = self.hu_to_grayscale(vol)\n\n for j in range(vol_ims.shape[0]):\n #extracting only images that have tumor\n if np.max(seg[j]) == 2: #np.max(seg[j]) != 0:\n file_path = image_path / (\"{}_{:05d}.png\".format(i, j))\n image = resize(vol_ims[j], self.resize_shape)\n scipy.misc.imsave(str(file_path), image)\n file_path = mask_path / (\"{}_{:05d}.png\".format(i, j))\n mask = resize(seg[j], self.resize_shape,\n order=0, anti_aliasing=False,\n preserve_range=True)\n scipy.misc.imsave(str(file_path), mask)\n\n def preprocess_predictions(self,num_patients, starting_patient):\n if self.source is None and self.destination is None:\n raise ValueError(\"Please make sure to set source and destination\")\n in_path = Path(self.source)\n if not in_path.exists():\n raise ValueError(\"Source directory doesn't exist\")\n\n _ = self.create_dir(self.destination)\n image_path = self.destination+'test_images'\n image_path = self.create_dir(image_path)\n\n for i in range(starting_patient, starting_patient+num_patients):\n # Load segmentation and volume\n vol = self.load_volume(i)\n vol = vol.get_data()\n\n # Convert to a visual format\n vol_ims = self.hu_to_grayscale(vol)\n\n for j in range(vol_ims.shape[0]):\n file_path = image_path / (\"{:05d}_{:03d}.png\".format(i, j))\n scipy.misc.imsave(str(file_path), vol_ims[j])","sub_path":"project/Pytorch/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"423680484","text":"from db import get_db\nfrom flask import current_app,g\n\n\ndef get_data(num=\"0\", id=\"\", *args):\n with current_app.app_context():\n db = get_db()\n q = \"select \"\n for i in args:\n q += i+\", \"\n q = q[:-2]\n q += \" from post where id = ?\"\n if num == \"one\":\n post = db.execute(q, (id,)).fetchone()\n return post\n elif num == \"all\":\n post = db.execute(q, (id,)).fetchall()\n return post\n\n\ndef get_data_list(num=0):\n with current_app.app_context():\n db = get_db()\n posts = db.execute('''\n select id, title, author\n from post\n order by created desc\n limit ?\n ''', (num,)).fetchall()\n return posts\n\n\ndef delete_data(id=\"\"):\n with current_app.app_context():\n db = get_db()\n db.execute('''\n delete from post\n where id = ? ''', (id,))\n db.commit()\n","sub_path":"dbOpe.py","file_name":"dbOpe.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"453108070","text":"\"\"\"Adds metrics\n\nRevision ID: a7ad08c851d8\nRevises: bbab2640b23b\nCreate Date: 2021-03-29 14:57:27.081092\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"a7ad08c851d8\"\ndown_revision = \"bbab2640b23b\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n \"metrics\",\n sa.Column(\"id\", sa.Integer(), autoincrement=True, nullable=False),\n sa.Column(\"created_at\", sa.DateTime(), nullable=False),\n sa.Column(\"kind\", sa.String(length=50), nullable=True),\n sa.Column(\"guild_xid\", sa.BigInteger(), nullable=True),\n sa.Column(\"channel_xid\", sa.BigInteger(), nullable=True),\n sa.Column(\"user_xid\", sa.BigInteger(), nullable=True),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(\n op.f(\"ix_metrics_channel_xid\"), \"metrics\", [\"channel_xid\"], unique=False\n )\n op.create_index(op.f(\"ix_metrics_guild_xid\"), \"metrics\", [\"guild_xid\"], unique=False)\n op.create_index(op.f(\"ix_metrics_kind\"), \"metrics\", [\"kind\"], unique=False)\n op.create_index(op.f(\"ix_metrics_user_xid\"), \"metrics\", [\"user_xid\"], unique=False)\n\n\ndef downgrade():\n op.drop_index(op.f(\"ix_metrics_user_xid\"), table_name=\"metrics\")\n op.drop_index(op.f(\"ix_metrics_kind\"), table_name=\"metrics\")\n op.drop_index(op.f(\"ix_metrics_guild_xid\"), table_name=\"metrics\")\n op.drop_index(op.f(\"ix_metrics_channel_xid\"), table_name=\"metrics\")\n op.drop_table(\"metrics\")\n","sub_path":"src/spellbot/versions/versions/a7ad08c851d8_adds_metrics.py","file_name":"a7ad08c851d8_adds_metrics.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"284903195","text":"\"\"\"Transition elements on simplices.\"\"\"\n\nfrom itertools import product\nfrom ..finite_element import CiarletElement\nfrom ..polynomials import polynomial_set\nfrom ..functionals import PointEvaluation\nfrom ..quadrature import get_quadrature\nfrom ..symbolic import x, subs\nfrom .lagrange import Lagrange\n\n\nclass Transition(CiarletElement):\n \"\"\"Transition finite element.\"\"\"\n\n def __init__(self, reference, order,\n edge_orders=None, face_orders=None, variant=\"equispaced\"):\n if reference.name == \"triangle\":\n assert face_orders is None\n assert len(edge_orders) == 3\n elif reference.name == \"tetrahedron\":\n assert len(face_orders) == 4\n assert len(edge_orders) == 6\n\n bubble_space = Lagrange(reference, 1)\n\n dofs = []\n poly = polynomial_set(reference.tdim, 1, 1)\n for v_n, v in enumerate(reference.reference_vertices):\n dofs.append(PointEvaluation(v, entity=(0, v_n)))\n\n for edim in range(1, 4):\n for e_n in range(reference.sub_entity_count(edim)):\n entity = reference.sub_entity(edim, e_n)\n if edim == reference.tdim:\n entity_order = order\n elif edim == 1:\n entity_order = edge_orders[e_n]\n elif edim == 2:\n entity_order = face_orders[e_n]\n else:\n raise RuntimeError(\"Could not find order for this entity.\")\n\n # DOFs\n points, _ = get_quadrature(variant, entity_order + 1)\n for i in product(range(1, entity_order), repeat=edim):\n if sum(i) < entity_order:\n pt = entity.get_point([points[j] for j in i])\n dofs.append(PointEvaluation(pt, entity=(edim, e_n)))\n\n # Basis\n if entity_order > edim:\n if edim == reference.tdim:\n bubble = 1\n for f in bubble_space.get_basis_functions():\n bubble *= f\n elif edim == reference.tdim - 1:\n bubble = 1\n for i, f in enumerate(bubble_space.get_basis_functions()):\n if i != e_n:\n bubble *= f\n else:\n assert edim == 1 and reference.tdim == 3\n bubble = 1\n for i, f in enumerate(bubble_space.get_basis_functions()):\n if i in reference.edges[e_n]:\n bubble *= f\n space = Lagrange(entity, entity_order - edim - 1, variant=variant)\n vars = []\n origin = entity.vertices[0]\n used = []\n for p in entity.vertices[1:]:\n i = 0\n while p[i] == origin[i] or origin[i] == 1 or i in used:\n i += 1\n used.append(i)\n vars.append(origin[i] + (p[i] - origin[i]) * x[i])\n poly += [subs(f, x, vars) * bubble for f in space.get_basis_functions()]\n\n super().__init__(\n reference, order, poly, dofs, reference.tdim, 1\n )\n self.variant = variant\n self.face_orders = face_orders\n self.edge_orders = edge_orders\n\n def init_kwargs(self):\n \"\"\"Return the kwargs used to create this element.\"\"\"\n return {\"variant\": self.variant, \"face_orders\": self.face_orders,\n \"edge_orders\": self.edge_orders}\n\n names = [\"transition\"]\n references = [\"triangle\", \"tetrahedron\"]\n min_order = 1\n continuity = \"C0\"\n","sub_path":"symfem/elements/transition.py","file_name":"transition.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"348905925","text":"#!/usr/bin/env python\n\"\"\"\nThis is a simple simulation that implements a quadratic equation.\n\nWe are going to pretend it is a black box that we cannot modify.\nIt might have been written in any language and might implement\nvery complex calculations. PUQ assumes nothing. It just needs\na way to pass inputs parameters to the simulation, and a way\nto get the output. Because we are pretending we cannot modify\nthis program, we will have to create a wrapper script for it.\n\nThis example expects a command line with a filename.\nInput parameters are in a file formatted like this:\na=num\nb=num\nc=num\nx=num\n\nIt prints the result in a file called 'output.txt' formatted like this:\nThe answer is num.\n\n~/memosa/src/puq/examples/wrappers> cat input.txt\na=1\nb=2.0\nc=3\nx=4\n~/memosa/src/puq/examples/wrappers> ./sim_file.py input.txt\n~/memosa/src/puq/examples/wrappers> cat output.txt\nThe answer is 2.700000e+01.\n\"\"\"\n\nimport sys, re\ntry:\n # parse the input file\n filename = sys.argv[1]\n for line in open(filename, 'r'):\n exec(line)\n\n # now write the result to 'output.txt'\n out = open('output.txt', 'w')\n print >> out, \"The answer is %s.\" % format(a*x**2 + b*x + c, \"e\")\n out.close()\n\nexcept:\n # Something went wrong. 
Set errorcode on exit.\n raise\n sys.exit(1)\n","sub_path":"examples/wrappers/newdir/sim_file.py","file_name":"sim_file.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"444923117","text":"import json\nimport threading\nimport time\nimport websocket\nfrom logging import getLogger\n\n\nclass RealtimeAPI(object):\n \"\"\"\n Realtime API (JSON-RPC 2.0 over WebSocket)\n\n https://bf-lightning-api.readme.io/docs/realtime-api\n \"\"\"\n\n def __init__(self, channel, data_queue, is_daemon=False):\n self.logger = getLogger(__name__)\n self.ws = None\n self.running = False\n self.end_point = 'wss://ws.lightstream.bitflyer.com/json-rpc'\n self.channel = channel\n self.data_queue = data_queue\n self.is_daemon = is_daemon\n\n def on_open(self):\n self.logger.info('WebSocket connected')\n self.subscribe()\n\n def on_close(self):\n self.logger.info('WebSocket disconnected')\n\n def on_message(self, message):\n messages = json.loads(message)\n if 'method' not in messages or messages['method'] != 'channelMessage':\n return\n\n messages = messages['params']['message']\n for message in messages:\n self.data_queue.put(message)\n\n def on_error(self, error):\n self.logger.error(error)\n\n def subscribe(self):\n self.ws.send(json.dumps({'method': 'subscribe', 'params': {'channel': self.channel}}))\n\n def run(self):\n while self.running:\n try:\n self.ws = websocket.WebSocketApp(\n self.end_point,\n on_open=self.on_open,\n on_close=self.on_close,\n on_message=self.on_message,\n on_error=self.on_error)\n self.ws.run_forever()\n except Exception as e:\n self.logger.error(e)\n time.sleep(3)\n\n def start(self):\n self.logger.info('Start streaming')\n self.running = True\n thread = threading.Thread(target=self.run, daemon=self.is_daemon)\n thread.start()\n\n def stop(self):\n self.logger.info('Stop streaming')\n self.running = False\n self.ws.close()\n","sub_path":"py_bitflyer/realtime_api.py","file_name":"realtime_api.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"301949480","text":"import numpy as np\nimport pandas as pd\n\n\ndef compare_nested_dicts(first, second, epsilon=10E-6):\n \"\"\"Compares two dictionaries. 
Raises an assertion error when a difference is found.\"\"\"\n\n assert first.keys() == second.keys()\n\n for key in first.keys():\n if isinstance(first[key], dict):\n compare_nested_dicts(first[key], second[key])\n\n elif isinstance(first[key], np.ndarray):\n assert (compare_values_epsilon(first[key], second[key])).all()\n\n elif isinstance(first[key], pd.DataFrame):\n assert first[key].equals(second[key])\n\n elif isinstance(first[key], float):\n assert compare_values_epsilon(first[key], second[key])\n\n elif isinstance(first[key], list):\n compare_nested_iterables(first[key], second[key])\n\n else:\n assert first[key] == second[key], \"{} doesn't equal {}\".format(first[key], second[key])\n\n\ndef compare_values_epsilon(first, second, epsilon=10E-6):\n return abs(first - second) < epsilon\n\n\ndef compare_nested_iterables(first, second, epsilon=10E-6):\n\n for _first, _second in zip(first, second):\n\n if isinstance(_first, list):\n compare_nested_iterables(_first, _second)\n\n if isinstance(_first, float):\n assert compare_values_epsilon(_first, _second)\n","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"315893459","text":"import os\nimport re\nimport inspect\nimport sys\nfrom pynhost import grammarbase, utilities, commands\nfrom pynhost.platforms import platformhandler\ntry:\n from pynhost.grammars import _locals\nexcept:\n _locals = None\n\nclass GrammarHandler:\n def __init__(self):\n # grammar.app_context: [grammar instances with given app_content field]\n self.global_grammars = []\n self.active_global_grammars = []\n self.local_grammars = {}\n self.active_local_grammars = {}\n self.triggered = {\n 'word': {\n 'before': [],\n 'after': [],\n },\n 'match': {\n 'before': [],\n 'after': [],\n },\n 'command': {\n 'before': [],\n 'after': [],\n }\n }\n try:\n self.process_contexts = _locals.GLOBAL_CONTEXTS\n except AttributeError:\n self.process_contexts = {}\n\n def load_grammars(self):\n grammar_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'grammars')\n for module in utilities.get_modules_in_dir(grammar_dir):\n self.load_grammars_from_module(module)\n self.set_active_grammars()\n for context in self.local_grammars:\n self.local_grammars[context].sort()\n\n def load_grammars_from_module(self, module):\n clsmembers = inspect.getmembers(sys.modules[module.__name__], inspect.isclass)\n for member in clsmembers:\n # screen for objects with grammarbase.GrammarBase ancestor\n class_hierarchy = inspect.getmro(member[1])\n if len(class_hierarchy) > 2 and class_hierarchy[-2] == grammarbase.GrammarBase:\n grammar = self.initialize_grammar(member[1])\n app_pattern = grammar.app_context\n if grammar.app_context != '':\n app_pattern = re.compile(grammar.app_context)\n try:\n self.local_grammars[app_pattern].append(grammar)\n except KeyError:\n self.local_grammars[app_pattern] = [grammar]\n else:\n self.global_grammars.append(grammar)\n\n def set_active_grammars(self):\n try:\n self.active_global_grammars = utilities.filter_grammar_list(self.global_grammars, self.process_contexts)\n except KeyError:\n self.active_global_grammars = []\n self.active_local_grammars = {}\n self.active_global_grammars.sort(reverse=True)\n for app_pattern, grammar_list in self.local_grammars.items():\n active_list = utilities.filter_grammar_list(grammar_list, self.process_contexts)\n self.active_local_grammars[app_pattern] = active_list + 
self.active_global_grammars\n self.active_local_grammars[app_pattern].sort(reverse=True)\n\n def get_matching_grammars(self):\n active_window_name = platformhandler.get_active_window_name().lower()\n grammars = []\n for app_pattern in self.active_local_grammars:\n if app_pattern.search(active_window_name):\n grammars.extend(self.active_local_grammars[app_pattern])\n grammars.sort(reverse=True)\n return grammars or self.active_global_grammars\n\n# local grammar match = match grammar context and global\n# global grammar match = match global\n# no match: match open process grammars and global\n\n def add_actions_to_recording_macros(self, action_list):\n context = self.get_context(action_list)\n if context:\n grammars = []\n for app_pattern in self.active_local_grammars:\n if app_pattern.search(context):\n grammars.extend(self.active_local_grammars[app_pattern])\n else:\n grammars = self.active_global_grammars\n for grammar in grammars:\n for name in grammar._recording_macros:\n grammar._recording_macros[name].extend(action_list.actions)\n\n def get_context(self, action_list):\n if action_list.rule_match is None:\n return platformhandler.get_active_window_name().lower()\n return action_list.rule_match.rule.grammar.app_context\n\n def initialize_grammar(self, grammar_class):\n grammar = grammar_class()\n grammar._handler = self\n grammar.app_context = grammar.app_context.lower()\n grammar._set_rules()\n return grammar\n","sub_path":"pynhost/pynhost/grammarhandler.py","file_name":"grammarhandler.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"342304849","text":"import sqlite3\nimport requests\nimport json\nfrom datetime import datetime\n\ndataset = list()\nurl = 'https://www.quandl.com/api/v3/datasets/BSE/BOM504067.json?api_key=fKXsYh9Qi9ySbS8X6pNh'\ndata = requests.get(url).text\nmydataset = json.loads(data)\nfor row in mydataset['dataset'][\"data\"]:\n tradedate = year = datetime.strptime(row[0], '%Y-%m-%d')\n open = float(row[1])\n close = float(row[4])\n dataset.append({'tradeDate':tradedate, 'open':open, 'close':close})\n\ntry:\n with sqlite3.connect('quandl') as db:\n cursor = db.cursor() # this will create a cursor object\n\n # dynamic query using biding\n sql = '''insert into zensar values(:tradeDate,:open, :close)'''\n cursor.executemany(sql, dataset)\n db.commit() # Only for DML queries\nexcept Exception as E:\n print('Error: ', E)\nelse:\n print(\"DataBase table emp inserted with records!\")\n","sub_path":"Day 17/poc2.py","file_name":"poc2.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"378115321","text":"# Minecraft Turtle Example\nimport ministack\n\n\ndef tree(branchLen, t):\n if branchLen > 2:\n t.forward(branchLen)\n t.up(20)\n tree(branchLen - 2, t)\n t.down(40)\n tree(branchLen - 2, t)\n t.up(20)\n t.backward(branchLen)\n\n\n# get players position\npos = ministack.getPlayerPosition(\"xiaozhan\")\n\n# create minecraft turtle\nsteve = ministack.createTurtle()\n\n# point up\nsteve.setverticalheading(90)\n\n# set speed\nsteve.speed(0)\n\n# call the tree fractal\ntree(20, steve)\n","sub_path":"MC_turtle_examples/example_fractaltree.py","file_name":"example_fractaltree.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"112587887","text":"# Copyright 2018 Contributors to Hyperledger 
Sawtooth\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -----------------------------------------------------------------------------\n\"\"\"Test User Addresser\"\"\"\nimport logging\nimport pytest\n\nfrom rbac.common import addresser\nfrom tests.rbac.common.assertions import TestAssertions\n\nLOGGER = logging.getLogger(__name__)\n\n\n@pytest.mark.addressing\n@pytest.mark.library\nclass TestUserAddresser(TestAssertions):\n \"\"\"Test User Addresser\"\"\"\n\n def test_address(self):\n \"\"\"Tests address makes an address that identifies as the correct AddressSpace\"\"\"\n user_id = addresser.user.unique_id()\n user_address = addresser.user.address(user_id)\n self.assertIsAddress(user_address)\n self.assertEqual(\n addresser.address_is(user_address), addresser.AddressSpace.USER\n )\n\n def test_unique_id(self):\n \"\"\"Tests that unique_id generates a unique identifier and is unique\"\"\"\n id1 = addresser.user.unique_id()\n id2 = addresser.user.unique_id()\n self.assertIsIdentifier(id1)\n self.assertIsIdentifier(id2)\n self.assertNotEqual(id1, id2)\n\n def test_address_is(self):\n \"\"\"Tests that address_is returns AddressSpace.USER if it is a user\n address, and None if it is of another address type\"\"\"\n user_address = addresser.user.address(addresser.user.unique_id())\n role_address = addresser.role.address(addresser.role.unique_id())\n self.assertEqual(\n addresser.address_is(user_address), addresser.AddressSpace.USER\n )\n self.assertEqual(\n addresser.user.address_is(user_address), addresser.AddressSpace.USER\n )\n self.assertIsNone(addresser.user.address_is(role_address))\n self.assertEqual(\n addresser.address_is(role_address), addresser.AddressSpace.ROLES_ATTRIBUTES\n )\n\n def test_get_address_type(self):\n \"\"\"Tests that get_address_type returns AddressSpace.USER if it is a user\n address, and None if it is of another address type\"\"\"\n user_address = addresser.user.address(addresser.user.unique_id())\n role_address = addresser.role.address(addresser.role.unique_id())\n self.assertEqual(\n addresser.get_address_type(user_address), addresser.AddressSpace.USER\n )\n self.assertEqual(\n addresser.user.get_address_type(user_address), addresser.AddressSpace.USER\n )\n self.assertIsNone(addresser.user.get_address_type(role_address))\n self.assertEqual(\n addresser.get_address_type(role_address),\n addresser.AddressSpace.ROLES_ATTRIBUTES,\n )\n\n def test_addresses_are(self):\n \"\"\"Test that addresses_are returns True if all addresses are a user\n addresses, and False if any addresses are if a different address type\"\"\"\n user_address1 = addresser.user.address(addresser.user.unique_id())\n user_address2 = addresser.user.address(addresser.user.unique_id())\n role_address = addresser.role.address(addresser.role.unique_id())\n self.assertTrue(addresser.user.addresses_are([user_address1]))\n self.assertTrue(addresser.user.addresses_are([user_address1, user_address2]))\n self.assertFalse(addresser.user.addresses_are([role_address]))\n 
self.assertFalse(addresser.user.addresses_are([user_address1, role_address]))\n self.assertFalse(addresser.user.addresses_are([role_address, user_address1]))\n self.assertTrue(addresser.user.addresses_are([]))\n\n def test_address_deterministic(self):\n \"\"\"Tests address makes an address that identifies as the correct AddressSpace\"\"\"\n user_id1 = addresser.user.unique_id()\n user_address1 = addresser.user.address(user_id1)\n user_address2 = addresser.user.address(user_id1)\n self.assertIsAddress(user_address1)\n self.assertIsAddress(user_address2)\n self.assertEqual(user_address1, user_address2)\n self.assertEqual(\n addresser.address_is(user_address1), addresser.AddressSpace.USER\n )\n\n def test_address_random(self):\n \"\"\"Tests address makes a unique address given different inputs\"\"\"\n user_id1 = addresser.user.unique_id()\n user_id2 = addresser.user.unique_id()\n user_address1 = addresser.user.address(user_id1)\n user_address2 = addresser.user.address(user_id2)\n self.assertIsAddress(user_address1)\n self.assertIsAddress(user_address2)\n self.assertNotEqual(user_address1, user_address2)\n self.assertEqual(\n addresser.address_is(user_address1), addresser.AddressSpace.USER\n )\n self.assertEqual(\n addresser.address_is(user_address2), addresser.AddressSpace.USER\n )\n\n def test_address_static(self):\n \"\"\"Tests address makes the expected output given a specific input\"\"\"\n user_id = \"966ab67317234df489adb4bc1f517b88\"\n expected_address = (\n \"bac00100003333e7570f3f6f7d2c1635f6deea1111ff00000000000000000000000000\"\n )\n user_address = addresser.user.address(user_id)\n self.assertIsAddress(user_address)\n self.assertEqual(user_address, expected_address)\n self.assertEqual(\n addresser.address_is(user_address), addresser.AddressSpace.USER\n )\n","sub_path":"tests/rbac/common/addresser/user_test.py","file_name":"user_test.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"288158819","text":"n = int(input())\ns = set(map(int, input().split())) \na = int(input())\n\nfor i in range(a):\n k = []\n k = input().split()\n if k[0] == 'pop':\n s.pop()\n elif k[0] == 'remove':\n s.remove(int(k[1]))\n elif k[0] == 'discard':\n s.discard(int(k[1]))\n else:\n print('not a command')\n\nprint(sum(s))\n","sub_path":"0036. Remove Elements Set.py","file_name":"0036. Remove Elements Set.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"390490142","text":"from flask import current_app as capp\nfrom eve.utils import home_link\nfrom .labels import LABELS\nimport re\n\n\ndef get_cfg():\n \"\"\"\n Will get all necessary data out of the eve-app.\n It reads 'SERVER_NAME', 'API_NAME', and'DOMAIN' out of app.config as well\n as app.url_map\n\n The Hirarchy of Information is:\n 1. list all endpoints from url_map\n 2. update with data out of DOMAIN (specific fields)\n\n :returns: dict with 'base', 'server_name', 'api_name', 'domains' for\n template\n \"\"\"\n cfg = {}\n base = home_link()['href']\n if '://' not in base:\n protocol = capp.config['PREFERRED_URL_SCHEME']\n print(base)\n base = '{0}://{1}'.format(protocol, base)\n\n cfg['base'] = base\n cfg['domains'] = {}\n cfg['server_name'] = capp.config['SERVER_NAME']\n cfg['api_name'] = capp.config.get('API_NAME', 'API')\n # 1. parse rules from url_map\n cfg['domains'] = parse_map(capp.url_map)\n # 2. 
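The addresser tests above pin down three properties: the same id always maps to the same address, distinct ids map to distinct addresses, and every address is 70 hex characters (Sawtooth's address length). A generic hash-based sketch that satisfies those same properties — illustrative only, not the actual `rbac.common.addresser` scheme, whose `bac001...` layout encodes more structure than a plain digest:

```python
import hashlib
import uuid

NAMESPACE = "bac001"  # 6-hex-char prefix, matching the expected_address above

def unique_id():
    return uuid.uuid4().hex

def address(object_id):
    # 6 namespace chars + 64 digest chars = 70 hex chars total
    digest = hashlib.sha512(object_id.encode()).hexdigest()
    return NAMESPACE + digest[:64]

a1 = address("966ab67317234df489adb4bc1f517b88")
a2 = address(unique_id())
assert a1 == address("966ab67317234df489adb4bc1f517b88")  # deterministic
assert a1 != a2                                           # unique per id
assert len(a1) == 70                                      # Sawtooth address length
```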
Load schemas and paths from the config and update cfg\n domains = {}\n for domain, resource in list(capp.config['DOMAIN'].items()):\n if resource['item_methods'] or resource['resource_methods']:\n # hide the shadow collection for document versioning\n if 'VERSIONS' not in capp.config or not \\\n domain.endswith(capp.config['VERSIONS']):\n domains[domain] = endpoint_definition(domain, resource)\n\n cfg['domains'].update(domains)\n return cfg\n\n\ndef parse_map(url_map):\n \"\"\"\n will extract information out of the url_map and provide them in a dict-form\n :param url_map: an url_map in the format like app.url_map from eve\n :returns: empty dict if url-endpoints with methods\n \"\"\"\n ret = {}\n for rule in url_map.iter_rules():\n line = str(rule)\n # first part if the rule specifies the endpoint\n # between the first two '/' is the resource\n resource = line.split(\"/\")[1]\n # the endpoint is described by a regex, but we want only the name\n path = re.sub(r'<(?:[^>]+:)?([^>]+)>', '{\\\\1}', line)\n if resource not in ret:\n # this is the first path of this resource, create dict-entry\n ret[resource] = {'paths': {}, 'description': {}}\n # add path to dict\n ret[resource]['paths'][path] = {}\n for method in rule.methods:\n if method in ['GET', 'POST', 'PATCH', 'PUT', 'DELETE']:\n # we only display these methods, other HTTP-Methods don't need\n # documentation\n ret[resource]['paths'][path][method] = {}\n return ret\n\n\ndef identifier(resource):\n name = resource['item_lookup_field']\n ret = {\n 'name': name,\n 'type': 'string',\n 'required': True,\n }\n return ret\n\n\ndef endpoint_definition(domain, resource):\n \"\"\"\n gets the documentation of a specified endpoint\n :param domain: the endpoint\n :param resource: the resource-subdict of config['DOMAIN']\n :returns: the documentation as a dict (paths, methods, fields)\n \"\"\"\n ret = {}\n ret['description'] = resource.get('description', {})\n ret['paths'] = paths(domain, resource)\n return ret\n\n\ndef schema(resource, field=None):\n \"\"\"extracts the detailed cerberus-schema of this endpoint\n :param resource: the resource of the endpoint\n :param field: the field for which the schema will be returned.\n If no field specified, return a dict for all fields of the endpoint\n :returns: schema as dict\n \"\"\"\n ret = []\n if field is not None:\n params = {field: resource['schema'][field]}\n else:\n params = resource['schema']\n for field, attrs in list(params.items()):\n template = {\n 'name': field,\n 'type': 'None',\n 'required': False,\n }\n template.update(attrs)\n ret.append(template)\n # If the field defines a schema, add any fields from the nested\n # schema prefixed by the field name\n if 'schema' in attrs and all(isinstance(v, dict)\n for v in list(attrs['schema'].values())):\n for subfield in schema(attrs):\n subfield['name'] = field + '.' + subfield['name']\n ret.append(subfield)\n # If the field defines a key schema, add any fields from the nested\n # schema prefixed by the field name and a * to denote the wildcard\n if 'keyschema' in attrs:\n attrs['schema'] = attrs.pop('keyschema')\n for subfield in schema(attrs):\n subfield['name'] = field + '.*.' 
+ subfield['name']\n ret.append(subfield)\n return ret\n\n\ndef paths(domain, resource):\n \"\"\"returns the documentation of all endpoints of a domain for which we have\n descriptions in the config\n :param domain: the domain of the endpoints\n :param resource: the resource-subdict of config['DOMAIN']\n :returns: dict with paths and their documentation (methods, fields)\n \"\"\"\n ret = {}\n path = '/{0}'.format(resource.get('url', domain))\n path = re.sub(r'<(?:[^>]+:)?([^>]+)>', '{\\\\1}', path)\n pathtype = 'resource'\n ret[path] = methods(domain, resource, pathtype)\n\n primary = identifier(resource)\n path = '{0}/{1}'.format(path, pathparam(primary['name']))\n pathtype = 'item'\n ret[path] = methods(domain, resource, pathtype)\n\n alt = resource.get('additional_lookup', None)\n if alt is not None:\n path = '/{0}/{1}'.format(domain, pathparam(alt['field']))\n pathtype = 'additional_lookup'\n ret[path] = methods(domain, resource, pathtype, alt['field'])\n return ret\n\n\ndef methods(domain, resource, pathtype, param=None):\n \"\"\"extracts mathods and descriptions of a sepcified path\n :param domain: the domain of the endpoint\n :param resource: the resource-subdict of config['DOMAIN']\n :param pathtype: String from ('item', 'resource')\n :param param:\n :returns: dict of methods and their documentation (fields)\n \"\"\"\n ret = {}\n if pathtype == 'additional_lookup':\n method = 'GET'\n ret[method] = {}\n ret[method]['label'] = get_label(domain, pathtype, method)\n ret[method]['params'] = schema(resource, param)\n else:\n key = '{0}_methods'.format(pathtype)\n methods = resource[key]\n for method in methods:\n ret[method] = {}\n ret[method]['label'] = get_label(domain, pathtype, method)\n ret[method]['params'] = []\n if method == 'POST':\n ret[method]['params'].extend(schema(resource))\n elif method == 'PATCH':\n ret[method]['params'].append(identifier(resource))\n ret[method]['params'].extend(schema(resource))\n elif pathtype == 'item':\n ret[method]['params'].append(identifier(resource))\n return ret\n\n\ndef pathparam(param):\n return '{{{0}}}'.format(param)\n\n\ndef get_label(domain, pathtype, method):\n \"\"\"a description of what the method does (e.g. 
PATCH will upadate an item)\n :param domain: the domain of the endpoint\n :param pathtype: String from ('item', 'resource')\n :param method: the method for this label\n :returns: description as a string\n \"\"\"\n verb = LABELS[method]\n if method == 'POST' or pathtype != 'resource':\n noun = capp.config['DOMAIN'][domain]['item_title']\n article = 'a'\n else:\n noun = domain\n article = 'all'\n return '{0} {1} {2}'.format(verb, article, noun)\n","sub_path":"eve_docs/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"522102280","text":"#_*_ coding:utf-8_*_\nimport pythoncom, pyHook\nfrom win32api import OpenProcess\nfrom win32process import GetWindowThreadProcessId, GetModuleFileNameEx\nfrom win32con import PROCESS_VM_READ, PROCESS_QUERY_INFORMATION\nimport os.path\nfrom PyQt4.QtCore import QObject, pyqtSignal, QTimer\nfrom key import KeySequence, KeySequences\nfrom collections import deque\nfrom modifierskeystate import ModifiersKeyState\nfrom mode import Mode\nfrom keydictnode import KeyDictNode\nfrom shortcutinfo import ShortcutInfo, ShortcutsInfo\nfrom actiontype import ActionType\nfrom enum import Enum\nimport win32api\nimport win32con\nimport re\nimport win32gui\nfrom clipboard import Clipboard\n\n\nclass FuncDictDecorator(object):\n\n FUNC_DICT = dict()\n FUNC_CODE = 300\n @classmethod\n def inner_func(cls, func_name):\n def set_func(func):\n cls.FUNC_DICT[func_name] = func.__name__\n def return_func(*args, **kwargs):\n return func(*args, **kwargs)\n return return_func\n return set_func\n\n @classmethod\n def func_dict(cls):\n return cls.FUNC_DICT\n\nclass SearchResultType(Enum):\n NOT_MATCH_BREAK = 1\n\n\nclass EnVimKey(QObject):\n\n about_to_quit = pyqtSignal()\n changed_mode = pyqtSignal(str, str, int)\n about_to_search = pyqtSignal(object)\n\n def __init__(self, parent = None):\n QObject.__init__(self, parent = parent)\n\n # フックマネージャーを生成\n self.hook_manager = pyHook.HookManager()\n # KeyDown, KeyUpで呼ばれる関数を設定\n self.hook_manager.KeyDown = self.on_keyboard_event\n self.hook_manager.KeyUp= self.on_keyboard_event\n\n self.modifiers_key_state = ModifiersKeyState()\n\n self.timer = QTimer(self)\n self.timer.setInterval(800)\n self.timer.setSingleShot(True)\n self.timer.timeout.connect(self.on_timeout)\n self.timer_running = False\n\n self.window_dict = dict()\n self.candidates = deque()\n\n self.key_sequences = deque()\n self.window_id = None\n self.clipboard = Clipboard()\n self.current_node = None\n self.root_node = KeyDictNode(dict())\n for mode in Mode:\n self.root_node.set_key_dictionary(mode)\n for shortcuts_info in shortcuts_tuple:\n shortcuts_info.set_mode_shortcuts(self.root_node)\n self.current_exe_name = \"\"\n self._mode = Mode.NORMAL\n self.set_mode_node()\n self.waiting_return_key = \"\"\n self.func_dict = FuncDictDecorator.func_dict()\n\n\n @FuncDictDecorator.inner_func(\"insert\")\n def change_to_insert_mode(self):\n self._mode = Mode.INSERT\n self.set_mode_node()\n self.changed_mode.emit(\"Mode\", \"Insert\", 500)\n print(\"insert\")\n\n @FuncDictDecorator.inner_func(\"normal\")\n def change_to_normal_mode(self):\n self._mode = Mode.NORMAL\n self.set_mode_node()\n self.changed_mode.emit(\"Mode\", \"Normal\", 500)\n print(\"normal\")\n\n @FuncDictDecorator.inner_func(\"visual\")\n def change_to_visual_mode(self):\n self._mode = Mode.VISUAL\n self.set_mode_node()\n self.changed_mode.emit(\"Mode\", \"Visual\", 500)\n print(\"visual\")\n\n 
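Both `parse_map` and `paths` in the eve_docs record above rely on the same substitution to turn Werkzeug-style rule placeholders (`<converter:name>`) into documentation-friendly `{name}` path parameters. A standalone demonstration of that regex:

```python
import re

rule = "/people/<regex:person_id>/items/<item_id>"
# strips the optional converter prefix (everything up to ':') and keeps the name
path = re.sub(r'<(?:[^>]+:)?([^>]+)>', r'{\1}', rule)
print(path)  # /people/{person_id}/items/{item_id}
```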
@FuncDictDecorator.inner_func(\"focus\")\n def set_window_focus(self, search_window_name):\n def match_window_name(window_handle, dummy):\n window_title = str(win32gui.GetWindowText(window_handle))\n if window_name_pattern.match(window_title):\n self.target_window_handle = window_handle\n\n self.target_window_handle = None\n window_name_pattern = re.compile(search_window_name)\n win32gui.EnumWindows(match_window_name, \"\")\n if self.target_window_handle:\n win32gui.SetForegroundWindow(self.target_window_handle)\n win32gui.ShowWindow(self.target_window_handle, win32con.SW_MAXIMIZE)\n\n @FuncDictDecorator.inner_func(\"quit\")\n def quit_envimkey(self):\n self.about_to_quit.emit()\n\n @FuncDictDecorator.inner_func(\"copy\")\n def copy_to_clipboard(self,text):\n self.before_text = self.clipboard.get()\n self.clipboard.set(text)\n\n @FuncDictDecorator.inner_func(\"rollback\")\n def rollback_to_clipboard(self):\n self.clipboard.set(self.before_text)\n\n def set_mode_node(self):\n self.current_node = self.root_node.dictionary[self._mode]\n # self.print_dict(self.root_node.dictionary)\n\n def timer_start(self):\n print(\"start\")\n self.timer_running = True\n self.timer.start()\n\n def timer_stop(self):\n print(\"stop\")\n self.timer.stop()\n self.timer_running = False\n self.set_mode_node()\n if self.waiting_return_key:\n self.emulate_keys(self.waiting_return_key)\n self.waiting_return_key = None\n self.key_sequences.clear()\n if self.key_sequences:\n self.emulate_keys(KeySequences.from_sequences(self.key_sequences).action_atoms)\n self.key_sequences.clear()\n\n def emulate_keys(self, keys):\n print(keys)\n for action_atom in keys:\n if action_atom.type == ActionType.PRESS:\n if action_atom.extended:\n win32api.keybd_event(action_atom.info, 0, win32con.KEYEVENTF_EXTENDEDKEY | 0, 0)\n else:\n win32api.keybd_event(action_atom.info, 0, 0, 0)\n # win32api.keybd_event(action_atom.info, 0, win32con.KEYEVENTF_EXTENDEDKEY | win32con.KEYEVENTF_KEYUP, 0)\n win32api.keybd_event(action_atom.info, 0, win32con.KEYEVENTF_KEYUP, 0)\n elif action_atom.type == ActionType.DOWN:\n win32api.keybd_event(action_atom.info, 0, 0, 0)\n elif action_atom.type == ActionType.UP:\n win32api.keybd_event(action_atom.info, 0, win32con.KEYEVENTF_KEYUP, 0)\n else:\n func_arg = action_atom.info.args\n if func_arg is not None:\n getattr(self, self.func_dict[action_atom.info.text])(func_arg)\n else:\n getattr(self, self.func_dict[action_atom.info.text])()\n\n def search(self, key_text, exe_name):\n # self.print_dict(self.current_node.dictionary)\n print(exe_name)\n self.waiting_return_key = \"\"\n if not(self.timer_running):\n try:\n self.current_node = self.current_node.dictionary[exe_name]\n except KeyError:\n # マッチする実行ファイル名が登録されていない場合\n self.timer_stop()\n return\n try:\n node = self.current_node.dictionary[key_text]\n self.current_node = node\n # ノードの辞書が空で、結果が確定する場合\n if not(node.dictionary):\n self.key_sequences.clear()\n self.timer_stop()\n self.emulate_keys(node.return_key)\n return\n # 結果の候補が存在し、一定時間待機する必要がある場合\n if node.has_return_key:\n self.timer_start()\n self.waiting_return_key = node.return_key\n return\n # マッチしたが次の入力がなければ結果が確定しない場合\n self.timer_start()\n return\n # マッチするキーが存在しない場合\n except KeyError:\n if len(self.key_sequences) == 1:\n self.key_sequences.clear()\n self.timer_stop()\n return SearchResultType.NOT_MATCH_BREAK\n self.timer_stop()\n return\n\n def on_keyboard_event(self, event):\n # 自分でエミュレートしたイベントならそのままイベントを通す\n if event.Injected:\n # print(event.KeyID)\n return True\n\n # デバッグ用にescを押したら終了するようにしておく\n if 
event.KeyID == 27:\n self.timer.stop()\n self.about_to_quit.emit()\n return False\n\n window_id = event.Window\n # 前回のイベントとwindow_idが異なれば\n if self.window_id != window_id:\n self.window_id = window_id\n # キーイベントを受け取るアプリケーションのexe名を取得\n self.current_exe_name = self.get_exe_name(window_id)\n print(self.current_exe_name)\n\n code = event.KeyID\n # event.Transitionはpressなら0, releaseなら128\n is_press_event = not(event.Transition)\n # モディファイアーが押されているかチェックし、modifiers_key_stateの状態を更新する\n pressed_modifier = self.modifiers_key_state.pressing(code, is_press_event)\n\n # モディファイアーが押されていれば以降の処理をしない\n if pressed_modifier:\n print(\"modifier\", code)\n return True\n\n # キーがReleaseされるイベントなら以降の処理をしない\n if not(is_press_event):\n return True\n\n # キーコードとモディファイアーの状態をもとにキーシークエンスを生成\n seq = KeySequence.from_code(code, self.modifiers_key_state.presses())\n\n self.key_sequences.append(seq)\n\n search_result = self.search(seq.text, self.current_exe_name)\n\n if search_result == SearchResultType.NOT_MATCH_BREAK:\n return True\n\n # Falseを返すとイベントを無視し、Trueを返すとイベントを処理する\n return False\n\n def get_exe_name(self, window_id):\n try:\n base_exe_name = self.window_dict[window_id]\n except KeyError:\n # プロセスIDを取得\n thread_id, process_id = GetWindowThreadProcessId(window_id)\n # プロセスハンドルを取得\n process_handle = OpenProcess(PROCESS_VM_READ|PROCESS_QUERY_INFORMATION, 0, process_id)\n # キーを受け取るアプリケーションのexe名を取得\n name = GetModuleFileNameEx(process_handle, 0)\n # .exeを除く\n base_exe_name = os.path.basename(name)[:-4]\n self.window_dict[window_id] = base_exe_name\n return base_exe_name\n\n def _window_enum_callback(self, hwnd, wildcard):\n is_visible = win32gui.IsWindowVisible(hwnd)\n if not(is_visible):\n return\n\n title = str(win32gui.GetWindowText(hwnd))\n if title != \"\" and title != \"Program Manager\":\n try:\n exe_name = self.get_exe_name(hwnd)\n except:\n exe_name = \"\"\n self.candidates.append((title, exe_name, hwnd))\n\n def find_window_wildcard(self):\n self.candidates.clear()\n win32gui.EnumWindows(self._window_enum_callback, \"\")\n\n @FuncDictDecorator.inner_func(\"search\")\n def search_exe(self):\n self.find_window_wildcard()\n self.about_to_search.emit(self.candidates)\n\n def key_unlock(self):\n # キーボードフックを解除する\n self.hook_manager.UnhookKeyboard()\n self.thread().quit()\n\n def keylock(self):\n # キーボードフックを開始する\n self.hook_manager.HookKeyboard()\n pythoncom.PumpWaitingMessages()\n\n def print_dict(self, d, depth = 0):\n def print_indent(depth, *args, **kwargs):\n print(\" \"*8*depth, *args, **kwargs)\n\n for x, y in d.items():\n if len(y.dictionary):\n print_indent(depth, x)\n self.print_dict(y.dictionary, depth + 1)\n if y.has_return_key:\n print_indent(depth, x, y.return_key)\n\n def on_timeout(self):\n print(\"timeout\")\n self.timer_stop()\n\ninsert_info = ShortcutsInfo(\n Mode.INSERT,\n \"insert\",\n (\n ShortcutInfo(\"jj\", \"\"),\n ShortcutInfo(\"\", \"\"),\n ShortcutInfo(\"\", \"\"),\n ShortcutInfo(\"jw\", \"\\\"\\\"\"),\n ShortcutInfo(\"jq\", \"\\'\\'\"),\n ShortcutInfo(\"jp\", \"()\"),\n ShortcutInfo(\"jl\", \"[]\"),\n ShortcutInfo(\"js\", \"_\"),\n ShortcutInfo(\"ja\", \"-\"),\n ShortcutInfo(\"je\", \"=\"),\n ShortcutInfo(\"jka\", \"&\"),\n ShortcutInfo(\"jke\", \"!\"),\n ShortcutInfo(\"jkp\", \"+\"),\n ShortcutInfo(\"jkx\", \"*\"),\n ShortcutInfo(\"jkd\", \"$\"),\n ShortcutInfo(\"jks\", \"#\"),\n ShortcutInfo(\"jky\", \"\\\\\"),\n ShortcutInfo(\"jkh\", \"^\"),\n ShortcutInfo(\"jkt\", \"~\"),\n ShortcutInfo(\"jko\", \"|\"),\n ShortcutInfo(\"jkc\", \":\"),\n ShortcutInfo(\"jkq\", \"?\"),\n 
ShortcutInfo(\"jkm\", \"@\"),\n ShortcutInfo(\"\", \"\"),\n ShortcutInfo(\"\", \"\"),\n ShortcutInfo(\"\", \"\"),\n ShortcutInfo(\"\", \"\"),\n ShortcutInfo(\"\", \"\"),\n ShortcutInfo(\"\", \"\"),\n ShortcutInfo(\"\", \"\"),\n )\n )\n\nnormal_info = ShortcutsInfo(\n Mode.NORMAL,\n \"normal\",\n (\n ShortcutInfo(\"QQ\", \"\"),\n ShortcutInfo(\"h\", \"\"),\n ShortcutInfo(\"l\", \"\"),\n ShortcutInfo(\"ov\", \"\"),\n ShortcutInfo(\"ot\", \"\"),\n ShortcutInfo(\"oc\", \"\"),\n ShortcutInfo(\"v\", \"\"),\n ShortcutInfo(\"V\", \"\"),\n ShortcutInfo(\"i\", \"\"),\n ShortcutInfo(\"a\", \"\"),\n ShortcutInfo(\"I\", \"\"),\n ShortcutInfo(\"A\", \"\"),\n ShortcutInfo(\"ok\", \"\"),\n # ShortcutInfo(\"oj\", \"\"),\n ShortcutInfo(\"oj\", \"\"),\n ShortcutInfo(\"ga\", \"\"),\n ShortcutInfo(\"ge\", \"\"),\n ShortcutInfo(\"h\", \"\"),\n ShortcutInfo(\"j\", \"\"),\n ShortcutInfo(\"k\", \"\"),\n ShortcutInfo(\"l\", \"\"),\n ShortcutInfo(\"x\", \"\"),\n ShortcutInfo(\"G\", \"\"),\n ShortcutInfo(\"gz\", \"\"),\n ShortcutInfo(\"gg\", \"\"),\n ShortcutInfo(\"q\", \"\"),\n ShortcutInfo(\"D\", \"\"),\n ShortcutInfo(\"Y\", \"\"),\n ShortcutInfo(\"p\", \"\"),\n ShortcutInfo(\"yy\", \"\"),\n ShortcutInfo(\"dd\", \"\"),\n ShortcutInfo(\",tl\", \"\"),\n ShortcutInfo(\",ta\", \"\"),\n ShortcutInfo(\",tq\", \"\"),\n ShortcutInfo(\"dw\", \"\"),\n ShortcutInfo(\"db\", \"\"),\n ShortcutInfo(\"b\", \"\"),\n ShortcutInfo(\"w\", \"\"),\n ShortcutInfo(\"u\", \"\"),\n ShortcutInfo(\"r\", \"\"),\n ShortcutInfo(\"\", \"\"),\n ShortcutInfo(\"J\", \"\"),\n ShortcutInfo(\"K\", \"\"),\n ShortcutInfo(\"n\", \"\"),\n ShortcutInfo(\"N\", \"\"),\n ShortcutInfo(\"e\", \"\"),\n ShortcutInfo(\"a\", \"\"),\n ShortcutInfo(\"KC\", \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"),\n ShortcutInfo(\"se\", \"\"),\n )\n )\n\nvisual_info = ShortcutsInfo(\n Mode.VISUAL,\n \"visual\",\n (\n ShortcutInfo(\"l\", \"\"),\n ShortcutInfo(\"h\", \"\"),\n ShortcutInfo(\"j\", \"\"),\n ShortcutInfo(\"k\", \"\"),\n ShortcutInfo(\"b\", \"\"),\n ShortcutInfo(\"w\", \"\"),\n ShortcutInfo(\"ga\", \"\"),\n ShortcutInfo(\"ge\", \"\"),\n ShortcutInfo(\"G\", \"\"),\n ShortcutInfo(\"gg\", \"\"),\n ShortcutInfo(\"d\", \"\"),\n ShortcutInfo(\"y\", \"\"),\n ShortcutInfo(\"x\", \"\"),\n ShortcutInfo(\"v\", \"\"),\n )\n )\n\n\n\nshortcuts_tuple=(\n ShortcutsInfo(\n Mode.NORMAL,\n \"chrome\",\n (\n ShortcutInfo(\"dv\", \"\"),\n ShortcutInfo(\",tt\", \"\"),\n ShortcutInfo(\"/\", \"\"),\n ShortcutInfo(\"s\", \"\"),\n ShortcutInfo(\"U\", \"\"),\n ShortcutInfo(\",ts\", \"\"),\n ShortcutInfo(\",te\", \"\"),\n ShortcutInfo(\"\", \"\"),\n ShortcutInfo(\"f\", \"\"),\n ShortcutInfo(\"fl\", \"\"),\n ),\n deque([normal_info])\n ),\n ShortcutsInfo(\n Mode.INSERT,\n \"chrome\",\n (\n ),\n deque([insert_info])\n ),\n ShortcutsInfo(\n Mode.VISUAL,\n \"chrome\",\n (\n ),\n deque([visual_info])\n ),\n ShortcutsInfo(\n Mode.NORMAL,\n \"itunes\",\n (\n ShortcutInfo(\"q\", \"\"),\n ShortcutInfo(\"ga\", \"\"),\n )\n ),\n ShortcutsInfo(\n Mode.NORMAL,\n \"ConEmu64\",\n (\n ShortcutInfo(\"q\", \"\"),\n ShortcutInfo(\"ga\", \"\"),\n ShortcutInfo(\"i\", \"\"),\n ),\n deque([normal_info])\n ),\n ShortcutsInfo(\n Mode.INSERT,\n \"ConEmu64\",\n (\n ShortcutInfo(\"q\", \"\"),\n ShortcutInfo(\"ga\", \"\"),\n ShortcutInfo(\"jj\", \"\"),\n ),\n deque([insert_info])\n ),\n ShortcutsInfo(\n Mode.NORMAL,\n \"gvim\",\n (\n ShortcutInfo(\"oc\", \"\"),\n ShortcutInfo(\"ot\", \"\"),\n )\n ),\n ShortcutsInfo(\n Mode.NORMAL,\n \"python\",\n (\n ),\n 
deque([normal_info])\n ),\n ShortcutsInfo(\n Mode.INSERT,\n \"python\",\n (\n ),\n deque([insert_info])\n ),\n)\n","sub_path":"envimkey.py","file_name":"envimkey.py","file_ext":"py","file_size_in_byte":19765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"124647876","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport socket,threading,time,os,subprocess,cmd,sys\n\n#设置项:\nserver_addr='0.0.0.0' \t#默认监听地址\nserver_port=2146\t\t#默认监听端口\nserver_coding='utf-8'\t#编码\nserver_passwd=None\t\t#口令,None则不使用\n#server_passwd='123'\n\nclis=[] \t#当前已连接客户端的列表\nrooms={}\t#当前房间列表\n\n\ndef main():\n print('Welcome.')\n\n global server_addr\n global server_port\n if len(sys.argv)==2: #如果有命令行参数\n try:\n server_port=int(sys.argv[1])\n except Exception as e:\n print('Error:',e)\n askAddr()\n else:\n askAddr() #向用户询问addr和port\n\n t = threading.Thread(target=tcplinstener) #创建线程对象,入口点tcplinstener\n t.start() #启动线程\n\n time.sleep(0.2) #等待0.2秒\n\n CmdProcessor().cmdloop() #进入交互命令解释器的循环\n\ndef askAddr():\n global server_addr\n global server_port\n setaddr = input('Set Address (default %s):' % server_addr)\n if setaddr!='':\n server_addr=setaddr\n setport = input('Set Port (default %d):' % server_port)\n if setport!='':\n server_port=int(setport)\n\ndef tcplinstener():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #创建一个socket对象\n s.bind((server_addr, server_port)) #将socket绑定到设定的地址和端口\n s.listen(5) #开始监听,等待连接队列的最大长度设为5\n print(timestamp(),'Listening on',server_addr,':',server_port)\n while True: #接受客户端连接请求的循环\n sock, addr = s.accept() #等待客户端连接\n t = threading.Thread(target=Client(sock,addr).start)#创建一个Client对象,\n \t\t\t\t#并创建一个新线程对象,入口点为↑这个对象的start方法\n t.start() #启动线程\n\ndef timestamp(): #返回时间戳文本\n return time.strftime('[%H:%M:%S]')\n\ndef decode(data): #解码数据\n return str(data, server_coding)\n\ndef encode(text): #编码文本\n return bytes(text, server_coding)\n\nclass CmdProcessor(cmd.Cmd): #交互命令解释器 (一大坨烂代码)\n def __init__(self):\n super(CmdProcessor, self).__init__()\n self.prompt='>'\n def do_EOF(self, line):\n self.do_exit('')\n def do_exit(self, line):\n print('Bye~')\n exit()\n def do_clis(self, line):\n \"\"\"Print client list.\"\"\"\n if len(clis)<1:\n print('No Client.')\n else:\n print(len(clis), 'Client(s):')\n for cli in clis:\n print(cli.addr)\n def do_rooms(self, line):\n \"\"\"Print room list.\"\"\"\n if len(rooms)<1:\n print('No Room.')\n else:\n print(len(rooms),'Room(s)')\n print('[ID]\\t[Name]')\n for room in rooms.values():\n print(' ',room.id,'\\t',room.name, sep='')\n def do_newroom(self, line):\n \"\"\"newroom [name] Create a room.\"\"\"\n if not line:\n Room()\n else:\n Room(line)\n print('OK.')\n def do_say(self, line):\n \"\"\"say Send text to all clients.\"\"\"\n data = encode(line+'\\r\\n')\n for cli in clis:\n cli.beginSend(data)\n print(timestamp(), 'sending to', len(clis), 'client(s)')\n def do_delroom(self, line):\n \"\"\"delroom Delete a room.\"\"\"\n if not line:\n print(self.do_delroom.__doc__)\n return\n try:\n try:\n i=int(line)\n except Exception:\n print('\"'+line+'\"','is not a number.')\n return\n \n if i in rooms:\n rooms[int(line)].deleteRoom()\n print('OK.')\n else:\n print('Error: No such room.')\n except Exception as ex:\n print('Error:', ex)\n \n \nclass Client(object):\n def __init__(self, sock, addr):\n self.sock=sock #绑定sock到这个对象\n self.mf=self.sock.makefile(encoding=server_coding, errors='ignore')\n self.addr=addr #同上\n self.addrstr= '(%s:%d)' % addr #用来显示的客户端地址字符串\n self.room=None #房间待选择\n 
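EnVimKey above dispatches actions through `FuncDictDecorator`: the decorator records `{action name: method name}` in a class-level dict at class-definition time, and `emulate_keys` later resolves the method with `getattr`. A minimal self-contained sketch of that registration pattern (names are illustrative; the original also wraps the method, which is omitted here):

```python
class Registry:
    FUNC_DICT = {}

    @classmethod
    def register(cls, action_name):
        def set_func(func):
            # map the public action name to the method's attribute name
            cls.FUNC_DICT[action_name] = func.__name__
            return func  # keep the original method usable as-is
        return set_func


class Handler:
    @Registry.register("insert")
    def change_to_insert_mode(self):
        print("insert mode")

    def dispatch(self, action_name, *args):
        # resolve the registered method by name and call it on this instance
        getattr(self, Registry.FUNC_DICT[action_name])(*args)


Handler().dispatch("insert")  # -> insert mode
```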
self.name=None #名字待输入\n\n def start(self):\n try:\n self._start() #其实完全可以把_start里的代码搬到这里的说……\n except self.RecvError: #捕获_start方法内部调用的remoteInput方法抛出的RecvError\n print(timestamp(),'Disconnceted with error:',self.addrstr)\n except self.RecvZero: #同上\n print(timestamp(),'Disconnceted:',self.addrstr)\n finally: #临死前要做的三件事:\n if self.room!=None: #如果在房间里,\n self.room.clientOut(self) #要从房间里出来;\n if self in clis: #如果在客户端列表里,\n clis.remove(self) #要从里面删掉;\n self.sock.close() #还要记得关闭连接。\n #print(timestamp(),'Disconnceted:',self.addrstr)\n\n class RecvError(BaseException):pass\n class RecvZero(BaseException):pass\n\n def _start(self):\n print(timestamp(),'New connection form', self.addrstr)\n clis.append(self) #添加这个客户端对象到列表里\n if(self.login()): #输密码/输名字……\n print(timestamp(), 'Login:', self.addrstr, 'with name:', self.name)\n else:\n self.sock.close()\n print(timestamp(), 'Failed Login:', self.addrstr)\n return\n \n while True: #选房间循环,直到选择了房间才会跳出(类似的循环在此class经常出现)\n text='Rooms:\\r\\n'\n for i,room in rooms.items():\n text+='\\t{0}) {1} (now {2} users)\\r\\n'.format(i, room.name, len(room.clis))\n text+='\\tn) New Room.\\r\\nPlease Choose:'\n line = self.remoteInput(text) #读取客户端的输入\n if line==None: #开始判断\n return\n elif line=='' or line.isspace():\n continue\n elif line=='n':\n line = self.remoteInput(\"Set Room's Name (optional):\")\n if line=='':\n self.room = Room() #创建一个新房间\n else:\n self.room = Room(line) #创建一个新房间(指定名字)\n break #跳出房间循环\n elif line.isdigit():\n try:\n self.room=rooms[int(line)]\n break #跳出房间循环\n except Exception as e:\n self.send(encode('Input error or room not found.\\r\\n'))\n self.room.clientIn(self) #“通知”房间有客户端加入\n #self.send(encode(\"You are in Room '{0}' now.\\r\\n\".format(self.room.name)))\n while True: #读取聊天消息循环\n textline = self.remoteInput(noNewLine=True, sendNewLine=False)\n if textline == '' or textline.isspace():\n continue\n else:\n if not self.room.closed:\n self.room.textForm(self, textline)\n #print(timestamp(), self.addrstr, textline)\n\n \n def remoteInput(self, text=None, noNewLine=True, sendNewLine=True):\n \t\t\t\t#实际客户端的一行输入\n if text:\n self.sock.sendall(encode(text))\n try:\n line = self.mf.readline()\n except Exception as e: #如果捕获到异常\n raise self.RecvError() #抛出RecvError\n #print(encode(line))\n if line:\n if noNewLine:\n line = line.strip('\\r\\n')\n if sendNewLine:\n self.send(encode('\\r\\n'))\n return line\n else: #“接收”到0字节数据,说明已断开连接\n raise self.RecvZero() #抛出RecvZero\n\n def login(self): #(因为代码太长(lan)所以单独作一个方法)\n self.send(encode('Welcome! 
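One genuine bug in the `login()` method that continues below: `failedCount` is incremented on a wrong password but never initialized, so the very first failed attempt raises `NameError` instead of counting toward the three-strike limit. A corrected sketch of just the password loop (a hypothetical standalone rewrite that keeps the intended behaviour; the callables stand in for the class's I/O methods):

```python
def check_password(remote_input, send, server_passwd):
    failed_count = 0  # must be initialized before the loop
    while True:
        if remote_input('Password:') == server_passwd:
            send('OK.\r\n')
            return True
        failed_count += 1
        if failed_count >= 3:
            return False
```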
(server coding={0})\\r\\n'.format(server_coding)))\n if server_passwd!=None and server_passwd!='': #如果有口令就问口令\n while True:\n passwd = self.remoteInput('Password:')\n if passwd == server_passwd:\n self.sock.sendall(encode('OK.\\r\\n'))\n break\n else:\n failedCount+=1\n if(failedCount>=3):\n return False\n print(timestamp(), 'Failed Login:', self.addrstr)\n while True: #询问名字\n name = self.remoteInput(\"Input your name:\")\n if name: #如果非空字符串\n for cli in clis: \n if name==cli.name:\n self.send(encode('Sorry, this name is already in use.\\r\\n'))\n break\n else: #如果没有被break\n self.send(encode('Welcome, ' + name + '\\r\\n'))\n self.name=name\n break\n return True\n\n def beginSend(self, datas): #异步发送数据\n t = threading.Thread(target=self.sock.sendall, args=(datas,))\n t.setDaemon(True)\n t.start()\n\n def send(self, datas): #向此客户端发送数据\n self.sock.sendall(datas)\n \n_roomID=0\nclass Room(object):\n def __init__(self, name=None):\n global _roomID\n _roomID+=1\n self.id=_roomID\n if name==None:\n self.name='Unnamed'\n else:\n self.name=name\n self.closed=False\n rooms[self.id]=self\n self.clis=[]\n\n def sendToClis(self, text):\n data2send=encode(text)\n for cli in self.clis:\n cli.beginSend(data2send)\n\n def clientIn(self, client):\n self.clis.append(client)\n self.sendToClis('\\r\\n{0} {1} is joined.'.format(timestamp(), client.name))\n \n def textForm(self, sendcli, text):\n print('\\r\\n{0} {{{3}}} {1} : {2}'.format(timestamp(), sendcli.name, text, self.id))\n self.sendToClis('\\r\\n{0} {1} : {2}'.format(timestamp(), sendcli.name, text))\n\n def clientOut(self, client):\n self.clis.remove(client)\n self.sendToClis('\\r\\n{0} {1} is leave.'.format(timestamp(), client.name))\n\n def deleteRoom(self):\n if self.closed:\n print('Error: this room is already closed.')\n return\n self.closed=True\n self.sendToClis('\\r\\n{0} this room is closed.'.format(timestamp()))\n rooms.pop(self.id)\n\nmain()\n","sub_path":"ChatRoomsServer.py","file_name":"ChatRoomsServer.py","file_ext":"py","file_size_in_byte":10544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"486989728","text":"#!/usr/bin/env python\r\n\"\"\"\r\nDownload all the posts from a subreddit and export it in xlsx.\r\n\"\"\"\r\n\r\nimport argparse\r\nimport logging\r\nimport os\r\nimport time\r\n\r\nimport pandas as pd\r\nimport praw\r\nfrom tqdm import tqdm\r\n\r\nlogger = logging.getLogger()\r\nSTARTTIME = time.time()\r\nBUFFER = 1000\r\n\r\n\r\ndef main(args):\r\n\r\n subreddit = args.subreddit\r\n file = args.file\r\n temps = args.time\r\n folder = \"Subreddit\"\r\n type = \"xls\"\r\n timemois = int(time.time()) - 600000\r\n\r\n # Définition du dernier timestamp à télécharger\r\n if not temps is None:\r\n logger.debug(\"Timestamp défini\")\r\n endtime = int(temps)\r\n else:\r\n logger.debug(\"Timestamp non défini\")\r\n endtime = int(time.time())\r\n # Définition du premier timestamp à télécharger\r\n if not file is None:\r\n logger.debug(\"Fichier défini\")\r\n nom, type = os.path.splitext(file)\r\n beginningtime = int(nom.rsplit(\"_\")[-1])\r\n subreddit = str(nom.rsplit(\"_\")[-2])\r\n file = file.find('posts_')\r\n if beginningtime >= timemois:\r\n beginningtime = timemois\r\n elif args.beginning is not None:\r\n beginningtime = args.beginning\r\n else:\r\n logger.debug(\"Fichier non défini\")\r\n beginningtime = 1212086987\r\n\r\n a = 0\r\n number = BUFFER\r\n steptime = endtime\r\n df = pd.DataFrame()\r\n\r\n # Tant que le nombre de posts est égal au buffer\r\n while (number == 
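download_posts_subreddit.py above encodes resume state in its output filename — `posts_<subreddit>_<timestamp>.xlsx` — and recovers both fields with `rsplit`, as in `main()`. A standalone check of that round trip; note that a subreddit name containing an underscore would break the `[-2]` lookup:

```python
import os

filename = "posts_france_1575555064.xlsx"
stem, ext = os.path.splitext(filename)   # 'posts_france_1575555064', '.xlsx'
timestamp = int(stem.rsplit("_")[-1])    # 1575555064
subreddit = stem.rsplit("_")[-2]         # 'france'
print(subreddit, timestamp, ext)
```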
BUFFER):\r\n logger.debug(\"beginningtime = \" + str(beginningtime))\r\n logger.debug(\"endtime = \" + str(endtime))\r\n # récupération du dataframe (inversé) des posts, du nombre de posts, et du dernier timestamp\r\n df, number, steptime = fetch_posts(\r\n df, subreddit, beginningtime, steptime, a)\r\n # Suppression de la dernière ligne du dataframe, puisque la prochaine recherche l'incluera\r\n if number == BUFFER:\r\n df = df.drop(df.index[len(df) - 1])\r\n a = a + 1\r\n logger.debug(\"While machin, boucle numéro \" + str(a))\r\n logger.debug(\"endtime est \" + str(endtime))\r\n\r\n df = df.sort_values(\"Date\")\r\n # Si un fichier a déjà été défini, on concatène son contenu avec le dataframe\r\n if not file is None:\r\n logger.debug(\"Fichier défini\")\r\n df = concatenate_excel(df, args.file, beginningtime)\r\n logger.debug(\"Opening subreddit folder...\")\r\n if not os.path.exists(folder):\r\n os.makedirs(folder)\r\n os.chdir(folder)\r\n logger.debug(\"Opening subreddit folder DONE\")\r\n\r\n # export du dataframe\r\n export(df, subreddit, endtime)\r\n\r\n # affichage du temps de traitement\r\n runtime = time.time() - STARTTIME\r\n print(\"Runtime : %.2f seconds\" % runtime)\r\n\r\n\r\ndef fetch_posts(df, subreddit, beginningtime, endtime, a):\r\n \"\"\"\r\n Extrait les commentaires du subreddit subreddit entre les timestamp \\\r\n beginningtime et endtime. Renvoie un dataframe panda\r\n \"\"\"\r\n n = 0\r\n columns = [\"id\", \"Name\", \"Date\", \"Score\", \"Ratio\", \"Nbr_comments\",\r\n \"Flair\", \"Domain\", \"Self text\", \"url\", \"permalink\",\r\n \"Author\", \"Author_flair_css\", \"Author_flair_text\", \"Gilded\"]\r\n reddit = redditconnect('test')\r\n sub = reddit.subreddit(subreddit)\r\n dd = []\r\n # Récupération des posts entre beginningtime et endtime\r\n for x in sub.submissions(beginningtime, endtime):\r\n n = n + 1\r\n timestamp = x.created_utc\r\n print(\"Fetching post {} for subreddit {} at timestamp {}…\".format(\r\n str(n), str(subreddit), str(timestamp)))\r\n dd.append({\"Score\": x.score,\r\n \"Author\": str(x.author),\r\n \"Author_flair_css\": str(x.author_flair_css_class),\r\n \"Author_flair_text\": str(x.author_flair_text),\r\n \"Ratio\": x.upvote_ratio,\r\n \"id\": x.name,\r\n \"permalink\": str(\"https://reddit.com\" + x.permalink),\r\n \"Name\": x.title,\r\n \"url\": x.url,\r\n \"Nbr_comments\": x.num_comments,\r\n \"Date\": timestamp,\r\n \"Flair\": str(x.link_flair_text),\r\n \"Self text\": str(x.selftext_html),\r\n \"Domain\": x.domain,\r\n \"Gilded\": int(x.gilded)\r\n })\r\n\r\n # Sortie de la boucle si buffer atteint\r\n if n == BUFFER:\r\n print(\"N = \" + str(BUFFER) + \", arrêt…\")\r\n break\r\n print(\"Fetching posts DONE.\")\r\n print(\"Creating pandas dataframe…\")\r\n # Code à optimiser (d, dd, df)\r\n dd = pd.DataFrame(dd)\r\n dd = dd[columns]\r\n df = df.append(dd)\r\n logger.debug(\"Creating pandas dataframe DONE.\")\r\n return df, n, timestamp\r\n\r\n\r\ndef concatenate_excel(df, file, beginningtime):\r\n \"\"\"\r\n Concatène le dataframe et le fichier excel entré en param.\r\n Renvoie un dataframe.\r\n \"\"\"\r\n logger.debug(\"Concatenate_excel\")\r\n df_old = pd.read_excel(file)\r\n df_old = df_old.loc[df_old['Date'] < beginningtime]\r\n logger.debug(\"fichier lu\")\r\n df = df_old.append(df)\r\n logger.debug(\"nouveau dataframe créé\")\r\n return df\r\n\r\n\r\ndef export(df, subreddit, endtime):\r\n \"\"\"\r\n Fonction d'export générale.\r\n Excel uniquement supporté pour le moment\r\n \"\"\"\r\n logger.debug(\"Début de l'export 
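`fetch_posts` above accumulates rows as a list of dicts, converts them to a DataFrame, and appends to the running frame with `df.append(...)`. `DataFrame.append` was removed in pandas 2.x; a sketch of the same accumulation using `pd.concat` (column names borrowed from the record):

```python
import pandas as pd

columns = ["id", "Name", "Score"]
first_batch = [{"id": "t3_a", "Name": "first post", "Score": 12}]
second_batch = [{"id": "t3_b", "Name": "second post", "Score": 7}]

df = pd.DataFrame(first_batch, columns=columns)
# pd.concat replaces the df.append(...) pattern used in fetch_posts above
df = pd.concat([df, pd.DataFrame(second_batch, columns=columns)],
               ignore_index=True)
print(df.sort_values("Score"))
```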
excel\")\r\n export_excel(df, subreddit, endtime)\r\n\r\n\r\ndef export_excel(df, subreddit, endtime):\r\n \"\"\"\r\n Fonction d'export vers excel.\r\n \"\"\"\r\n filename = \"posts_{}_{}.xlsx\".format(str(subreddit), str(int(endtime)))\r\n writer = pd.ExcelWriter(filename, engine='xlsxwriter',\r\n options={'strings_to_urls': False})\r\n logger.debug(\"df.to_excel\")\r\n df.to_excel(writer, sheet_name='Sheet1', index=False)\r\n logger.debug(\"writer.save\")\r\n writer.save()\r\n\r\n\r\ndef redditconnect(bot):\r\n \"\"\"\r\n Fonction de connexion à reddit\r\n \"\"\"\r\n user_agent = \"python:script:download_posts_subreddit\"\r\n\r\n reddit = praw.Reddit(bot, user_agent=user_agent)\r\n return reddit\r\n\r\n\r\ndef parse_args():\r\n\r\n parser = argparse.ArgumentParser(\r\n description='Download all the posts of a specific subreddit')\r\n parser.add_argument(\r\n '-f', '--file', type=str, help='The name of the file containing the old data, must contains the timestamp of the last record')\r\n parser.add_argument('-s', '--subreddit', type=str,\r\n help='The subreddit to download posts from. Default : /r/france', default=\"france\")\r\n parser.add_argument('-t', '--time', type=int,\r\n help='The max unixstamp to download', default=None)\r\n parser.add_argument('--debug', help=\"Affiche les informations de déboguage\",\r\n action=\"store_const\", dest=\"loglevel\", const=logging.DEBUG, default=logging.WARNING)\r\n parser.add_argument('-b', '--beginning', type=int,\r\n help='the min unixstamp to download', default=None)\r\n args = parser.parse_args()\r\n\r\n logging.basicConfig(level=args.loglevel)\r\n return args\r\n\r\n\r\nif __name__ == '__main__':\r\n main(parse_args())\r\n","sub_path":"download_posts_subreddit.py","file_name":"download_posts_subreddit.py","file_ext":"py","file_size_in_byte":7082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"313362338","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom typing import Type\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom torch.autograd import Variable\n\nfrom .base import BackProp\nfrom .vanilla import VanillaBackprop\n\n\nclass SmoothGrad(BackProp):\n \"\"\"\n SmoothGrad\n generate smoothed gradients out of backpropagation from image\n by adding random noise to input and average the gradient\n\n https://arxiv.org/abs/1706.03825\n https://github.com/utkuozbulak/pytorch-cnn-visualizations/blob/master/src/smooth_grad.py\n\n Args:\n backprop_model (VanillaBackprop): object to generate gradient\n param_n (int): Amount of images used to smooth gradient\n param_sigma_multiplier (int): Sigma multiplier when calculating std of noise\n \"\"\"\n\n child_backprop: BackProp\n param_n: int\n param_sigma_multiplier: int\n target_class_num: int\n\n def __init__(self,\n model: nn.Module,\n param_n: int,\n param_sigma_multiplier: int,\n device: torch.device = None,\n backpropCls: Type[BackProp] = VanillaBackprop\n ) -> None:\n\n # super init is not called intentionally because this object utilizes\n # child BackProp object for main calculation.\n\n self.child_backprop = backpropCls(model, device)\n self.param_n = param_n\n self.param_sigma_multiplier = param_sigma_multiplier\n\n def generate_gradients(self,\n input_image: Variable,\n target_class: int\n ) -> np.ndarray:\n\n # Generate an empty image/matrix\n smooth_grad = np.zeros(input_image.size()[1:])\n\n mean = 0\n sigma = self.param_sigma_multiplier / \\\n (torch.max(input_image) - torch.min(input_image)).data[0]\n\n # 
prepare numpy image array to initialize leaf variable\n input_image_np = input_image.cpu().data.numpy()\n\n for _ in range(self.param_n):\n # Generate noise\n noise = np.random.normal(mean, sigma, size=input_image_np.shape)\n # Add noise to the image\n noisy_imp_np = input_image_np + noise\n noisy_img = torch.from_numpy(\n noisy_imp_np\n ).type(\n torch.FloatTensor\n ).to(\n self.child_backprop.device\n )\n noisy_img.requires_grad_()\n # Calculate gradients\n vanilla_grads = self.child_backprop.generate_gradients(\n noisy_img, target_class)\n # Add gradients to smooth_grad\n smooth_grad = smooth_grad + vanilla_grads\n\n # Average it out\n smooth_grad = smooth_grad / self.param_n\n return smooth_grad\n","sub_path":"cnn_visualization/backprop/smoothgrad.py","file_name":"smoothgrad.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"582345845","text":"from pylab import *\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfig = figure()\nax = Axes3D(fig)\nX = np.arange(-4, 4, 0.25)\nY = np.arange(-4, 4, 0.25)\nX, Y = np.meshgrid(X, Y)\nR = np.sqrt(X**2 + Y**2)*pi\nZ = np.tan(R)*np.pi\n\nax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='cool')\n\nshow()\n","sub_path":"matplot/3d.py","file_name":"3d.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129041130","text":"# coding = utf-8\n# author = xy\n\nimport torch\nfrom torch import nn\n\n\nclass Rnn(nn.Module):\n def __init__(self, mode, input_size, hidden_size, dropout_p, bidirectional, layer_num, is_bn, batch_first=False):\n super(Rnn, self).__init__()\n\n self.mode = mode\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.dropout_p = dropout_p\n self.direction_num = bidirectional\n self.layer_num = layer_num\n self.is_bn = is_bn\n\n if mode == 'LSTM':\n self.rnn = nn.LSTM(\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=layer_num,\n bidirectional=bidirectional,\n dropout=dropout_p if layer_num > 1 else 0\n )\n elif mode == 'GRU':\n self.rnn = nn.GRU(\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=layer_num,\n bidirectional=bidirectional,\n dropout=dropout_p if layer_num > 1 else 0\n )\n if is_bn:\n self.layer_norm = nn.LayerNorm(input_size)\n self.drop = nn.Dropout(p=dropout_p)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n \"\"\" use xavier_uniform to initialize GRU/LSTM weights\"\"\"\n ih = (param for name, param in self.named_parameters() if 'weight_ih' in name)\n hh = (param for name, param in self.named_parameters() if 'weight_hh' in name)\n b = (param for name, param in self.named_parameters() if 'bias' in name)\n\n for t in ih:\n torch.nn.init.xavier_uniform_(t)\n for t in hh:\n torch.nn.init.orthogonal_(t)\n for t in b:\n torch.nn.init.constant_(t, 0)\n\n def forward(self, vec, mask, need_final_state=False):\n \"\"\"\n :param vec: tensor (seq_len, batch_size, input_size)\n :param mask: tensor (batch_size, seq_len)\n :return: outputs: tensor (seq_len, batch_size, hidden_size*bidirectional)\n \"\"\"\n\n # layer normalization\n if self.is_bn:\n seq_len, batch_size, input_size = vec.shape\n vec = vec.contiguous().view(-1, input_size)\n vec = self.layer_norm(vec)\n vec = vec.view(seq_len, batch_size, input_size)\n\n # dropout\n vec = self.drop(vec)\n\n # 这种方式\n lengths = mask.long().sum(1)\n lengths_sort, idx_sort = torch.sort(lengths, descending=True)\n _, idx_unsort = 
torch.sort(idx_sort)\n\n v_sort = vec.index_select(1, idx_sort)\n v_pack = nn.utils.rnn.pack_padded_sequence(v_sort, lengths_sort)\n outputs, state = self.rnn(v_pack, None)\n outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)\n outputs = outputs.index_select(1, idx_unsort)\n\n # 获得最后状态\n if need_final_state:\n # state = state.index_select(1, idx_unsort)\n\n len_dix = (lengths-1).view(-1, 1).expand(-1, outputs.size(2)).unsqueeze(0)\n state = outputs.gather(0, len_dix)\n state = state.squeeze(0)\n\n # 填充\n max_len = torch.max(lengths).item()\n seq_len = vec.size(0)\n if max_len != seq_len:\n pad_len = seq_len - max_len\n batch_size = vec.size(1)\n hidden_size = self.hidden_size * 2 if self.direction_num else self.hidden_size\n padding = outputs.new_zeros(pad_len, batch_size, hidden_size)\n outputs = torch.cat([outputs, padding], dim=0)\n\n if need_final_state:\n return outputs, state\n else:\n return outputs\n","sub_path":"modules/layers/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"503119778","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n__author__ = 'xiaosong Liu'\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom LogisticRegression import LR_Classifier\nfrom sklearn.cross_validation import train_test_split\nfrom ImagePreprocessing import load_images,image_show,image_expansion,image_grayscale\n\n\ndef image_aspect_ratio(image):\n img_height = float(image.shape[0])\n img_width = float(image.shape[1])\n\n return img_width / img_height\n\ndef image_pixel_sum(image):\n img_height = float(image.shape[0])\n img_width = float(image.shape[1])\n\n return img_width * img_height\n\ndef image_binarization_ratio(image):\n black_pixel_count = float(np.sum(image==0))\n pixel_count = float(image.shape[0] * image.shape[1])\n \n return black_pixel_count / pixel_count\n\ndef load_train_samples():\n first_path = 'split_pic_temp'\n secend_path = '0123456789abcdefghijklmnopqrstuvwxyz'\n\n sample_vec = [(image, index) for index, path in enumerate(secend_path) \n for image in load_images(os.path.join(first_path, path))]\n print(\"sample_count = %d\" % len(sample_vec)) \n return sample_vec\n\n\ndef data_preprocessing(images):\n img_expansion = [image_expansion(image_grayscale(image)) for image in images]\n #print(img_expansion[0].shape)\n x_vals = np.array([image.ravel() for image in img_expansion])\n # data normalization\n x_vals = np.array([np.where(x==0,255,0) for x in x_vals])\n\n #print(x_vals.shape)\n #print(y_vals.shape)\n return x_vals, img_expansion\n\ndef train_model(x_vals, y_vals):\n samples = load_train_samples()\n images, labels = zip(*samples)\n x_vals, _ = data_preprocessing(images,labels)\n y_vals = np.array(labels)\n\n model = LR_Classifier()\n model = LR_Classifier(learning_rate=0.01, training_epoch=3000, display_step=300)\n model.fit(x_vals,y_vals)\n\n return model\n\ndef prediction(model,test_images):\n test_vals, test_img_exp = data_preprocessing(test_images)\n predictions = model.pred(test_vals)\n\n return predictions\n\ndef accuracy(model, x_test, y_test):\n predictions = model.pred(x_test)\n accuracy_ = np.mean(np.equal(predictions,y_test).astype(np.float32))\n\n return accuracy_\n\ndef visualization_loss(x_vals, y_vals):\n plt.plot(x_vals, y_vals, 'k--', label='loss-values')\n plt.title('train-loss')\n plt.xlabel('training epoch counts')\n plt.ylabel('training loss values')\n plt.legend(loc='upper right')\n 
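The `forward` pass above implements the classic sort → pack → run → unpack → unsort dance so padded positions never reach the RNN. A minimal standalone version of that pattern; on recent PyTorch, `pack_padded_sequence(..., enforce_sorted=False)` does the sorting internally:

```python
import torch
import torch.nn as nn

rnn = nn.GRU(input_size=4, hidden_size=3)   # expects (seq_len, batch, feat)
vec = torch.randn(5, 2, 4)                  # two sequences, padded to length 5
lengths = torch.tensor([3, 5])              # true lengths, not sorted

lengths_sort, idx_sort = torch.sort(lengths, descending=True)
_, idx_unsort = torch.sort(idx_sort)

packed = nn.utils.rnn.pack_padded_sequence(vec.index_select(1, idx_sort),
                                           lengths_sort)
out, _ = rnn(packed)
out, _ = nn.utils.rnn.pad_packed_sequence(out)
out = out.index_select(1, idx_unsort)       # restore the original batch order
print(out.shape)                            # torch.Size([5, 2, 3])
```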
plt.show()\n\ndef pick_hyperparam(s,e):\n m = np.log10(s)\n n = np.log10(e)\n r = np.random.rand()\n r = m + (n-m)*r\n r = np.power(10, r)\n\n return r\n\ndef main():\n samples = load_train_samples()\n images, labels = zip(*samples)\n x_vals, _ = data_preprocessing(images)\n y_vals = np.array(labels)\n\n x_train, x_test, y_train, y_test = train_test_split(x_vals, y_vals, test_size=0.2, random_state=0)\n\n #alpha, lambd = 0.01, 0.64\n for i in range(10):\n alpha = pick_hyperparam(0.009, 0.02)\n lambd = pick_hyperparam(0.5, 0.7)\n print('alpha = %0.4f , lambda = %0.4f' % (alpha, lambd))\n model = LR_Classifier(learning_rate=alpha, training_epoch=1200, display_step=240, regularization_term=lambd)\n #flag = model.load_model()\n flag = False\n model.fit(x_train,y_train,load_flag=flag)\n\n #评估模型对测试样本的泛化能力\n accuracy_ = accuracy(model, x_test, y_test)\n print('Accuracy = %.3f' % accuracy_)\n\n #可视化训练集的loss值\n #epoch, loss = zip(*model.loss_recoding)\n #visualization_loss(epoch, loss)\n\n #test_images = load_images('test_temp')\n #test_labels = \n #predictions = prediction(model, x_test)\n #label = '0123456789abcdefghijklmnopqrstuvwxyz'\n #for image,index in zip(test_images,predictions):\n # print(label[index], end='\\t')\n # image_show(image)\n\n\nif __name__ == '__main__':\n main() \n#samples = load_samples()\n#for image,label in samples[10:12]:\n# print(label)\n# print(image.shape)\n# image_show(image)\n\n'''\nimg_vec = load_images('split_pic_temp')\n\nimg_aspect_ratio_vec = []\nimg_pixel_count_vec = []\nimg_binarization_ratio_vec = []\n\nfor image in img_vec:\n img_aspect_ratio = image_aspect_ratio(image)\n img_aspect_ratio_vec.append(img_aspect_ratio)\n img_pixel_count = image_pixel_sum(image)\n img_pixel_count_vec.append(img_pixel_count)\n img_binarization_ratio = image_binarization_ratio(image)\n img_binarization_ratio_vec.append(img_binarization_ratio)\n\nplt.plot(img_pixel_count_vec, img_binarization_ratio_vec, 'ko', label='picel count vs aspect ratio ')\n#plt.plot(class2_x, class2_y, 'kx', label='Class -1')\nplt.title('image feature')\nplt.xlabel('picel count')\nplt.ylabel('aspect ratio')\nplt.legend(loc='lower right')\nplt.show()\n\n'''\n\n","sub_path":"ImageClassifier.py","file_name":"ImageClassifier.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"351677853","text":"from kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.core.window import Window\n\nMILES_TO_KM = 1.61\n\n\nclass FromScratch(App):\n def build(self):\n Window.size = (600, 200)\n self.title = \"from_scratch\"\n self.root = Builder.load_file('from_scratch.kv')\n return self.root\n\n def handle_convert(self):\n result = float(self.root.ids.miles_input.text) * MILES_TO_KM\n self.root.ids.output_label.text = str(result) + \"\\n\" + 'Oops, \"m\" in the screenshot is not a good abbreviation for \"miles\"'\n\n def handle_increment(self, value):\n increment_calculation = float(self.root.ids.miles_input.text) + value\n self.root.ids.miles_input.text = str(increment_calculation)\n self.handle_convert()\n\nFromScratch().run()","sub_path":"prac_06/from_scratch.py","file_name":"from_scratch.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"453680132","text":"#import aller pakete\nfrom datetime import datetime\nfrom main import app\nimport os\nfrom PIL import Image\nimport wtforms_json\nimport json\nfrom os import 
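`pick_hyperparam` above draws hyperparameters log-uniformly: it samples uniformly between log10(s) and log10(e) and exponentiates, so every decade is equally likely — the usual choice for learning rates and regularization strengths. An equivalent one-liner for comparison:

```python
import numpy as np

def pick_hyperparam(s, e):
    r = np.log10(s) + (np.log10(e) - np.log10(s)) * np.random.rand()
    return np.power(10, r)

# the same distribution via the uniform sampler directly
alt = 10 ** np.random.uniform(np.log10(0.009), np.log10(0.02))
print(pick_hyperparam(0.009, 0.02), alt)
```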
urandom\n#import fertig\n\n#funktion um daten aus dem json zu laden, wird ab Version 2 des Apps nicht mehr genutzt, da Mongo DB verwendet wird\ndef load_values():\n datei_json = \"rezepte_data.json\"\n\n try:\n with open(datei_json) as open_file:\n datei_inhalt = json.load(open_file)\n except FileNotFoundError:\n datei_inhalt = {}\n\n\n return datei_inhalt\n\n\ndef load_json(json_path):\n try:\n with open(json_path) as open_file:\n user_data = json.load(open_file)\n except FileNotFoundError:\n user_data = {}\n\n return user_data\n\ndef update_json(json_path_up, user_data_up):\n with open(json_path_up, \"w\") as jsonFile:\n json.dump(user_data_up, jsonFile)\n\ndef save_json(json_path, user_data):\n with open(json_path, \"w\", encoding=\"utf-8\") as open_file:\n json.dump(user_data, open_file, indent=4)\n\n#funktion im bilder beim upload zu speichern\ndef save_pictures(from_picture):\n\trandom = urandom(8).hex()\n\t_, b_ext = os.path.splitext(from_picture.filename)\n\tpicture_fn = random + b_ext\n\tpicture_path = os.path.join(app.root_path, 'static/img/rzp_images', picture_fn)\n\n\t#print(\"da gehts nicht weiter\")\n\toutput_size = (500, 500)\n\ti = Image.open(from_picture)\n\ti.thumbnail(output_size)\n\ti.save(picture_path)\n\n\treturn picture_fn\n","sub_path":"webpage/main/daten.py","file_name":"daten.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"271296645","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 25 17:17:38 2021\n\n@author: lferiani\n\"\"\"\n\nimport tqdm\nimport shutil\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt\n\nfrom tierpsy.analysis.split_fov.FOVMultiWellsSplitter import (\n FOVMultiWellsSplitter, process_image_from_name)\n\nfrom tierpsy import DFLT_SPLITFOV_PARAMS_PATH, DFLT_SPLITFOV_PARAMS_FILES\nfrom tierpsy.helper.params.tracker_param import SplitFOVParams\nfrom tierpsy.analysis.compress.selectVideoReader import selectVideoReader\n\n\ndef test_from_raw():\n\n # where are things\n wd = Path('~/Hackathon/multiwell_tierpsy/12_FEAT_TIERPSY/').expanduser()\n raw_fname = (\n wd / 'RawVideos' / '20191205' /\n 'syngenta_screen_run1_bluelight_20191205_151104.22956805' /\n 'metadata.yaml'\n )\n masked_fname = Path(\n str(raw_fname)\n .replace('RawVideos', 'MaskedVideos_')\n .replace('.yaml', '.hdf5')\n )\n\n masked_fname.parent.mkdir(parents=True, exist_ok=True)\n\n json_fname = Path(DFLT_SPLITFOV_PARAMS_PATH) / 'HYDRA_96WP_UPRIGHT.json'\n\n splitfov_params = SplitFOVParams(json_file=json_fname)\n shape, edge_frac, sz_mm = splitfov_params.get_common_params()\n uid, rig, ch, mwp_map = splitfov_params.get_params_from_filename(\n masked_fname)\n px2um = 12.4\n\n # read image\n vid = selectVideoReader(str(raw_fname))\n status, img = vid.read_frame(0)\n\n fovsplitter = FOVMultiWellsSplitter(\n img,\n microns_per_pixel=px2um,\n well_shape=shape,\n well_size_mm=sz_mm,\n well_masked_edge=edge_frac,\n camera_serial=uid,\n rig=rig,\n channel=ch,\n wells_map=mwp_map)\n fig = fovsplitter.plot_wells()\n\n with open(masked_fname, 'w') as fid:\n pass\n fovsplitter.write_fov_wells_to_file(masked_fname)\n shutil.rmtree(masked_fname.parent)\n\n return\n\n\ndef test_from_new_fov_wells():\n masked_fname = Path(\n '/Users/lferiani/Hackathon/multiwell_tierpsy/12_FEAT_TIERPSY/'\n 'MaskedVideos/20191205/'\n 'syngenta_screen_run1_bluelight_20191205_151104.22956805/metadata.hdf5'\n )\n\n fs_from_wells = FOVMultiWellsSplitter(masked_fname)\n 
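`save_pictures` in the daten.py record above (its German comments say it stores uploaded recipe images) combines a random hex filename from `os.urandom` with PIL's in-place `thumbnail`. A self-contained sketch of the same pattern that fabricates an image instead of reading an upload (the output path is illustrative):

```python
from os import urandom
from PIL import Image

img = Image.new("RGB", (1200, 800), color=(40, 90, 160))  # stand-in upload
picture_fn = urandom(8).hex() + ".png"    # collision-unlikely filename
img.thumbnail((500, 500))                 # resizes in place, keeps aspect ratio
img.save(picture_fn)
print(picture_fn, img.size)               # e.g. 1a2b....png (500, 333)
```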
fs_from_wells.plot_wells()\n    return\n\n\ndef test_from_old_fov_wells():\n    # test from masked video with old /fov_wells\n    # when building from wells, no need for json\n    masked_fname = Path(\n        '/Users/lferiani/Hackathon/multiwell_tierpsy/12_FEAT_TIERPSY/'\n        '_MaskedVideos/20191205/'\n        'syngenta_screen_run1_bluelight_20191205_151104.22956805/metadata.hdf5'\n    )\n    fs_from_old_wells = FOVMultiWellsSplitter(masked_fname)\n    fs_from_old_wells.plot_wells()\n\n\ndef test_from_imgs():\n    json_fname = Path(DFLT_SPLITFOV_PARAMS_PATH) / 'HYDRA_96WP_UPRIGHT.json'\n    wd = Path(\n        '/Volumes/behavgenom$/Luigi/Data/'\n        'LoopBio_calibrations/wells_mapping/20190710/')\n    img_dir = wd\n    fnames = list(img_dir.rglob('*.png'))\n    fnames = [str(f) for f in fnames if '_wells' not in str(f)]\n\n    plt.ioff()\n    for fname in tqdm.tqdm(fnames):\n        process_image_from_name(fname, json_fname)\n    plt.ion()\n\n\nif __name__ == '__main__':\n\n    test_from_raw()\n    test_from_new_fov_wells()\n    test_from_old_fov_wells()\n    # test_from_imgs()","sub_path":"tierpsy/debugging/test_fovsplitting.py","file_name":"test_fovsplitting.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"556626742","text":"#!/usr/bin/python3\n\n##\n# This will act as a wrapper for curses so that we can\n# implement it into our board.\n##\n\nimport curses\n\nclass graphics:\n    \"\"\"A graphics class that acts as a wrapper for\n    curses\"\"\"\n    def __init__(self):\n        self.screen = curses.initscr()\n        curses.noecho()\n        curses.cbreak()\n        curses.curs_set(0)\n        self.screen.keypad(True)\n\n    def stop(self):\n        \"\"\"Execute controlled tear down of curses.\"\"\"\n        curses.nocbreak()\n        curses.curs_set(1)\n        self.screen.keypad(False)\n        curses.echo()\n        curses.endwin()\n","sub_path":"graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"629575444","text":"#!/usr/bin/env python\n\n\"\"\"\nThis is a test bed script to plot a bitstream with its\nsymbols shaped. 
\n\"\"\"\nimport sys\nsys.path.append(\"../../sim\")\n\nimport numpy as np\nimport scipy.signal as signal\nimport matplotlib.pyplot as plt\nfrom siggens import train_pulse as gen\nfrom siggens import PRN_bitstreams as prn\nfrom utils import freqaxis_shape as ut\nfrom modulators import constallation_mappers as mod\nfrom modulators import up_convertors as upcon\n\n##################### Parameters ######################\nf_sampl = 900e3 # sampling frequency in kHz\nT_int = 1 # entire signal length in ms\n\n##################### Simulation ######################\nt = np.arange(0, T_int, 1 / f_sampl) # time axis\n# f = ut.freq_fr_time (t)\t\t\t\t# frequency axis\ncd = prn.gold_seq(2, 6, 1) # code\nTs = 1 / 1023 # Nyquist's symbol interval\ntau = 1 # time acceleration factor\nTstr = Ts * tau # transmitted symbol interval\nbitrate = 1 / Tstr\ntd = 0.0 # initial delay of the sequence (time offset)\n\n# baseband signals\na1 = mod.rcos_bpsk_map(t, cd, bitrate, pw=Ts, alpha=1.0)\na2 = mod.rcos_bpsk_map(t, cd, bitrate, pw=Ts, alpha=0.5)\na3 = mod.rcos_bpsk_map(t, cd, bitrate, pw=Ts, alpha=0.0)\nc = mod.rect_tr(t, Tstr, 0, td, cd)\n\n##################### Plots ###########################\nplt.figure(1, figsize=(10, 15), dpi=300)\n\n# Time domain\n\nax1 = plt.subplot(311)\nplt.plot(t, np.real(a1), '-r', label='$x_{rcos}(t), \\\\beta = 1.0$')\nplt.plot(t, np.real(a2), '-b', label='$x_{rcos}(t), \\\\beta = 0.5$')\nplt.plot(t, np.real(a3), '-g', label='$x_{rcos}(t), \\\\beta = 0.0$')\nplt.plot(t, c, '-k', label='$x_{rect}(t)$')\nplt.grid(True)\nplt.legend(loc='upper left', bbox_to_anchor=(1.12, 1.35))\nplt.title('Pulse-shaped baseband signal')\nplt.ylabel('Voltage')\nplt.xlabel('Time')\nplt.axis([0, 10, -0.3, 1.3])\n\nplt.show()\n","sub_path":"srcpy/sim/ICMT_17/corrkeysight_rcos__gold.py","file_name":"corrkeysight_rcos__gold.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"358496988","text":"import requests\nfrom src.constants import API_KEY\n\n\ndef get_or_create(session, model, **kwargs):\n \"\"\"\n Get or create a database model. 
If it already exists, simply returns it.\n \"\"\"\n instance = session.query(model).filter_by(**kwargs).first()\n if instance:\n return instance\n else:\n instance = model(**kwargs)\n session.add(instance)\n session.commit()\n return instance\n\ndef get_stream(stream_id):\n \"\"\"\n Requests info about a certain stream.\n \"\"\"\n return requests.get(\n 'https://www.googleapis.com/youtube/v3/videos', \n params={\n 'part': 'snippet,liveStreamingDetails', \n 'id': stream_id, \n 'key': API_KEY\n }\n ).json()\n\ndef get_messages(chat_id):\n \"\"\"\n Requests the last 200 messages of a chat.\n \"\"\"\n return requests.get(\n 'https://www.googleapis.com/youtube/v3/liveChat/messages', \n params={\n 'liveChatId': chat_id, \n 'part': 'snippet,authorDetails', \n 'key': API_KEY\n }\n ).json()['items']\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"254824531","text":"import io\nimport os\nimport numpy as np\nimport pandas as pd\nfrom .datasetbase import DataSetBase\n\n\nclass CSVDataset(DataSetBase):\n\n def __init__(self, config, preprocessing=None, data=None):\n super().__init__(config)\n self.features = config['dataset']['features']\n self.labels = config['dataset']['labels']\n self.preprocessing = preprocessing\n self.load(config['dataset']['datapath'], data)\n def prepare(self, df):\n float_cols = [c for c in df if df[c].dtype == np.float64]\n df[float_cols] = df[float_cols].astype(np.float32)\n if self.preprocessing:\n df = self.preprocessing.process(df)\n df = df.dropna()\n for label in self.labels:\n if not label in df:\n df[label] = 0.0\n return df\n\n def load(self, datapath='.', data=None):\n tables = []\n if data:\n if type(data) is bytes:\n data = io.BytesIO(data)\n data.seek(0)\n df = pd.read_csv(data)\n df = self.prepare(df)\n tables.append(df)\n elif type(data) is list:\n self.files = data\n for f in self.files:\n df = pd.read_csv(f)\n df = self.prepare(df)\n tables.append(df)\n else:\n self.files = sorted([f for f in os.listdir(datapath)\n if f.endswith('.csv') or f.endswith('.gz')])\n for f in self.files:\n df = pd.read_csv(os.path.join(datapath, f))\n df = self.prepare(df)\n tables.append(df)\n self.data = pd.concat(tables, axis=0, ignore_index=True)\n self.data = self.data.reindex()\n\n def __getitem__(self, idx):\n return self.data[self.features].loc[idx].astype(np.float32).values, self.data[self.labels].loc[idx].astype(np.float32).values\n\n def __len__(self):\n return len(self.data)\n\n def __str__(self):\n return \"csv_dataset, features: \" + str(self.features) + \" labels: \" + str(self.labels) + \" rows: \" + str(len(self.data)) + str(self.files)\n","sub_path":"zworkflow/dataset/csv_dataset.py","file_name":"csv_dataset.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"399049040","text":"\"\"\"Entrypoints for osbuild\n\nThis module contains the application and API entrypoints of `osbuild`, the\ncommand-line-interface to osbuild. 
The `osbuild_cli()` entrypoint can be safely\nused from tests to run the cli.\n\"\"\"\n\n\nimport argparse\nimport json\nimport os\nimport sys\n\nimport osbuild\nimport osbuild.meta\nimport osbuild.monitor\n\n\nRESET = \"\\033[0m\"\nBOLD = \"\\033[1m\"\nRED = \"\\033[31m\"\nGREEN = \"\\033[32m\"\n\n\ndef mark_checkpoints(pipeline, checkpoints):\n points = set(checkpoints)\n\n def mark_stage(stage):\n c = stage.id\n if c in points:\n stage.checkpoint = True\n points.remove(c)\n\n def mark_assembler(assembler):\n c = assembler.id\n if c in points:\n assembler.checkpoint = True\n points.remove(c)\n\n def mark_pipeline(pl):\n for stage in pl.stages:\n mark_stage(stage)\n if pl.assembler:\n mark_assembler(pl.assembler)\n if pl.build:\n mark_pipeline(pl.build)\n\n mark_pipeline(pipeline)\n return points\n\n\ndef parse_manifest(path):\n if path == \"-\":\n manifest = json.load(sys.stdin)\n else:\n with open(path) as f:\n manifest = json.load(f)\n\n return manifest\n\n\ndef show_validation(result, name):\n if name == \"-\":\n name = \"\"\n\n print(f\"{BOLD}{name}{RESET} \", end='')\n\n if result:\n print(f\"is {BOLD}{GREEN}valid{RESET}\")\n return\n\n print(f\"has {BOLD}{RED}errors{RESET}:\")\n print(\"\")\n\n for error in result:\n print(f\"{BOLD}{error.id}{RESET}:\")\n print(f\" {error.message}\\n\")\n\n\ndef parse_arguments(sys_argv):\n parser = argparse.ArgumentParser(description=\"Build operating system images\")\n\n parser.add_argument(\"manifest_path\", metavar=\"MANIFEST\",\n help=\"json file containing the manifest that should be built, or a '-' to read from stdin\")\n parser.add_argument(\"--store\", metavar=\"DIRECTORY\", type=os.path.abspath,\n default=\".osbuild\",\n help=\"directory where intermediary os trees are stored\")\n parser.add_argument(\"--sources\", metavar=\"FILE\", type=os.path.abspath,\n help=\"json file containing a dictionary of source configuration\")\n parser.add_argument(\"-l\", \"--libdir\", metavar=\"DIRECTORY\", type=os.path.abspath, default=\"/usr/lib/osbuild\",\n help=\"the directory containing stages, assemblers, and the osbuild library\")\n parser.add_argument(\"--checkpoint\", metavar=\"ID\", action=\"append\", type=str, default=None,\n help=\"stage to commit to the object store during build (can be passed multiple times)\")\n parser.add_argument(\"--json\", action=\"store_true\",\n help=\"output results in JSON format\")\n parser.add_argument(\"--output-directory\", metavar=\"DIRECTORY\", type=os.path.abspath,\n help=\"directory where result objects are stored\")\n parser.add_argument(\"--inspect\", action=\"store_true\",\n help=\"return the manifest in JSON format including all the ids\")\n\n return parser.parse_args(sys_argv[1:])\n\n\n# pylint: disable=too-many-branches\ndef osbuild_cli():\n args = parse_arguments(sys.argv)\n manifest = parse_manifest(args.manifest_path)\n\n # first thing after parsing is validation of the input\n index = osbuild.meta.Index(args.libdir)\n res = osbuild.meta.validate(manifest, index)\n if not res:\n if args.json or args.inspect:\n json.dump(res.as_dict(), sys.stdout)\n sys.stdout.write(\"\\n\")\n else:\n show_validation(res, args.manifest_path)\n return 2\n\n pipeline = manifest.get(\"pipeline\", {})\n sources_options = manifest.get(\"sources\", {})\n\n if args.sources:\n with open(args.sources) as f:\n sources_options = json.load(f)\n\n pipeline = osbuild.load(pipeline, sources_options)\n\n if args.checkpoint:\n missed = mark_checkpoints(pipeline, args.checkpoint)\n if missed:\n for checkpoint in missed:\n 
print(f\"Checkpoint {BOLD}{checkpoint}{RESET} not found!\")\n print(f\"{RESET}{BOLD}{RED}Failed{RESET}\")\n return 1\n\n if args.inspect:\n result = {\"pipeline\": pipeline.description(with_id=True)}\n if sources_options:\n result[\"sources\"] = sources_options\n json.dump(result, sys.stdout)\n sys.stdout.write(\"\\n\")\n return 0\n\n if not args.output_directory and not args.checkpoint:\n print(\"No output directory or checkpoints specified, exited without building.\")\n return 0\n\n monitor_name = \"NullMonitor\" if args.json else \"LogMonitor\"\n monitor = osbuild.monitor.make(monitor_name, sys.stdout.fileno())\n\n try:\n r = pipeline.run(\n args.store,\n monitor,\n args.libdir,\n output_directory=args.output_directory\n )\n except KeyboardInterrupt:\n print()\n print(f\"{RESET}{BOLD}{RED}Aborted{RESET}\")\n return 130\n\n if args.json:\n json.dump(r, sys.stdout)\n sys.stdout.write(\"\\n\")\n else:\n if r[\"success\"]:\n print(\"tree id:\", pipeline.tree_id)\n print(\"output id:\", pipeline.output_id)\n else:\n print()\n print(f\"{RESET}{BOLD}{RED}Failed{RESET}\")\n\n return 0 if r[\"success\"] else 1\n","sub_path":"osbuild/main_cli.py","file_name":"main_cli.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"371589679","text":"import sqlite3 as db \r\nfrom tkinter import *\r\nfrom tkinter import messagebox \r\nimport os\r\n\r\ndef clear():\r\n os.system('cls')\r\n\r\n\r\n\r\nmaster = Tk()\r\n\r\nDBName = \"mydb.db\"\r\nTableName = \"lp_Registered\"\r\n\r\n\r\nLabel(master,text=\"Tìm thông tin chủ xe\").grid()\r\nLabel(master,text=\"License Plate: \").grid(row=1,column=0)\r\ntxtlicenseplate = Entry(master,width=20)\r\ntxtlicenseplate.grid(row=1,column=1)\r\nLabel(master,text=\"\").grid()\r\n\r\n\r\ndef execute_Query(Query):\r\n\tconn = db.connect(DBName)\r\n\tcur = conn.cursor()\r\n\tcur.execute(Query)\r\n\tconn.commit()\r\n\tdata = cur.fetchall()\r\n\tcur.close()\r\n\tconn.close()\r\n\treturn data\r\n\r\n\r\n# Take a list ABC from a table.\r\ndef take_List(Field,TableName):\r\n\tQuery = \"SELECT \"+Field+\" from \"+TableName\r\n\treturn execute_Query(Query)\r\n\r\ndef changeTextForFilIntoQuery(ValueCondition):\r\n\treturn \"\\\"\"+ValueCondition+\"\\\"\"\r\n\r\n\r\ndef find_Owner_Of_License_Plate(license_plate):\r\n\tlicense_plate = changeTextForFilIntoQuery(license_plate)\r\n\tQuery = \"SELECT b.* FROM lp_Registered a,info_User b where a.studentID = b.studentID AND a.LicensePlate = \"+license_plate\r\n\treturn execute_Query(Query)\r\n\r\ndef search(value,List):\r\n\tfor x in List:\r\n\t\tif(x[0] == value):\r\n\t\t\treturn True\r\n\treturn False\r\n\r\ndef printList(List):\r\n\tfor x in List:\r\n\t\tprint(x)\r\n\r\ndef show_Info_Of_Owner():\r\n\tlicense_plate = txtlicenseplate.get()\r\n\tList = take_List(\"LicensePlate\",TableName)\r\n\tif(search(license_plate,List)):\r\n\t\towner = find_Owner_Of_License_Plate(license_plate)\r\n\t\tprintList(owner)\r\n\telse:\r\n\t\tmessagebox.showinfo(\"\",\"Không tìm thấy thông tin của biển số này\")\r\n\r\n\r\n\r\n\r\n\r\nbtnsubmit = Button(master,text=\"Tìm\",command=show_Info_Of_Owner,width=20)\r\nbtnsubmit.grid(column=1)\r\n\r\n\r\n\r\n\r\nmainloop()\r\n","sub_path":"DB/Select.py","file_name":"Select.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"381097122","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport 
argparse\n\nfrom project import Project, Multi_Project\nfrom progress_learner import run_epoch, EventProgressEstimator\nfrom utils import TRAINING, VALIDATING, TESTING\nfrom config import Config, Qual_Config, Quan_Config\n\ndef learn_all_negative_samples():\n multi_p = Multi_Project.load(\"all_actions.proj\")\n\n config = Config()\n \n np.random.seed()\n\n with tf.Graph().as_default(), tf.Session() as session:\n for p in multi_p:\n if p.name != 'SlideAround':\n continue\n\n print ('-------------TRAINING FOR ' + p.name + ' -----------------')\n project_data = multi_p.data[p.name]\n\n total_data = np.concatenate( [project_data[TRAINING][0], project_data[VALIDATING][0], project_data[TESTING][0]], axis = 0 )\n total_lbl = np.concatenate( [project_data[TRAINING][1], project_data[VALIDATING][1], project_data[TESTING][1]], axis = 0 )\n total_info = np.concatenate( [project_data[TRAINING][2], project_data[VALIDATING][2], project_data[TESTING][2]], axis = 0 )\n\n s = \"model_\" + p.name\n with tf.variable_scope(s) as scope:\n print('-------- Setup m model ---------')\n m = EventProgressEstimator(is_training=True, name = p.name, config = config)\n \n with tf.variable_scope(s, reuse = True) as scope: \n print('-------- Setup mtest model ---------')\n mtest = EventProgressEstimator(is_training=False, name = p.name, config = config)\n \n session.run(tf.global_variables_initializer())\n \n \"\"\"\n Training first\n \"\"\"\n train_losses = []\n validate_losses = []\n\n for i in range(config.max_max_epoch):\n print('-------------------------------')\n start_time = time.time()\n lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)\n m.assign_lr(config.learning_rate * lr_decay)\n\n print(\"Epoch: %d Learning rate: %.6f\" % (i + 1, session.run(m.lr)))\n \n indices = np.arange(total_data.shape[0])\n\n if config.epoch_shuffle:\n np.random.shuffle(indices)\n\n train_loss = run_epoch(m, total_data[indices], total_lbl[indices], info = total_info[indices], training = True)\n \n \n saver = tf.train.Saver()\n saver.save(session, \"progress_all.mod\")\n\n print('-------- Saved progress.mod ---------') \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Save a learned model from combining TRAIN/VALIDATE/TEST data.')\n\n parser.add_argument('-a', '--action', action='store', metavar = ('ACTION'),\n help = \"Action type. Choose from 'SlideToward', 'SlideAway', 'SlideNext', 'SlidePast', 'SlideAround'\" )\n parser.add_argument('-t', '--type', action='store', metavar = ('TYPE'), default='QUAL',\n help = \"Choose one of the followings: QUAL (qualitative), QUAN (quantitative). Default is QUAL\" )\n parser.add_argument('-p', '--project', action='store', metavar = ('PROJECT'),\n help = \"Location of project file.\" )\n parser.add_argument('-e', '--epoch', action='store', metavar = ('EPOCH'), default=50,type=int,\n help = \"Number of epochs.\" )\n parser.add_argument('-s', '--save', action='store', metavar = ('SAVE'),\n help = \"Where to save the progress model. 
Default is 'learned_models/progress_' + action + '.mod'\" )\n\n args = parser.parse_args()\n \n project_name = args.action\n\n feature_type = args.type\n if feature_type == 'QUAL':\n config = Qual_Config()\n elif feature_type == 'QUAN':\n config = Quan_Config()\n\n config.max_epoch = 10\n config.max_max_epoch = args.epoch\n\n project_file = args.project\n\n if project_file is None:\n if feature_type == 'QUAL':\n project_file = os.path.join('learned_models', project_name.lower() + \"_project.proj\")\n elif feature_type == 'QUAN':\n project_file = os.path.join('learned_models', project_name.lower() + \"_raw.proj\")\n\n progress_path = args.save\n if progress_path is None:\n progress_path = os.path.join('learned_models', \"progress_\" + project_name + \".mod\")\n\n print (' Create progress model by loading project file from %s, running %d epochs, and saving into %s ' % (project_file, config.max_max_epoch, progress_path) )\n \n p = Project.load(project_file)\n\n # Merge all data\n total_data = np.concatenate( [p.training_data, p.validation_data, p.testing_data], axis = 0 )\n total_lbl = np.concatenate( [p.training_lbl, p.validation_lbl, p.testing_lbl], axis = 0 )\n\n print (total_data.shape)\n print (total_lbl.shape)\n\n with tf.Graph().as_default(), tf.Session() as session:\n with tf.variable_scope(\"model\") as scope:\n print('-------- Setup m model ---------')\n m = EventProgressEstimator(is_training=True, name = p.name, config = config)\n\n session.run(tf.global_variables_initializer())\n\n for i in range(config.max_max_epoch):\n print('-------------------------------')\n start_time = time.time()\n lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)\n m.assign_lr(config.learning_rate * lr_decay)\n\n print(\"Epoch: %d Learning rate: %.6f\" % (i + 1, session.run(m.lr)))\n\n train_loss = run_epoch(m, total_data, total_lbl, training = True)\n\n\n saver = tf.train.Saver()\n saver.save(session, progress_path)\n\n print('-------- Saved progress file to ' + progress_path)","sub_path":"progress_learner_finalize.py","file_name":"progress_learner_finalize.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"564945484","text":"import numpy as np\nimport pylab as plt\n\n#a)\n\ndata = np.loadtxt(r'cpresources\\sunspots.txt',float)\ntime, sunspots = np.transpose(data)\nplt.figure(1)\nplt.plot(time,sunspots)\nplt.title('Sunspots/month since 1749')\nplt.xlabel('time(Months)')\nplt.ylabel('Sunspots')\nplt.show()\n\n#b)\nn = 1000\nplt.figure(2)\nplt.plot(time[:n],sunspots[:n],'b-', label = 'Sunspots')\nplt.show()\n\n#c)\nr = 5\nN = len(sunspots)\na = np.array([sum(sunspots[-r+i:r+i])/(2*r) for i in range(r,N-r)])\nplt.figure(2)\nplt.plot(time[:1000],a[:1000],'r-',label = 'moving average')\nplt.title('Sunspots/month since 1749')\nplt.xlabel('time(Months)')\nplt.ylabel('Sunspots')\nplt.legend(loc='upperright')\nplt.show()\n","sub_path":"Chapter3/Ex3_1.py","file_name":"Ex3_1.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"16070610","text":"from django.urls import path\n\nfrom imager_calibrations.views import ImagerCadenceView, ImagerCadenceSiteView\n\napp_name = 'imager_calibrations'\n\nurlpatterns = [\n path('list/', ImagerCadenceView.as_view(), name='imager_home'),\n path('site//', ImagerCadenceSiteView.as_view(), 
name='site_detail')\n]","sub_path":"imager_calibrations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"520406186","text":"from opengever.core.upgrade import SchemaMigration\nfrom opengever.meeting.activity.watchers import add_watcher_on_proposal_created\nfrom opengever.meeting.activity.watchers import add_watchers_on_submitted_proposal_created\n\n\nclass RegisterWatchersForProposals(SchemaMigration):\n \"\"\"Register watchers for proposals.\n \"\"\"\n\n def migrate(self):\n for obj in self.objects({'portal_type': 'opengever.meeting.proposal'},\n 'Register watchers for proposals'):\n add_watcher_on_proposal_created(obj)\n\n for obj in self.objects(\n {'portal_type': 'opengever.meeting.submittedproposal'},\n 'Register watchers for submitted proposals'):\n\n add_watchers_on_submitted_proposal_created(obj)\n","sub_path":"opengever/core/upgrades/20180703144847_register_watchers_for_proposals/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"37969022","text":"from Node_05 import Node\r\nfrom Tree_05 import RBT\r\n\r\nimport os\r\n\r\ndef search():\r\n filenames = os.listdir('./input/')\r\n return filenames\r\n\r\ndef fileData(files):\r\n data = []\r\n\r\n for file in files:\r\n f = open('./input/'+ file, 'r')\r\n lines = f.readlines()\r\n tmpData = []\r\n for line in lines:\r\n inputNumber = int(line.strip())\r\n tmpData.append(inputNumber)\r\n\r\n data.append(tmpData)\r\n f.close()\r\n return data \r\n\r\ndef main():\r\n fileNames = search()\r\n datas = fileData(fileNames)\r\n case = 0\r\n\r\n for data in datas:\r\n rbt = RBT()\r\n \r\n #read data for input \r\n for i in data:\r\n if i > 0:\r\n rbt.insert(i)\r\n elif i < 0:\r\n rbt.delete(-i)\r\n else:\r\n break\r\n\r\n\r\n print(\"filename = \" + fileNames[case])\r\n\r\n rbt.printTotalNode(rbt.root)\r\n\r\n rbt.printInsertedNode(rbt.root)\r\n\r\n rbt.printDeletedNode(rbt.root)\r\n\r\n rbt.printMissedNode(rbt.root)\r\n\r\n rbt.printBlackNode(rbt.root)\r\n\r\n rbt.printBlackHeight(rbt.root)\r\n\r\n print(\"Inorder Traversal: \")\r\n rbt.inOrderTraversal(rbt.root)\r\n\r\n print(\"\\n\")\r\n\r\n case += 1 \r\n\r\nmain()\r\n","sub_path":"Main_05.py","file_name":"Main_05.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"593867499","text":"\"\"\"Tests for helper functions.\"\"\"\nimport itertools\nimport string\nfrom typing import Any, Collection, List, Tuple\nfrom unittest import mock\n\nimport pytest\nfrom hypothesis import given, strategies\n\nfrom aiousps.helpers import enumerated_chunker, find_nonmatching_fields\n\nENUMERATED_CHUNKER_TEST_VALUES = [\n [range(10), 3, list(enumerate(range(10)))],\n [range(19, 100), 13, list(enumerate(range(19, 100)))],\n [range(150, 300), 1, list(enumerate(range(150, 300)))],\n [['a', 'b', 'c'], 2, [(0, 'a'), (1, 'b'), (2, 'c')]]\n]\nELLIPSIS_ATTRS = dir(...)\nVALID_FIELD_INITIAL_CHARACTERS = string.ascii_letters + '_'\n\n\n@pytest.mark.parametrize(['iterable', 'n_items', 'expected'], ENUMERATED_CHUNKER_TEST_VALUES)\ndef test_enumerated_chunker_pairs_expected(iterable, n_items, expected):\n actual = enumerated_chunker(iterable, n_items)\n flattened = list(itertools.chain.from_iterable(actual))\n assert expected == 
flattened\n\n\n@pytest.mark.parametrize(['collection', 'n_items', 'expected'], ENUMERATED_CHUNKER_TEST_VALUES)\ndef test_enumerated_chunker_chunk_length(collection: Collection, n_items: int, expected: List[Tuple[int, Any]]):\n actual = list(enumerated_chunker(collection, n_items))\n\n lengths = list(map(len, actual))\n assert max(lengths) == n_items\n assert min(lengths) == len(collection) % n_items or n_items\n\n\nclass TestFindNonmatchingFields:\n \"\"\"Tests for `find_nonmatching_fields` class.\"\"\"\n PYTHON_VALID_FIELD_STRATEGY = strategies.text(\n strategies.sampled_from(string.ascii_letters + '_'),\n min_size=1,\n )\n\n @given(fields=strategies.lists(PYTHON_VALID_FIELD_STRATEGY))\n def test_success_on_useless_test(self, fields):\n mock_item = mock.Mock()\n\n actual = find_nonmatching_fields(mock_item, lambda _: True, *fields)\n\n assert actual == []\n\n @given(fields=strategies.lists(PYTHON_VALID_FIELD_STRATEGY.filter(lambda x: x not in ELLIPSIS_ATTRS), min_size=1,\n max_size=10))\n def test_multiple_attributeerrors(self, fields):\n fake_item = ...\n fields_regex = ', '.join(['.+'] * len(fields))\n error_regex = f'ellipsis does not contain fields: {fields_regex}'\n\n with pytest.raises(AttributeError, match=error_regex) as e:\n find_nonmatching_fields(fake_item, lambda _: True, *fields)\n\n @given(fields=strategies.lists(PYTHON_VALID_FIELD_STRATEGY, min_size=1))\n def test_failure_returns_expected_message(self, fields):\n mock_item = mock.Mock()\n\n actual = find_nonmatching_fields(mock_item, lambda _: False, *fields)\n\n assert actual == fields\n","sub_path":"tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"263192420","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nfrom azure.identity import DefaultAzureCredential\nfrom azure.mgmt.sql import SqlManagementClient\n\n\"\"\"\n# PREREQUISITES\n pip install azure-identity\n pip install azure-mgmt-sql\n# USAGE\n python managed_instance_create_max.py\n\n Before run the sample, please set the values of the client ID, tenant ID and client secret\n of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,\n AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see:\n    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal\n\"\"\"\n\n\ndef main():\n    client = SqlManagementClient(\n        credential=DefaultAzureCredential(),\n        subscription_id=\"20D7082A-0FC7-4468-82BD-542694D5042B\",\n    )\n\n    response = client.managed_instances.begin_create_or_update(\n        resource_group_name=\"testrg\",\n        managed_instance_name=\"testinstance\",\n        parameters={\n            \"location\": \"Japan East\",\n            \"properties\": {\n                \"administratorLogin\": \"dummylogin\",\n                \"administratorLoginPassword\": \"PLACEHOLDER\",\n                \"administrators\": {\n                    \"azureADOnlyAuthentication\": True,\n                    \"login\": \"bob@contoso.com\",\n                    \"principalType\": \"User\",\n                    \"sid\": \"00000011-1111-2222-2222-123456789111\",\n                    \"tenantId\": \"00000011-1111-2222-2222-123456789111\",\n                },\n                \"collation\": \"SQL_Latin1_General_CP1_CI_AS\",\n                \"dnsZonePartner\": \"/subscriptions/20D7082A-0FC7-4468-82BD-542694D5042B/resourceGroups/testrg/providers/Microsoft.Sql/managedInstances/testinstance\",\n                \"instancePoolId\": \"/subscriptions/20D7082A-0FC7-4468-82BD-542694D5042B/resourceGroups/testrg/providers/Microsoft.Sql/instancePools/pool1\",\n                \"licenseType\": \"LicenseIncluded\",\n                \"maintenanceConfigurationId\": \"/subscriptions/20D7082A-0FC7-4468-82BD-542694D5042B/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/SQL_JapanEast_MI_1\",\n                \"minimalTlsVersion\": \"1.2\",\n                \"proxyOverride\": \"Redirect\",\n                \"publicDataEndpointEnabled\": False,\n                \"requestedBackupStorageRedundancy\": \"Geo\",\n                \"servicePrincipal\": {\"type\": \"SystemAssigned\"},\n                \"storageSizeInGB\": 1024,\n                \"subnetId\": \"/subscriptions/20D7082A-0FC7-4468-82BD-542694D5042B/resourceGroups/testrg/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1\",\n                \"timezoneId\": \"UTC\",\n                \"vCores\": 8,\n            },\n            \"sku\": {\"name\": \"GP_Gen5\", \"tier\": \"GeneralPurpose\"},\n            \"tags\": {\"tagKey1\": \"TagValue1\"},\n        },\n    ).result()\n    print(response)\n\n\n# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2022-08-01-preview/examples/ManagedInstanceCreateMax.json\nif __name__ == \"__main__\":\n    main()\n","sub_path":"sdk/sql/azure-mgmt-sql/generated_samples/managed_instance_create_max.py","file_name":"managed_instance_create_max.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"617833028","text":"import sys\n\nfilename= sys.argv[1]\n\nwith open(filename, 'r') as file:\n\tstr_time = [row.split() for row in file]\n\tplus_man = [float(elem[0].replace(':','.')) for elem in str_time ]\n\tminus_man = [float(elem[1].replace(':','.')) for elem in str_time ]\n\nto_calculate = {} # A person arriving counts as +1, leaving as -1\nfor elem in plus_man:\n\tif elem in to_calculate:\n\t\tto_calculate[elem] += 1\n\telse:\n\t\tto_calculate[elem] = 1\nfor elem in minus_man:\n\tif elem in to_calculate:\n\t\tto_calculate[elem] -= 1\n\telse:\n\t\tto_calculate[elem] = -1\n\nto_cumul_sum = [] # The sorted dictionary converted into a list\nfor i in sorted(to_calculate):\n\tto_cumul_sum.append([i, to_calculate[i]])\n\ncumul_sum = [] # Computing the cumulative sum\ns = 0\nfor elem in to_cumul_sum:\n\ts += elem[1]\n\tcumul_sum.append(s)\n\nmx = max(cumul_sum) # Maximum number of visitors at the same time\n\nindexes = [] # Indices where the visitor count equals the maximum\nfor n,i in enumerate(cumul_sum):\n\tif i == mx:\n\t\tindexes.append(n)\n\nstart = [indexes[0]] # Get the start and end of each interval\nend = []\nfor i in range(len(indexes)-1):\n\tif indexes[i+1] > (indexes[i] + 1):\n\t\tstart.append(indexes[i+1])\n\t\tend.append(indexes[i]+1)\nend.append(indexes[-1]+1)\n\n\nfor i, j in zip(start,end): # Output\n    left = str(to_cumul_sum[i][0]).replace('.',':')\n    right = str(to_cumul_sum[j][0]).replace('.',':')\n    if len(left) == 3:\n        left += '0'\n    if len(right) == 3:\n        right += '0'\n    print(left, right)","sub_path":"task4/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"573323111","text":"import os\nimport logging\nimport datetime\n\nfrom technews import mail_util\nfrom technews import TechNews\nfrom technews import EmailContentHelper\n\n\ndef main():\n    mh = EmailContentHelper()\n\n    news_list = [\n        TechNews(\"orange\").get_today_news,\n        TechNews(\"ithome\").get_today_news,\n        TechNews(\"business\").get_today_news,\n        TechNews(\"inside\").get_today_news,\n    ]\n\n    news_rows = \"\"\n    skip_counts = 0\n    for news_getter in news_list:\n        news_data = news_getter()\n        news_title = news_data[\"news_page_title\"]\n        logging.info(\"[%s] - [%s]\", news_title, news_data[\"news_counts\"])\n        if news_data[\"news_counts\"] == 0:\n            skip_counts += 1\n            continue\n        news_rows += mh.get_news_html_contents(news_data, news_title)\n\n    if skip_counts == len(news_list):\n        logging.info(\"No tech news today.\")\n        return\n\n    date = datetime.date.today().strftime(\"%Y/%m/%d\")\n    mail_subject = f\"科技新聞 Tech News - {date}\"\n    email_html = mh.get_email_html(mail_subject, news_rows)\n\n    mail_util.mail_sender(\n        os.environ[\"MAIL_SENDER\"],\n        os.environ[\"MAIL_SENDER_PWD\"],\n        os.environ[\"MAIL_RECV\"].split(\",\"),\n        email_html,\n        mail_subject,\n        \"html\")\n    logging.info(\"Send Today's Tech News Completed!\")\n\n\nif __name__ == \"__main__\":\n\n    try:\n        LOG_LEVEL = os.environ[\"LOG_LEVEL\"]\n    except Exception:\n        LOG_LEVEL = logging.INFO\n\n    log_format = (\"%(asctime)s [ %(levelname)s ] %(message)s \"\n                  \"(%(filename)s:%(lineno)s)-[%(module)s.%(funcName)s]\")\n    date_format = \"%Y-%m-%d %H:%M:%S\"\n    logging.basicConfig(format=log_format, level=LOG_LEVEL, datefmt=date_format)\n    main()\n","sub_path":"technews/daily_news.py","file_name":"daily_news.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"154210026","text":"import sys\r\nimport os\r\nfrom glob import glob\r\nimport numpy as np\r\nfrom scipy.io import wavfile\r\nimport models as net\r\nimport models_asr as asrnet\r\nfrom dataset_reader_emb import DataManager\r\nfrom config_utils import load_configfile, check_trainconfiguration\r\nfrom transcription2phonemes import load_dictionary, get_phonemes_from_labels\r\nimport lws\r\n\r\ntry:\r\n    import tensorflow as tf\r\nexcept ImportError:\r\n    print('Failed to import TensorFlow module.')\r\n\r\n# Avoid printing tensorflow log messages\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\n\r\n\r\ndef infer(model_path, model_path_asr, data_path_test, audio_path, out_file_prefix, dictionary_file, norm=True, oracle_phase=False, batch_size=1):\r\n    config = load_configfile(os.path.join(model_path, 'config.txt'))\r\n    config = check_trainconfiguration(config)\r\n    config_asr = load_configfile(os.path.join(model_path_asr, 'config.txt'))\r\n    config_asr = check_trainconfiguration(config_asr)\r\n\r\n    # Create the DataManager that reads TFRecords.\r\n    with tf.name_scope('test_batch'):\r\n        test_data_manager 
= DataManager(num_audio_samples=config['audio_len'], audio_feat_size=config['audio_feat_dim'],\r\n video_feat_size=config['video_feat_dim'], buffer_size=4000, mode='fixed')\r\n test_files_list = glob(os.path.join(data_path_test, '*.tfrecord'))\r\n test_dataset = test_data_manager.get_dataset(test_files_list, shuffle=False)\r\n test_batch_dataset, test_it = test_data_manager.get_iterator(test_dataset, batch_size=batch_size,\r\n n_epochs=1, drop_remainder=False)\r\n next_test_batch = test_it.get_next()\r\n \r\n # Load normalization data\r\n if norm:\r\n audio_feat_mean = np.load(os.path.join(model_path, 'audio_features_mean.npy'))\r\n audio_feat_std = np.load(os.path.join(model_path, 'audio_features_std.npy'))\r\n else:\r\n audio_feat_mean = np.zeros(config['audio_feat_dim'])\r\n audio_feat_std = np.ones(config['audio_feat_dim'])\r\n audio_feat_mean_asr = np.load(os.path.join(model_path_asr, 'audio_features_mean.npy'))\r\n audio_feat_std_asr = np.load(os.path.join(model_path_asr, 'audio_features_std.npy'))\r\n\r\n # Placeholders.\r\n with tf.name_scope('placeholder'):\r\n sequence_lengths_ph = tf.placeholder(tf.int32, shape=[None], name='sequence_lengths')\r\n labels_lengths_ph = tf.placeholder(tf.int32, shape=[None], name='labels_lengths')\r\n target_sources_ph = tf.placeholder(tf.float32, shape=[None, config['audio_len']], name='target_sources')\r\n video_features_ph = tf.placeholder(tf.float32, shape=[None, None, config['video_feat_dim']], name='video_features')\r\n embeddings_ph = tf.placeholder(tf.float32, shape=[None, 512], name='embeddings')\r\n masks_ph = tf.placeholder(tf.float32, shape=[None, None, config['audio_feat_dim']], name='masks')\r\n labels_ph = tf.placeholder(tf.float32, shape=[None, None], name='labels')\r\n audio_feat_mean_ph = tf.placeholder(tf.float32, shape=[len(audio_feat_mean)], name='features_mean')\r\n audio_feat_std_ph = tf.placeholder(tf.float32, shape=[len(audio_feat_mean)], name='features_std')\r\n audio_feat_mean_asr_ph = tf.placeholder(tf.float32, shape=[len(audio_feat_mean_asr)], name='features_mean_asr')\r\n audio_feat_std_asr_ph = tf.placeholder(tf.float32, shape=[len(audio_feat_mean_asr)], name='features_std_asr')\r\n dropout_rate_ph = tf.placeholder(tf.float32, name='dropout_rate')\r\n \r\n\r\n # Graph building and definition.\r\n print('Building speech inpainting inference model..')\r\n if config['model'] == 'av-blstm-twosteps':\r\n model = net.StackedBLSTM2StepsModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, dropout_rate_ph, config, video_features=video_features_ph)\r\n model.build_graph(var_scope=config['model'])\r\n else:\r\n with tf.variable_scope(config['model']):\r\n if config['model'] == 'a-blstm':\r\n model = net.StackedBLSTMModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, dropout_rate_ph, config, input='a')\r\n elif config['model'] == 'v-blstm':\r\n model = net.StackedBLSTMModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, dropout_rate_ph, config, input='v', video_features=video_features_ph)\r\n elif config['model'] == 'av-blstm':\r\n model = net.StackedBLSTMModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, dropout_rate_ph, config, input='av', video_features=video_features_ph)\r\n elif config['model'] == 'a-blstm-ssnn':\r\n model = net.StackedBLSTMSSNNModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, 
dropout_rate_ph, config, input='a')\r\n elif config['model'] == 'v-blstm-ssnn':\r\n model = net.StackedBLSTMSSNNModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, dropout_rate_ph, config, input='v', video_features=video_features_ph)\r\n model.build_graph(var_scope=config['model'])\r\n elif config['model'] == 'av-blstm-ssnn':\r\n model = net.StackedBLSTMSSNNModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, dropout_rate_ph, config, input='av', video_features=video_features_ph)\r\n elif config['model'] == 'a-blstm-emb':\r\n model = net.StackedBLSTMEmbeddingModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, dropout_rate_ph, config, embeddings=embeddings_ph, input='a', is_training=False)\r\n elif config['model'] == 'v-blstm-emb':\r\n model = net.StackedBLSTMEmbeddingModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, dropout_rate_ph, config, embeddings=embeddings_ph, input='v', video_features=video_features_ph)\r\n elif config['model'] == 'av-blstm-emb':\r\n model = net.StackedBLSTMEmbeddingModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, dropout_rate_ph, config, embeddings=embeddings_ph, input='av', video_features=video_features_ph)\r\n elif config['model'] == 'unet':\r\n model = net.UNetFConvModel(sequence_lengths_ph, target_sources_ph, masks_ph, audio_feat_mean_ph,\r\n audio_feat_std_ph, dropout_rate_ph, config)\r\n else:\r\n print('Model selection must be \"a-blstm\", \"v-blstm\", \"av-blstm\", \"av-blstm-twosteps\" or \"unet\". Closing...')\r\n sys.exit(1)\r\n model.build_graph(var_scope=config['model'])\r\n print('done.')\r\n print('Building ASR inference model:')\r\n with tf.variable_scope('asr/' + config_asr['model']):\r\n if config_asr['model'] == 'a-blstm':\r\n model_asr = asrnet.StackedBLSTMModel(sequence_lengths_ph, labels_lengths_ph, target_sources_ph, masks_ph, labels_ph,\r\n audio_feat_mean_asr_ph, audio_feat_std_asr_ph, dropout_rate_ph, config_asr, input='a')\r\n elif config_asr['model'] == 'v-blstm':\r\n model_asr = asrnet.StackedBLSTMModel(sequence_lengths_ph, labels_lengths_ph, target_sources_ph, masks_ph, labels_ph,\r\n audio_feat_mean_asr_ph, audio_feat_std_asr_ph, dropout_rate_ph, config, input='v', video_features=video_features_ph)\r\n elif config_asr['model'] == 'av-blstm':\r\n model_asr = asrnet.StackedBLSTMModel(sequence_lengths_ph, labels_lengths_ph, target_sources_ph, masks_ph, labels_ph,\r\n audio_feat_mean_asr_ph, audio_feat_std_asr_ph, dropout_rate_ph, config, input='av', video_features=video_features_ph)\r\n else:\r\n print('Model selection must be \"a-blstm\", \"v-blstm\", \"av-blstm\". 
Closing...')\r\n sys.exit(1)\r\n model_asr.build_graph(var_scope='asr/' + config_asr['model'])\r\n # Work-around bug TF\r\n model_asr_train_vars = []\r\n for v in model_asr.train_vars:\r\n if 'asr/' + config_asr['model'] in v.name:\r\n model_asr_train_vars.append(v)\r\n print('done.')\r\n\r\n # Load phonemes dictionary\r\n ph_dict = load_dictionary(dictionary_file)\r\n\r\n # The inizializer operation.\r\n init_op = tf.group(test_it.initializer, tf.global_variables_initializer(), tf.local_variables_initializer())\r\n \r\n # Start session\r\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\r\n log_device_placement=False,\r\n gpu_options=tf.GPUOptions(allow_growth=True))) as sess:\r\n sess.run(init_op)\r\n\r\n # Load model weigths\r\n print('Restore weigths:')\r\n saver = tf.train.Saver(var_list=model.train_vars)\r\n saver.restore(sess, os.path.join(model_path, 'sinet'))\r\n saver_asr = tf.train.Saver(var_list=model_asr_train_vars)\r\n saver_asr.restore(sess, os.path.join(model_path_asr, 'asrnet'))\r\n print('done.\\n')\r\n\r\n # Get enhanced sources tensor op\r\n if oracle_phase:\r\n enhanced_sources_tensor = model.enhanced_sources_oracle_phase\r\n else:\r\n enhanced_sources_tensor = model.enhanced_sources\r\n\r\n # LWS module initialization\r\n lws_processor = lws.lws(384, 192, fftsize=512, mode='speech')\r\n\r\n try:\r\n total_samples = 0\r\n loss_hole_list = []\r\n loss_asr_list = []\r\n per_list = []\r\n \r\n print('Starting inference on dataset: {:s}'.format(data_path_test))\r\n while True:\r\n # Fetch test samples batch.\r\n test_length, test_lab_length, test_target_audio, test_embeddings, test_sample_path, test_labels, \\\r\n test_video_features, test_mask = sess.run(next_test_batch)\r\n \r\n # Speech inpainting inference\r\n test_enhanced_audio, loss_hole = \\\r\n sess.run(fetches=[enhanced_sources_tensor, model.loss],\r\n feed_dict={\r\n sequence_lengths_ph:test_length,\r\n labels_lengths_ph: test_lab_length,\r\n target_sources_ph: test_target_audio,\r\n video_features_ph: test_video_features,\r\n embeddings_ph: test_embeddings,\r\n masks_ph: test_mask,\r\n labels_ph: test_labels,\r\n audio_feat_mean_ph: audio_feat_mean,\r\n audio_feat_std_ph: audio_feat_std,\r\n dropout_rate_ph: 0.0\r\n })\r\n\r\n # ASR inference\r\n test_decoded, loss_asr, per = \\\r\n sess.run(fetches=[model_asr.decoding, model_asr.loss, model_asr.per],\r\n feed_dict={\r\n sequence_lengths_ph:test_length,\r\n labels_lengths_ph: test_lab_length,\r\n target_sources_ph: test_enhanced_audio,\r\n video_features_ph: test_video_features,\r\n masks_ph: test_mask,\r\n labels_ph: test_labels,\r\n audio_feat_mean_asr_ph: audio_feat_mean_asr,\r\n audio_feat_std_asr_ph: audio_feat_std_asr,\r\n dropout_rate_ph: 0.0\r\n })\r\n \r\n for enhanced, sample_dir, mask, seq_len, decoded in zip(test_enhanced_audio, test_sample_path.values, test_mask, test_length, test_decoded):\r\n # Reconstruct phase with LWS algorithm if required\r\n if not oracle_phase:\r\n stft = lws_processor.stft(enhanced)\r\n #mask_adj = np.zeros_like(stft)\r\n #mask_adj[: mask.shape[0], :mask.shape[1]] = mask\r\n mag_spec = np.abs(stft)\r\n #ang_spec = np.angle(stft) * mask_adj\r\n #rec_stft = lws_processor.run_lws(mag_spec * np.exp(1j * ang_spec))\r\n rec_stft = lws_processor.run_lws(mag_spec)\r\n #rec_mag = np.abs(rec_stft)\r\n #rec_ang = np.angle(rec_stft)\r\n #rec_ang_adj = ang_spec + rec_ang * (1 - mask_adj)\r\n #rec_stft_adj = rec_mag * np.exp(1j * rec_ang_adj)\r\n rec_stft_adj = rec_stft\r\n enhanced = 
lws_processor.istft(rec_stft_adj)\r\n\r\n #sample_dir = ''.join([chr(x) for x in np.trim_zeros(sample_dir)])\r\n sample_dir = sample_dir.decode()\r\n # Save enhanced waveform\r\n os.makedirs(os.path.join(audio_path, sample_dir, 'enhanced'), exist_ok=True)\r\n num_wav_samples = seq_len * 192\r\n out_filename = os.path.join(audio_path, sample_dir, 'enhanced', out_file_prefix + '.wav')\r\n wavfile.write(out_filename, 16000, enhanced[: num_wav_samples].astype(np.int16))\r\n # Save embeddings\r\n #os.makedirs(os.path.join(audio_path, sample_dir, 'embeddings'), exist_ok=True)\r\n #out_filename = os.path.join(audio_path, sample_dir, 'embeddings', out_file_prefix + '.npy')\r\n #np.save(out_filename, emb)\r\n #out_filename = os.path.join(audio_path, sample_dir, 'embeddings', out_file_prefix + '_ext.npy')\r\n #np.save(out_filename, emb_ext)\r\n # Save transcription\r\n decoded_pad_idx = np.where(decoded == -1)[0]\r\n decoded_len = len(decoded) if len(decoded_pad_idx) == 0 else decoded_pad_idx.min()\r\n decoded = decoded[: decoded_len]\r\n decoded_ph = get_phonemes_from_labels(decoded, ph_dict)\r\n decoded_str = ','.join(decoded_ph)\r\n os.makedirs(os.path.join(audio_path, sample_dir, 'transcriptions'), exist_ok=True)\r\n out_filename = os.path.join(audio_path, sample_dir, 'transcriptions', out_file_prefix + '.lbl')\r\n with open(out_filename, 'w') as f:\r\n f.write(decoded_str)\r\n\r\n loss_hole_list.append(loss_hole)\r\n loss_asr_list.append(loss_asr)\r\n per_list += list(per)\r\n total_samples += len(test_length)\r\n print('Processed {:d} utterances. Total samples processed so far {:d}.'.format(len(test_length), total_samples))\r\n except tf.errors.OutOfRangeError:\r\n print('done.')\r\n\r\n print('Loss hole: {:.5}'.format(np.mean(loss_hole_list)))\r\n print('Loss ASR: {:.5}'.format(np.mean(loss_asr_list)))\r\n print('PER: {:.5}'.format(np.mean(per_list)))\r\n \r\n\r\nif __name__ == '__main__':\r\n model_path = 'C:\\\\Users\\\\Public\\\\aau_data\\\\GRID\\\\logs\\\\test_si_dataset\\\\a-blstm_exp0\\\\netmodel'\r\n model_path_asr = 'C:\\\\Users\\\\Public\\\\aau_data\\\\GRID\\\\logs\\\\test_si_dataset\\\\asr_a-blstm_exp1\\\\netmodel'\r\n data_path = 'C:\\\\Users\\\\Public\\\\aau_data\\\\GRID\\\\tfrecords\\\\test_si_dataset\\\\test-set-lbl'\r\n audio_path = 'C:\\\\Users\\\\Public\\\\aau_data\\\\GRID\\\\test_si_dataset\\\\test-set-lbl'\r\n out_file_prefix = 'a-blstm_exp0'\r\n norm = True\r\n oracle_phase = False\r\n batch_size = 10\r\n dict_file = 'C:\\\\Users\\\\Public\\\\aau_data\\\\GRID\\\\dictionary.txt'\r\n\r\n infer(model_path, model_path_asr, data_path, audio_path, out_file_prefix, dict_file, norm, oracle_phase, batch_size)","sub_path":"av_speech_inpainting/inference_siasr.py","file_name":"inference_siasr.py","file_ext":"py","file_size_in_byte":16593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"246390633","text":"f = open('log.txt','r')\np_l = []\nfor i in range(1,101,1):\n line = f.readline()\n psnr = float(line[-11:-4])\n p_l.append(psnr)\nmax_psnr = max(p_l)\nprint(max_psnr)\nprint(p_l.index(max_psnr) + 1)\n ","sub_path":"Experiment c/read_log.py","file_name":"read_log.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"74120384","text":"from django.core.management import call_command\nfrom django.test import RequestFactory, TestCase\nfrom django.test.utils import override_settings\n\nfrom model_mommy import mommy\n\nfrom .middleware 
import EnforceCurrentSiteMiddleware\nfrom .models import RegistrationNumber, Distribution\n\n\nclass DistributionTests(TestCase):\n\n def setUp(self):\n cards = []\n for number in xrange(1, 16):\n cards.append(mommy.prepare(RegistrationNumber, number=number,\n active=True))\n RegistrationNumber.objects.bulk_create(cards)\n\n def test_invitees(self):\n dist1 = mommy.make(Distribution, supplies_quantity=6)\n dist1_values = dist1.invitees.values_list(\"number\", flat=True)\n self.assertEqual(list(dist1_values), [1, 2, 3, 4, 5, 6])\n self.assertEqual(dist1.finish_number, 6)\n\n dist2 = mommy.make(Distribution, supplies_quantity=10)\n dist2_values = dist2.invitees.values_list(\"number\", flat=True)\n self.assertEqual(list(dist2_values),\n [1, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n self.assertEqual(dist2.finish_number, 1)\n\n def test_only_active_invitees(self):\n RegistrationNumber.objects.filter(number=3).update(active=False)\n dist = mommy.make(Distribution, supplies_quantity=5)\n dist_values = dist.invitees.values_list(\"number\", flat=True)\n self.assertEqual(list(dist_values), [1, 2, 4, 5, 6])\n self.assertEqual(dist.finish_number, 6)\n\n\n@override_settings(INSTALLED_APPS=(\"django.contrib.sites\", ))\nclass MiddlewareTest(TestCase):\n\n def setUp(self):\n call_command(\"migrate\", \"sites\", verbosity=0)\n self.middleware = EnforceCurrentSiteMiddleware()\n self.rf = RequestFactory()\n\n def test_simple(self):\n request = self.rf.get(\"/foo\")\n response = self.middleware.process_request(request)\n self.assertIsNotNone(response)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response.url, \"//example.com/foo\")\n\n def test_with_site_id(self):\n request = self.rf.get(\"/foo\")\n with self.settings(SITE_ID=1):\n response = self.middleware.process_request(request)\n self.assertIsNotNone(response)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response.url, \"//example.com/foo\")\n\n def test_without_sites(self):\n request = self.rf.get(\"/foo\")\n with self.settings(INSTALLED_APPS=()):\n response = self.middleware.process_request(request)\n self.assertIsNone(response)\n self.assertEqual(type(request.site).__name__, \"RequestSite\")\n","sub_path":"refugeedata/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"614812214","text":"from __future__ import print_function\r\nimport argparse\r\nimport cv2\r\n\r\n\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-i\", \"--image\", required = True, help = \"help\")\r\nargs = vars(ap.parse_args())\r\n\r\nimage = cv2.imread(args[\"image\"])\r\ncv2.imshow(\"Image\", image)\r\ncv2.waitKey(0)\r\ncv2.imwrite(\"trex.jpg\",image)","sub_path":"png2jpg.py","file_name":"png2jpg.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"148682418","text":"import sqlite3\nimport pandas as pd\nimport statsmodels as sm\n\ndef percharscore():\n\n\tdb = sqlite3.connect('compass.db')\n\n\tpercharpull = (\"\"\"\n\t\tSELECT * FROM Perchar;\n\t\t\"\"\")\n\n\tperchardata = pd.read_sql(percharpull, db)\n\n\t'MULTIPLIERS'\n\n\tdegree = {'low': 2,'mid': 5,'high': 2}\n\tmba = {'yes': 3, 'no': 1}\n\tcerts = {'yes': 3, 'no': 1}\n\tage = -0.1\n\tmarried = {'yes': 2, 'no': 3}\n\tann_income = {'$1M+': 0.03, '$500K-$999K': 0.02, '$200K-$499K': 0.01, '$75K-$199K': 0.01, '$0K-$75K': 0.01}\n\tpct_ann_income_ret = {'Over 5%': 3, 'Less 
than 5%': 1}\n\tdiv = -5\n\tchild_u10 = -3\n\twork_exp = 0.25\n\tnum_companies = {'grower': 2, 'journeyman': -2}\n\tinvestor = 8\n\tillness = -5\n\tmoney_multipliers = {'liq_inv_assets': 0.01, 'ret_assets': 0.001, 'home_val': 0.001, 'current_debt': -0.05}\n\n\tscore_list = []\n\n\tfor row in perchardata.iterrows():\n\n\t\tscore = 0\n\t\tscore_dict = {}\n\n\t\tif row[1]['degree'] <= 3:\n\t\t\tscore += row[1]['mba'] * degree['low']\n\t\telif row[1]['degree'] > 3 and row[1]['degree'] <= 5:\n\t\t\tscore += row[1]['mba'] * degree['mid']\n\t\tif row[1]['degree'] > 5:\n\t\t\tscore += row[1]['mba'] * degree['high']\n\n\t\tif row[1]['mba'] == 0:\n\t\t\tscore += row[1]['mba'] * mba['no']\n\t\telse:\n\t\t\tscore += row[1]['mba'] * mba['yes']\n\n\t\tif row[1]['certs'] == None:\n\t\t\tscore += certs['no']\n\t\telse:\n\t\t\tscore += certs['yes']\n\n\t\tscore += row[1]['age'] * age\n\n\t\tif row[1]['married'] == 0:\n\t\t\tscore += row[1]['married'] * married['no']\n\t\telse:\n\t\t\tscore += row[1]['married'] * married['yes']\n\n\t\tif row[1]['ann_income'] > 5:\n\t\t\tscore += ann_income['$1M+']\n\t\telif row[1]['ann_income'] >=200 and row[1]['ann_income'] <= 499:\n\t\t\tscore += row[1]['ann_income'] * ann_income['$500K-$999K']\n\t\telif row[1]['ann_income'] >=75 and row[1]['ann_income'] <= 199:\n\t\t\tscore += row[1]['ann_income'] * ann_income['$200K-$499K']\n\t\telif row[1]['ann_income'] >=75 and row[1]['ann_income'] <= 199:\n\t\t\tscore += row[1]['ann_income'] * ann_income['$75K-$199K']\n\t\telse:\n\t\t\tscore += row[1]['ann_income'] * ann_income['$75K-$199K']\n\n\t\tif row[1]['pct_ann_income_ret'] > 5:\n\t\t\tscore += row[1]['pct_ann_income_ret'] * pct_ann_income_ret['Over 5%']\n\t\telse:\n\t\t\tscore += row[1]['pct_ann_income_ret'] * pct_ann_income_ret['Less than 5%']\n\n\t\tif row[1]['num_companies'] > 4:\n\t\t\tscore += row[1]['num_companies'] * num_companies['journeyman']\n\t\telse:\n\t\t\tscore += row[1]['num_companies'] * num_companies['grower']\n\n\t\tmoney_score = row[1]['liq_inv_assets'] * money_multipliers['liq_inv_assets'] + row[1]['ret_assets'] * money_multipliers['ret_assets'] + row[1]['home_val'] * money_multipliers['home_val'] + row[1]['current_debt'] * money_multipliers['current_debt']\n\t\tscore += money_score\n\n\t\tscore += row[1]['div'] * div\n\t\tscore += row[1]['child_u10'] * child_u10\n\t\tscore += row[1]['work_exp'] * work_exp\n\t\tscore += row[1]['investor'] * investor\n\t\tscore += row[1]['illness'] * illness\n\n\t\tscore_dict = {'user_id': row[1]['user_id'], 'score': round(score,1)}\n\t\tscore_list.append(score_dict)\n\n\tdef quantile_generator(q):\n\n\t\tquantile = perchardata.quantile(q=q)\n\t\tquantile_dict = {}\n\n\t\tquantile_dict['degree'] = quantile['degree']\n\t\tquantile_dict['mba'] = quantile['mba']\n\t\tquantile_dict['age'] = quantile['age']\n\t\tquantile_dict['married'] = quantile['married']\n\t\tquantile_dict['ann_income'] = quantile['ann_income']\n\t\tquantile_dict['pct_ann_income_ret'] = quantile['pct_ann_income_ret']\n\t\tquantile_dict['birth_year'] = quantile['birth_year']\n\t\tquantile_dict['div'] = quantile['div']\n\t\tquantile_dict['child_u10'] = quantile['child_u10']\n\t\tquantile_dict['work_exp'] = quantile['work_exp']\n\t\tquantile_dict['num_companies'] = quantile['num_companies']\n\t\tquantile_dict['investor'] = quantile['investor']\n\t\tquantile_dict['liq_inv_assets'] = quantile['liq_inv_assets']\n\t\tquantile_dict['ret_assets'] = quantile['ret_assets']\n\t\tquantile_dict['home_val'] = quantile['home_val']\n\t\tquantile_dict['current_debt'] = 
quantile['current_debt']\n\t\tquantile_dict['illness'] = quantile['illness']\n\n\t\treturn quantile_dict\n\n\tq25 = quantile_generator(0.25)\n\tq50 = quantile_generator(0.50)\n\tq75 = quantile_generator(0.75)\n\n\treturn {'perchar_score_list': score_list, 'pc_q25': q25, 'pc_q50': q50, 'pc_q75': q75}","sub_path":"percharscore.py","file_name":"percharscore.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"512648061","text":"'''\nShambhu Thapa 10677794\n\nProject 4\nOthelloGame.py\n'''\n\nimport Othello\nfrom UserException import *\n\n\ndef get_opposite_of(color):\n\t\"Function to get opposite of given colour\"\n\tif color == \"B\":\n\t\treturn \"W\"\n\telse:\n\t\treturn \"B\"\n\ndef game_setup():\n\t\"Function for first time setup of game, take rows, columns, first mober, top left corner player, winning criteria as input and creates a board object out of it\"\n\tprint(\"FULL\")\n\n\twhile True:\n\t\t#Take input for number of rows\n\t\ttry:\n \t\t\trows = int(input())\n \t\t\tif ((rows >= 4) and (rows <= 16) and (rows%2==0)):\n \t\t\t\tbreak\n \t\t\telse:\n \t\t\t\traise OutOfBoundError(\"INVALID\")\n\t\texcept OutOfBoundError as e:\n \t\t\tprint(\"\".join(e))\n \t \n\n\twhile True:\n\t\t#Takes input for number of columns\n\t\ttry:\n\t\t\tcolumns = int(input())\n\t\t\tif ((columns >= 4) and (columns <= 16) and (columns%2==0)):\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\traise OutOfBoundError(\"INVALID\")\n\t\texcept OutOfBoundError as e:\n \t\t\tprint(\"\".join(e))\n \n\twhile True:\n\t\t#Takes input for first user to start the game\n\t\ttry:\n\t\t\tfirst_mover = input()\n\t\t\tif (first_mover == \"B\" or first_mover == \"W\"):\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\traise InvalidPlayerError(\"INVALID\")\n\t\texcept InvalidPlayerError as e:\n \t\t\tprint(\"\".join(e))\n\n\twhile True:\n\t\t#Take input for colour in top left corner\n\t\ttry:\n\t\t\ttop_left_corner = input()\n\t\t\tif (top_left_corner == \"B\" or top_left_corner == \"W\"):\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\traise InvalidPlayerError(\"INVALID\")\n\t\texcept InvalidPlayerError as e:\n \t\t\tprint(\"\".join(e))\n\n\twhile True:\n\t\t#Take input for winning criteria i.e more number of pieces or less\n\t\ttry:\n\t\t\twinning_criteria = input()\n\t\t\tif (winning_criteria == \">\" or winning_criteria == \"<\"):\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\traise InvalidWinningCriteria(\"INVALID\")\n\t\texcept InvalidWinningCriteria as e:\n \t\t\tprint(\"\".join(e))\n\n\tboard = []\n\tfor i in range(0,rows): #Creates borad with \".\" everwhere\n\t\tboard.append([])\n\t\tfor j in range(0,columns):\n\t\t\tboard[i].append(\".\")\n\n\t#Put first 4 marker of B and W\n\tboard[int(rows/2)-1][int(columns/2)-1] = top_left_corner\n\tboard[int(rows/2)][int(columns/2)] = top_left_corner\n\tboard[int(rows/2)-1][int(columns/2)] = get_opposite_of(top_left_corner)\n\tboard[int(rows/2)][int(columns/2)-1] = get_opposite_of(top_left_corner)\n\n\t#Create and return an instance of Othello game with the specified parameters\n\treturn Othello.Othello(rows,columns,first_mover,top_left_corner,winning_criteria,board)\n\n\ndef play_game(othello_game):\n\t\"Function where the game gets carried, continues until there are no more possible for both B & Y or if the board has got full. Winner decided here based on the winning criteria i.e more discs or less to win. 
+{"seq_id":"27789493","text":"import os\r\n\r\nobj=open(\"file1.txt\",\"r\") \r\ns1=obj.read()\r\n\r\n#os.rename(\"abcd.txt\",\"demo.txt\")\r\n#os.remove(\"demo.txt\")\r\n#os.mkdir(\"dddd\")\r\n#os.chdir(\"dddd\")\r\n\r\ns=os.getcwd()\r\nprint(s)\r\nobj.close()\r\nobj1=open(\"file2.txt\",\"w\") 
\r\nobj1.write(s1.upper())\r\nobj1.close()\r\n#os.remove(\"file2.txt\")\r\n#os.rmdir(\"dddd\")\r\n \r\n#obj2=open(\"file2.txt\",\"r\")\r\n#s1=obj2.read(10) \r\n#print(s1) \r\n#obj2.close() \r\n\r\n\r\n\r\n\r\n","sub_path":"files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"70255875","text":"# ############################################################################################################# #\n# Assingment 10 Geoprocessing with python #\n# (c) Simon Spengler, Humboldt-Universität zu Berlin, 12/07/2019 #\n# ####################################### LOAD REQUIRED LIBRARIES ############################################# #\nimport time\nimport os\nfrom osgeo import ogr, osr\nimport gdal\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# ####################################### SET TIME-COUNT ###################################################### #\nstarttime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\nprint(\"--------------------------------------------------------\")\nprint(\"Starting process, time: \" + starttime)\nprint(\"\")\n# ####################################### FOLDER PATHS & global variables ##################################### #\npath = \"C:/Users/Simon Spengler/PycharmProjects/GeoPy/week10/\" #define data folder\nos.chdir(path)\n# set seed for reproducibility\nnp.random.seed(42)\n# ####################################### FUNCTIONS ########################################################### #\n#function from Assignment 4\ndef extent(inp_raster):\n ext_ulx, ext_uly = float('-inf'), float('inf')\n ext_lrx, ext_lry = float('inf'), float('-inf')\n\n for raster in inp_raster:\n raster = gdal.Open(raster, gdal.GA_ReadOnly)\n gt = raster.GetGeoTransform()\n ulx = gt[0] # upper left x coordinate\n uly = gt[3] # upper left y coordinate\n lrx = ulx + (gt[1] * raster.RasterXSize) # upper left x coordinate + number of pixels * pixel size\n lry = uly + (gt[5] * raster.RasterYSize) # upper left y coordinate + number of pixels * pixel size\n\n if ulx > ext_ulx:\n ext_ulx = ulx\n if uly < ext_uly:\n ext_uly = uly\n if lrx < ext_lrx:\n ext_lrx = lrx\n if lry > ext_lry:\n ext_lry = lry\n\n return [ext_ulx, ext_uly, ext_lrx, ext_lry]\n\n\ndef create_extent(img):\n\n # get extent:\n ext = extent(img)\n\n ring = ogr.Geometry(ogr.wkbLinearRing)\n poly = ogr.Geometry(ogr.wkbPolygon)\n\n ring.AddPoint(ext[0], ext[1])\n ring.AddPoint(ext[2], ext[1])\n ring.AddPoint(ext[2], ext[3])\n ring.AddPoint(ext[0], ext[3])\n ring.AddPoint(ext[0], ext[1])\n\n poly.AddGeometry(ring)\n\n return poly\n\ndef random_sample(array, lc_id=1, n_samples=1000, min_distance=1):\n #create mask for minimum distance\n sample_mask = np.zeros_like(array, dtype=bool)\n #choose pixel in certain distance\n sample_mask[::min_distance, ::min_distance] = True\n #filter mask for land cover classe\n if lc_id is not None:\n a_bool = np.where(array == lc_id, True, False)\n sample_mask = np.where(a_bool * sample_mask)\n #choose pixels from sample mask\n sample = np.random.choice(np.arange(0, len(sample_mask[0]), 1), size=n_samples, replace=False)\n choices = [tuple(sample_mask[0][sample]), tuple(sample_mask[1][sample])]\n #create mask for chosen pixels\n mask = np.zeros_like(array, dtype=bool)\n mask[choices] = True\n\n return mask\n\n\n\n# ####################################### PROCESSING ########################################################## #\nlandsat = 
gdal.Open('landsat_median1416_15000_10000.tif')\nlc = gdal.Open('landcover_lucas2015_15000_10000.tif')\n\nlc_arr = lc.ReadAsArray()\nlandsat_arr = landsat.ReadAsArray()\n#create output dataframe\nsummary = pd.DataFrame(columns={'classID':[],\n 'blue':[],\n 'green':[],\n 'red': [],\n 'nIR': [],\n 'swir1': [],\n 'swir2': [],\n }, dtype=float)\n\n#get unique classes\nlc_classes = np.unique(lc_arr)\n#calculate minimum distance in landsat pixels\nmin_distance = round(500/30)\n\n#loop through all classes\nfor cl in lc_classes:\n\n while True:\n\n try:\n #draw random sample\n mask = random_sample(lc_arr, cl, n_samples=1000, min_distance=min_distance)\n #extract reflectance values\n extracted = landsat_arr[:, mask].T\n cl_array = np.full(extracted.shape[0], cl).reshape(-1,1)\n extracted = np.concatenate([cl_array, extracted], axis=1)\n\n # add extracted values to dataframe\n summary = pd.concat([summary, pd.DataFrame(extracted, columns=summary.columns)], axis=0)\n #if there are too few samples the minimum distance is lowered\n except ValueError:\n min_distance = min_distance-1\n print('Value ' + str(cl) + ': lowered min_distance by -1 to '+str(min_distance))\n if min_distance < 1:\n print('Value: '+str(cl))\n print('Sample size larger than population size!')\n break\n continue\n\n break\n\n# calculate spectral means for each class\ncl_means = summary.groupby(\"classID\").mean()\n\n# plot result\nplt.figure()\ncl_means.T.plot()\nplt.show()\n\n\n\n\n# open datasets\nlucas = ogr.Open('EU28_2015_20161028_lucas2015j.gpkg')\nlandsat = gdal.Open('landsat_median1416_15000_10000.tif')\ngt = landsat.GetGeoTransform()\n\n# Get extent of Landsat data and transform to LUCAS geotransform\nextent = create_extent(['landsat_median1416_15000_10000.tif'])\ntransformer = osr.CoordinateTransformation(osr.SpatialReference(wkt=landsat.GetProjection()), lucas.GetLayer().GetSpatialRef())\nextent.Transform(transformer)\n\n# \"SetSpatialFilter\" on LUCAS dataset using transformed extent\nlyr_lucas = lucas.GetLayer()\nlyr_lucas.SetSpatialFilter(extent)\ntransformer = osr.CoordinateTransformation(lucas.GetLayer().GetSpatialRef(), osr.SpatialReference(wkt=landsat.GetProjection()))\n\n# create summary dataframe\nsummary = pd.DataFrame(columns={'class':[],\n 'x':[],\n 'y':[],\n 'band2':[],\n 'band3':[],\n 'band4': [],\n 'band5': [],\n 'band6': [],\n 'band7': [],\n })\n\n\n#loop through all points\nfor feat in lyr_lucas:\n #get geometry\n geom = feat.GetGeometryRef().Clone()\n x, y = geom.GetX(), geom.GetY()\n #transform to utm\n geom.Transform(transformer)\n mx, my = geom.GetX(), geom.GetY()\n #get image coordinates\n px = int((mx - gt[0]) / gt[1]) # x pixel\n py = int((my - gt[3]) / gt[5]) # y pixel\n #extract values\n lc_value = lc.ReadAsArray(px, py, 1, 1).flatten()\n landsat_values = landsat.ReadAsArray(px, py, 1, 1).flatten()\n #append to summary\n summary = summary.append({'class': int(lc_value),\n 'x': x,\n 'y': y,\n 'band2': landsat_values[0],\n 'band3': landsat_values[1],\n 'band4': landsat_values[2],\n 'band5': landsat_values[3],\n 'band6': landsat_values[4],\n 'band7': landsat_values[5]}, ignore_index=True)\n\n# write summary to csv\nsummary.to_csv('LUCAS_Landsat_extracted.csv')\n# ####################################### END TIME-COUNT AND PRINT TIME STATS################################## #\nprint(\"\")\nendtime = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\nprint(\"--------------------------------------------------------\")\nprint(\"start: \" + starttime)\nprint(\"end: \" + 
endtime)\nprint(\"\")","sub_path":"assingment10.py","file_name":"assingment10.py","file_ext":"py","file_size_in_byte":7742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"473995061","text":"#! /usr/bin/env python3 -O\n# `O` means optimized\n\ndef f(x):\n assert isinstance(x, str)\n print(x)\n\n\nif __name__ == '__main__':\n f('5')\n f(5) # if run without parameters, AssertionError will be raised\n","sub_path":"language/ignore_asserts.py","file_name":"ignore_asserts.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"469042768","text":"from __future__ import division\nfrom sympy.solvers import solve\nfrom sympy import Symbol\nimport math\nimport warnings\nimport cv2\nimport numpy as np\nimport time\n\n\n#########################################################\n#########################################################\n# Check intersection of two points, if there is return the\n# point, angle, and True; if not, return none and False\n\ndef check_intersect(line_1, line_2):\n # Endpoints of the first line\n pt1 = (line_1[0], line_1[1])\n pt2 = (line_1[2], line_1[3])\n # Endpoints of the second line\n pt3 = (line_2[0], line_2[1])\n pt4 = (line_2[2], line_2[3])\n\n # Calculate slope and y-intersect of each line\n m1 = (pt2[1] - pt1[1]) / (pt2[0] - pt1[0])\n b1 = pt1[1] - pt1[0] * m1\n\n m2 = (pt4[1] - pt3[1]) / (pt4[0] - pt3[0])\n b2 = pt3[1] - pt3[0] * m2\n\n # Ignore warning when getting a infinity slope\n warnings.filterwarnings(\"ignore\")\n\n # Consider if the lines are horizontal or vertical to cause a non-resolvable slope for intersection\n if m1 == m2:\n # print(\"Same Slope\")\n return None, None, None, False\n elif m1 == -float('Inf') and abs(m2) <= 0.1:\n if pt3[0] <= pt1[0] <= pt4[0] and min(pt1[1], pt2[1]) <= pt3[1] <= max(pt1[1], pt2[1]):\n x_intersect = pt1[0]\n y_intersect = pt3[1]\n theta = 90\n return x_intersect, y_intersect, theta, True\n elif abs(m1) <= 0.1 and m2 == -float('Inf'):\n if pt1[0] <= pt3[0] <= pt2[0] and min(pt3[1], pt4[1]) <= pt1[1] <= max(pt3[1], pt4[1]):\n x_intersect = pt3[0]\n y_intersect = pt1[1]\n theta = 90\n return x_intersect, y_intersect, theta, True\n\n # Solve for intersection\n x = Symbol('x')\n solution = solve((m1 - m2) * x + b1 - b2, x)\n if len(solution) != 1:\n # print(\"Identical Lines\")\n return None, None, None, False\n\n # Check if intersects fall in the range of two lines\n elif pt1[0] <= solution <= pt2[0] and pt3[0] <= solution <= pt4[0]:\n # print(\"Solution is \" + str(float(solution[0])))\n\n x_intersect = int(solution[0])\n y_intersect = int(m2 * solution[0] + b2)\n\n theta1 = math.atan(m1)\n theta2 = math.atan(m2)\n theta = int(math.degrees(theta2 - theta1))\n\n # Adjust the threshold angle below to check for perpendicular lines\n if (100 > theta > 80) or (-100 < theta < -80):\n return x_intersect, y_intersect, theta, True\n else:\n # print(\"Lines are not nearly perpendicular\")\n return None, None, theta, False\n else:\n # print(\"Intersection is not within the lines\")\n return None, None, None, False\n\n\ndef extend_line(line):\n x1, y1, x2, y2 = line[0]\n length = int(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))\n # TODO: Adjust the following threshold to pass the lines\n one_block_len = 90\n ratio = float(2.5 * (one_block_len - length) / length)\n if one_block_len <= length <= 1.5 * one_block_len:\n # print(\"One Block\")\n return line\n elif length > 1.5 * one_block_len:\n 
# print(\"Two Blocks\")\n ratio = float((2 * one_block_len - length) / length)\n\n # TODO: Extends lines based on its length, might need change ratio\n # ratio = 0.6\n delta_x = int(abs(x2 - x1) * ratio)\n delta_y = int(abs(y2 - y1) * ratio)\n x1_p = x1 - delta_x\n x2_p = x2 + delta_x\n if y1 > y2:\n y1_p = y1 + delta_y\n y2_p = y2 - delta_y\n else:\n y1_p = y1 - delta_y\n y2_p = y2 + delta_y\n extended = [x1_p, y1_p, x2_p, y2_p]\n Extended_Length = int(math.sqrt((x2_p - x1_p) ** 2 + (y2_p - y1_p) ** 2))\n # print(\"Ratio is: \" + str(ratio))\n # print(\"Extended Length is: \" + str(Extended_Length))\n return [extended]\n # return line\n\n\n# def increase_contrast(img):\n# lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)\n# cv2.imshow(\"lab\", lab)\n#\n# # -----Splitting the LAB image to different channels-------------------------\n# l, a, b = cv2.split(lab)\n# cv2.imshow('l_channel', l)\n# cv2.imshow('a_channel', a)\n# cv2.imshow('b_channel', b)\n#\n# # -----Applying CLAHE to L-channel-------------------------------------------\n# clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))\n# cl = clahe.apply(l)\n# cv2.imshow('CLAHE output', cl)\n#\n# # -----Merge the CLAHE enhanced L-channel with the a and b channel-----------\n# limg = cv2.merge((cl, a, b))\n# cv2.imshow('limg', limg)\n#\n# # -----Converting image from LAB Color model to RGB model--------------------\n# final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)\n#\n# return final\n\n\ndef rm_nearby_intersect(intersections):\n if len(intersections) != 0:\n i = 0\n for point_1 in intersections:\n j = 0\n for point_2 in intersections:\n if i < j:\n x1, y1 = point_1.x, point_1.y\n x2, y2 = point_2.x, point_2.y\n if abs(x1 - x2) <= 15 and abs(y1 - y2) <= 15:\n intersections.remove(point_2)\n j = j + 1\n i = i + 1\n return intersections\n\n\n# def adjust_gamma(image, gamma=1.0):\n# # build a lookup table mapping the pixel values [0, 255] to\n# # their adjusted gamma values\n# invGamma = 1.0 / gamma\n# table = np.array([((i / 255.0) ** invGamma) * 255\n# for i in np.arange(0, 256)]).astype(\"uint8\")\n#\n# # apply gamma correction using the lookup table\n# return cv2.LUT(image, table)\n\ndef rm_duplicates(rects, intersects):\n centers = []\n # centers.append([rects[0].center.x, rects[0].center.y])\n for rect in rects:\n rect_center = [rect.center.x, rect.center.y]\n if rect_center not in centers:\n centers.append(rect_center)\n return centers\n\n\ndef rm_shadow(image):\n h, w = image.shape[0], image.shape[1]\n\n for y in range(h):\n for x in range(w):\n pixel = image[y, x]\n r, g, b = pixel[0], pixel[1], pixel[2]\n lim_min = 60\n lim_max = 100\n\n if lim_min < r < lim_max and lim_min < g < lim_max and lim_min < b < lim_max:\n image[y, x] = [150, 150, 150]\n\n return image\n\n\nclass Intersect:\n def __init__(self, x_intersect, y_intersect, theta=None, category=None):\n self.x = x_intersect\n self.y = y_intersect\n if theta is not None:\n self.theta = theta\n if category is not None:\n self.category = category\n\n\nclass Line:\n def __init__(self, start_point, end_point):\n self.start = start_point\n self.end = end_point\n self.theta = math.atan2((start_point.y - end_point.y), (start_point.x - end_point.x))\n self.length = math.hypot((start_point.x - end_point.x), (start_point.y - end_point.y))\n\n\ndef is_in_range_of_a_circle(point1, point2, radius_threshold=None):\n if radius_threshold is None:\n radius_threshold = 15\n return math.hypot((point2.x - point1.x), (point2.y - point1.y)) < radius_threshold\n\n\ndef categorize_rect(intersections):\n 
start_time = time.time()\n list_of_squares = []\n tmp_intersection = intersections\n for starting_point in tmp_intersection:\n for next_point in tmp_intersection:\n if starting_point != next_point:\n base_line = Line(starting_point, next_point)\n possible_1 = Intersect(starting_point.x - math.sin(base_line.theta) * base_line.length, starting_point.y\n + math.cos(base_line.theta) * base_line.length)\n possible_1_c = Intersect(next_point.x - math.sin(base_line.theta) * base_line.length, next_point.y\n + math.cos(base_line.theta) * base_line.length)\n possible_2 = Intersect(starting_point.x + math.sin(base_line.theta) * base_line.length, starting_point.y\n - math.cos(base_line.theta) * base_line.length)\n possible_2_c = Intersect(next_point.x + math.sin(base_line.theta) * base_line.length, next_point.y\n - math.cos(base_line.theta) * base_line.length)\n midPoint = mid_point(starting_point, next_point)\n possible_3 = Intersect(midPoint.x - math.sin(base_line.theta) * base_line.length / 2, midPoint.y +\n math.cos(base_line.theta) * base_line.length / 2)\n possible_3_c = Intersect(midPoint.x + math.sin(base_line.theta) * base_line.length, midPoint.y\n - math.cos(base_line.theta) * base_line.length)\n for third_point in tmp_intersection:\n if is_in_range_of_a_circle(possible_1, third_point):\n for forth_point in tmp_intersection:\n if is_in_range_of_a_circle(possible_1_c, forth_point):\n list_of_squares.append(Rectangle(starting_point, next_point, third_point, forth_point))\n if is_in_range_of_a_circle(possible_2, third_point):\n for forth_point in tmp_intersection:\n if is_in_range_of_a_circle(possible_2_c, forth_point):\n list_of_squares.append(Rectangle(starting_point, next_point, third_point, forth_point))\n if is_in_range_of_a_circle(possible_3, third_point):\n for forth_point in tmp_intersection:\n if is_in_range_of_a_circle(possible_3_c, forth_point):\n list_of_squares.append(Rectangle(starting_point, next_point, third_point, forth_point))\n elapsed_time = time.time() - start_time\n print(\"the time elapsed for categorizing square is \" + str(elapsed_time))\n return list_of_squares\n\n\ndef mid_point(point1, point2):\n return Intersect((point1.x + point2.x) / 2, (point1.y + point2.y) / 2)\n\n\nclass Rectangle:\n def __init__(self, point1, point2, point3, point4=None, index=None, ):\n self.center = Intersect(0, 0)\n if index is not None:\n self.index = index\n self.point1 = point1\n self.point2 = point2\n self.point3 = point3\n if point4 is None:\n self.center = self.find_its_center_3()\n else:\n self.p = [point1, point2, point3, point4]\n self.center = self.find_its_center_4()\n\n def find_its_center_3(self):\n length1 = math.hypot((self.point1.x - self.point2.x), (self.point1.y - self.point2.y))\n length2 = math.hypot((self.point2.x - self.point3.x), (self.point2.y - self.point3.y))\n length3 = math.hypot((self.point1.x - self.point3.x), (self.point1.y - self.point3.y))\n if length1 >= length2 and length1 >= length3:\n center = mid_point(self.point1, self.point2)\n elif length2 >= length1 and length2 >= length3:\n center = mid_point(self.point2, self.point3)\n else:\n center = mid_point(self.point1, self.point3)\n return center\n\n def find_its_center_4(self):\n x = [p.x for p in self.p]\n y = [p.y for p in self.p]\n return Intersect(sum(x) / len(x), sum(y) / len(y))\n\n\ndef square_img_to_centers_list(img):\n img_shadowless = rm_shadow(img)\n kernel = np.ones((5, 5), np.uint8)\n img_erosion = cv2.erode(img_shadowless, kernel, iterations=1)\n img_dilation = cv2.dilate(img_erosion, kernel, 
iterations=2)\n img_blurred_bilateral = cv2.bilateralFilter(img_dilation, 20, 50, 50)\n edges = cv2.Canny(img_blurred_bilateral, 200, 300)\n cv2.imshow(\"edges\", edges)\n # lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=32, minLineLength=20, maxLineGap=60)\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=32, minLineLength=30, maxLineGap=40)\n ext_lines = []\n for line in lines.copy():\n new_line = extend_line(line)\n ext_lines.append(new_line)\n intersections = []\n i = 0\n for line_1 in ext_lines:\n j = 0\n for line_2 in ext_lines:\n if i < j:\n x_center, y_center, theta, found = check_intersect(line_1[0], line_2[0])\n if found:\n new_point = Intersect(x_center, y_center, theta=theta)\n intersections.append(new_point)\n j += 1\n i += 1\n intersections = rm_nearby_intersect(intersections)\n found_rect = categorize_rect(intersections)\n found_rect_centers = rm_duplicates(found_rect, intersections)\n\n height, width, _ = img.shape\n blank_image = np.zeros((height, width, 3), np.uint8)\n for point in intersections:\n cv2.circle(blank_image, (point.x, point.y), 5, (255, 255, 255), -1)\n for center in found_rect_centers:\n cv2.circle(blank_image, (int(center[0]), int(center[1])), 7, (0, 255, 255), -1)\n cv2.imshow(\"Only the dots\", blank_image)\n cv2.waitKey()\n return found_rect_centers\n","sub_path":"interceptHelper.py","file_name":"interceptHelper.py","file_ext":"py","file_size_in_byte":13002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"653364074","text":"from tkinter import *\r\nfrom os import system\r\nfrom subprocess import call\r\n\r\n\r\ndef quit_loop():\r\n global selection\r\n selection = var.get()\r\n master.destroy()\r\n if selection == 1:\r\n\r\n print(\"Machine Learning Mode selected\")\r\n # system('python ./MachineLearningMode.py')\r\n # import MachineLearningMode\r\n call(['python','MachineLearningMode.py'])\r\n else:\r\n\r\n print(\"Free Hand Mode selected\")\r\n # system('python ./FreeHandMode.py')\r\n import FreeHandMode\r\n\r\n\r\n\r\n\r\nmaster = Tk()\r\nmaster.geometry(\"300x300+500+200\")\r\nmaster.title(\"Predictor Pen\")\r\n\r\n# Label for choosing between Machine Learning mode or Free hand mode\r\nchoose_label = Label(master, text='Choose the Mode',font=(\"Google Sans\",20),padx=10,pady=10).pack()\r\n\r\n\r\n# Radiobutton for choosing between two modes\r\nvar = IntVar()\r\nvar.set(1)\r\nradioButton_ML = Radiobutton(master, text='Machine Learning Mode', variable=var,value=1,font=(\"Google Sans\",10),padx=10,pady=10).pack()\r\nradioButton_FH = Radiobutton(master, text='Free Hand Mode', variable=var,value=2,font=(\"Google Sans\",10),padx=10,pady=10).pack()\r\n\r\n\r\n\r\n# Button for selecting Mode\r\nbutton = Button(master,text='Select',command=quit_loop,bd=3,pady=10).pack()\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n master.mainloop()\r\n\r\nprint(\"END OF HOMEPAGE\")","sub_path":"Home_Page.py","file_name":"Home_Page.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"491595138","text":"# Copyright (c) 2015\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. 
You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport luigi\nfrom luigi.file import LocalTarget\nfrom luigi.scheduler import CentralPlannerScheduler\nimport luigi.server\nimport luigi.worker\nfrom mock import patch\nfrom helpers import with_config, unittest\nimport os\nimport tempfile\n\n\nclass TestExternalFileTask(luigi.ExternalTask):\n \"\"\" Mocking tasks is a pain, so touch a file instead \"\"\"\n path = luigi.Parameter()\n times_to_call = luigi.Parameter()\n\n def __init__(self, *args, **kwargs):\n super(TestExternalFileTask, self).__init__(*args, **kwargs)\n self.times_called = 0\n\n def complete(self):\n \"\"\"\n Create the file we need after a number of preconfigured attempts\n \"\"\"\n self.times_called += 1\n\n if self.times_called >= self.times_to_call:\n open(self.path, 'a').close()\n\n return os.path.exists(self.path)\n\n def output(self):\n return LocalTarget(path=self.path)\n\n\nclass TestTask(luigi.Task):\n \"\"\"\n Requires a single file dependency\n \"\"\"\n tempdir = luigi.Parameter()\n complete_after = luigi.Parameter()\n\n def __init__(self, *args, **kwargs):\n super(TestTask, self).__init__(*args, **kwargs)\n self.output_path = os.path.join(self.tempdir, \"test.output\")\n self.dep_path = os.path.join(self.tempdir, \"test.dep\")\n self.dependency = TestExternalFileTask(path=self.dep_path,\n times_to_call=self.complete_after)\n\n def requires(self):\n yield self.dependency\n\n def output(self):\n return LocalTarget(\n path=self.output_path)\n\n def run(self):\n open(self.output_path, 'a').close()\n\n\nclass WorkerExternalTaskTest(unittest.TestCase):\n\n def setUp(self):\n self.scheduler = CentralPlannerScheduler(retry_delay=0.01,\n remove_delay=3,\n worker_disconnect_delay=3,\n disable_persist=3,\n disable_window=5,\n disable_failures=2)\n\n def _assert_complete(self, tasks):\n for t in tasks:\n self.assert_(t.complete())\n\n def _build(self, tasks):\n w = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)\n for t in tasks:\n w.add(t)\n w.run()\n w.stop()\n\n def test_external_dependency_already_complete(self):\n \"\"\"\n Test that the test task completes when its dependency exists at the\n start of the execution.\n \"\"\"\n tempdir = tempfile.mkdtemp(prefix='luigi-test-')\n test_task = TestTask(tempdir=tempdir, complete_after=1)\n luigi.build([test_task], local_scheduler=True)\n\n assert os.path.exists(test_task.dep_path)\n assert os.path.exists(test_task.output_path)\n\n os.unlink(test_task.dep_path)\n os.unlink(test_task.output_path)\n os.rmdir(tempdir)\n\n # complete() is called once per failure, twice per success\n assert test_task.dependency.times_called == 2\n\n @with_config({'core': {'retry-external-tasks': 'true',\n 'disable-num-failures': '4',\n 'max-reschedules': '4',\n 'worker-keep-alive': 'true',\n 'retry-delay': '0.01'}})\n def test_external_dependency_completes_later(self):\n \"\"\"\n Test that an external dependency that is not `complete` when luigi is invoked, but \\\n becomes `complete` while the workflow is executing is re-evaluated and\n allows dependencies to run.\n \"\"\"\n assert luigi.configuration.get_config().getboolean('core',\n 'retry-external-tasks',\n 
False) is True\n\n        original_get_work = self.scheduler.get_work\n\n        def decorated_get_work(*args, **kwargs):\n            # need to call `prune()` to make the scheduler run the retry logic\n            self.scheduler.prune()\n            return original_get_work(*args, **kwargs)\n\n        self.scheduler.get_work = decorated_get_work\n\n        tempdir = tempfile.mkdtemp(prefix='luigi-test-')\n\n        with patch('random.randint', return_value=0.1):\n            test_task = TestTask(tempdir=tempdir, complete_after=3)\n            self._build([test_task])\n\n        assert os.path.exists(test_task.dep_path)\n        assert os.path.exists(test_task.output_path)\n\n        os.unlink(test_task.dep_path)\n        os.unlink(test_task.output_path)\n        os.rmdir(tempdir)\n\n        # complete() is called once per failure, twice per success\n        assert test_task.dependency.times_called == 4\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test/worker_external_task_test.py","file_name":"worker_external_task_test.py","file_ext":"py","file_size_in_byte":5392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"122282181","text":"# Return:\n# has_arg?\n# full option name\ndef long_has_args(opt, longopts):\n    possibilities = [o for o in longopts if o.startswith(opt)]\n    if not possibilities:\n        raise GetoptError('option --%s not recognized' % opt, opt)\n    # Is there an exact match?\n    if opt in possibilities:\n        return False, opt\n    elif opt + '=' in possibilities:\n        return True, opt\n    # No exact match, so better be unique.\n    if len(possibilities) > 1:\n        # XXX since possibilities contains all valid continuations, might be\n        # nice to work them into the error msg\n        raise GetoptError('option --%s not a unique prefix' % opt, opt)\n    assert len(possibilities) == 1\n    unique_match = possibilities[0]\n    has_arg = unique_match.endswith('=')\n    if has_arg:\n        unique_match = unique_match[:-1]\n    return has_arg, unique_match\n","sub_path":"test_segment_base/getopt_0.py","file_name":"getopt_0.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"227908848","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPython Scrapy: fetch a QQ friend list (via Qzone).\nThe cookie in the request headers is critical here: you must log in to Qzone yourself,\ngrab the cookie issued on login, and pass it in the headers of the friend-list API request.\nNote!: when capturing traffic, the captured request headers do NOT carry the cookie\nobtained from login, but if you send a request to the friend-list API without that\ncookie you will get no data back, so you must attach it.\nSince this throwaway script is not a regularly used crawler, no simulated login is\nimplemented to obtain the cookie, g_tk, and the other required header values; obtain\nthem yourself.\nauthor: Sugobet\ntime: 2019-11-23\n\"\"\"\n\nimport scrapy\n\nimport re\nfrom json import loads\n\n\nclass GetQqFriendListSpiderSpider(scrapy.Spider):\n    name = 'get_qq_friend_list_spider'\n    allowed_domains = ['h5.qzone.qq.com']\n    # start_urls = ['http://h5.qzone.qq.com/']\n\n    qq_number = 00000000  # your QQ number\n    offset = 50  # paging offset: starts at 50, advances by +50; no need to change\n    g_tk = \"\"  # taken from the API request; add it yourself\n    cookie = \"\"  # the cookie obtained after logging in to Qzone; add it yourself\n    headers = {\n        # the Referer does not need to be changed\n        \"Referer\":\n            \"https://user.qzone.qq.com/proxy/domain/qzs.qq.com/qzone/v8/pages/setting/visit_v8.html?g_iframeUser=1\",\n        \"cookie\": cookie\n    }\n\n    def start_requests(self):\n        url = f\"https://h5.qzone.qq.com/proxy/domain/base.qzone.qq.com/cgi-bin/right/get_entryuin\\\nlist.cgi?uin={self.qq_number}&fupdate=1&action=1&offset={self.offset}&g_tk={self.g_tk}\"\n        yield scrapy.Request(url, headers=self.headers, callback=self.parse)\n\n        self.offset += 50\n\n    def parse(self, response):\n        data = loads(re.sub(r\"_Callback\\(|\\);\", \"\", response.text))\n        if len(data.get(\"data\").get(\"uinlist\")) != 0:\n            for datas in data.get(\"data\").get(\"uinlist\"):\n                name = datas.get(\"label\")  # friend's name\n                qq_number = datas.get(\"data\")  # friend's QQ number\n\n                yield {\n                    \"name\": name,\n                    \"qq_number\": qq_number,\n                }\n\n            url = f\"https://h5.qzone.qq.com/proxy/domain/base.qzone.qq.com/cgi-bin/right/get_entryuin\\\nlist.cgi?uin={self.qq_number}&fupdate=1&action=1&offset={self.offset}&g_tk={self.g_tk}\"\n            yield scrapy.Request(url, headers=self.headers, callback=self.parse)\n\n            self.offset += 50\n","sub_path":"qqfriend_list_spider/qqfriend_spider/spiders/get_friend_list_spider.py","file_name":"get_friend_list_spider.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
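The spider above leaves g_tk to be filled in by hand. For reference, Qzone clients commonly derive g_tk from the p_skey cookie value with a small rolling hash; this sketch follows the widely circulated client-side algorithm and is an assumption about that API, not code from the original spider:

def compute_g_tk(p_skey):
    # DJB-style rolling hash over the p_skey cookie value, masked to 31 bits,
    # matching the hash used by Qzone's client-side JavaScript.
    h = 5381
    for ch in p_skey:
        h += (h << 5) + ord(ch)
    return h & 0x7fffffff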
+{"seq_id":"495269293","text":"import cv2\nimport numpy as np\n\ndef bytes2opencv_img(img_raw):\n    img_str = img_raw.read()\n    img_nparr = np.frombuffer(img_str, np.uint8)\n    img = cv2.imdecode(img_nparr, cv2.IMREAD_COLOR)\n    # cv2.imshow('Uploaded', img)\n    # cv2.waitKey()\n    # cv2.destroyAllWindows()\n    return img\n\ndef bgr2rgb(img):\n    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\ndef resize_image(img, new_shape):\n    h, w = img.shape[:2]\n    new_h, new_w = new_shape[:2]\n\n    interpolation = cv2.INTER_LINEAR\n    if new_h - h < 0 and new_w - w < 0:\n        # shrinking below the original image size, where INTER_AREA preserves detail better\n        interpolation = cv2.INTER_AREA\n\n    return cv2.resize(img, dsize=(new_w, new_h), interpolation=interpolation)\n","sub_path":"api/services/image_processor.py","file_name":"image_processor.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"111238974","text":"import pandas as pd\nimport numpy as np\nimport sys\nfrom config import prefix\nfrom dictionaries import participants\nimport itertools\nimport json\nimport os\nfrom tqdm import tqdm\nfrom scipy.spatial.transform import Rotation as R\n\nBODY = ['Head', 'LeftHand', 'RightHand']\nTRAJ_SIZE = 64\n\ndef extract_trajectories():\n    x = []\n    for participant, task in tqdm(itertools.product(participants, range(1, 7)), total=108):\n        path = '{0}raw/{1}/{1}_task{2}.txt'.format(prefix, participant, task)\n        with open(path) as f:\n            data = pd.DataFrame([json.loads(line) for line in f])\n\n        origin, destination = data.iloc[0]['step'], data.iloc[-1]['step']\n\n        data = data[data['dynamic'] == True]\n        objects = data.groupby('name')\n\n        trajectories = {}\n        for object, object_data in objects:\n            if object in BODY:\n                continue\n            streams = []\n            current_stream = []\n            for loc, row in object_data.iterrows():\n                if row['visible']:\n                    current_stream.append(row['step'])\n                    if len(current_stream) == TRAJ_SIZE:\n                        streams.append(current_stream)\n                        current_stream = []\n                else:\n                    current_stream = []\n            streams = np.array(streams, dtype=int)\n            trajectories[object] = streams\n\n        head, lhand, rhand = [objects.get_group(obj).set_index('step') for obj in BODY]\n        for object, streams in trajectories.items():\n            group = objects.get_group(object).set_index('step')\n            for stream in streams:\n                trajectory = group.loc[stream]\n                positions = trajectory[['posX', 'posY', 'posZ']].to_numpy(dtype=np.float32)\n                motion = np.sum(np.var(positions, axis=0))\n                if motion < 1e-3:\n                    continue\n\n                head_ = head.loc[stream]; lhand_ = lhand.loc[stream]; rhand_ = rhand.loc[stream]\n                velocities = trajectory[['posX', 'posY', 'posZ']].to_numpy(dtype=np.float32)\n                head_positions = head_[['posX', 'posY', 'posZ']].to_numpy(dtype=np.float32)\n                head_rotations = head_[['rotX', 'rotY', 'rotZ', 'rotW']].to_numpy(dtype=np.float32)\n                lhand_positions = lhand_[['posX', 'posY', 'posZ']].to_numpy(dtype=np.float32)\n                rhand_positions = rhand_[['posX', 
'posY', 'posZ']].to_numpy(dtype=np.float32)\n\n x_ = np.array(np.zeros((TRAJ_SIZE, 12)), dtype=np.float32)\n for i in range(TRAJ_SIZE):\n r = R.from_quat(head_rotations[i])\n rel_pos = r.apply(positions[i] - head_positions[i], inverse=True)\n rel_vel = r.apply(velocities[i], inverse=True)\n lhand_pos = r.apply(lhand_positions[i] - head_positions[i], inverse=True)\n rhand_pos = r.apply(rhand_positions[i] - head_positions[i], inverse=True)\n x_[i] = np.concatenate([rel_pos, rel_vel, lhand_pos, rhand_pos])\n\n frame = {\n 'participant': participant,\n 'task': task,\n 'object': object,\n 'start_step': stream[0],\n 'end_step': stream[-1],\n 'trajectory': x_,\n 'traj_image': convert2img(x_)\n }\n x.append(frame)\n\n x = pd.DataFrame(x)\n x.to_json(os.path.join(prefix, 'trajectories.json'), orient='index')\n\ndef convert2img(x):\n normalized = np.array(np.zeros((x.shape)), dtype=np.float32)\n mins = [-1, -1, 0, -1, -1, -1, -1, -1, 0, -1, -1, 0]\n maxes = [1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 2]\n for i, row in enumerate(x):\n for j, val in enumerate(row):\n normalized[i, j] = np.clip(255 * (val - mins[j]) / (maxes[j] - mins[j]), 0, 255)\n return np.array(normalized[:,:,np.newaxis], dtype=np.uint8)\n\nif __name__ == '__main__':\n extract_trajectories()\n","sub_path":"backup/trajectory_loader.py","file_name":"trajectory_loader.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"184428461","text":"import math\ndef paint_calulator(height,width,cover):\n area = height * width\n num_cans = math.ceil(area/cover)\n print(f\"You will need {num_cans} cans of paint\")\n\n\nheight = int(input(\"height of the wall : \"))\nwidth = int(input(\"Width of the wall : \"))\ncoverage = 5\n\npaint_calulator(height=height,width=width,cover=coverage)","sub_path":"Day 08/8.1.paint_area_calculator.py","file_name":"8.1.paint_area_calculator.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"429495823","text":"\"\"\"\nCreate a function named reversed_list() that takes two lists of the same size as parameters named lst1 and lst2.\nThe function should return True if lst1 is the same as lst2 reversed. 
The function should return False otherwise.\nFor example, reversed_list([1, 2, 3], [3, 2, 1]) should return True.\n\"\"\"\n#Write your function here\ndef reversed_list(lst1,lst2):\n i=0\n j=len(lst1)-1\n while i <= len(lst1)//2+1:\n if lst1[i] == lst2[j]:\n i+=1\n j-=1\n continue\n else:\n return False\n return True\n\n#Uncomment the lines below when your function is done\nprint(reversed_list([1, 2, 3], [3, 2, 1]))\nprint(reversed_list([1, 5, 3], [3, 2, 1]))\n\n\n#Write your function here\n#def reversed_list(lst1, lst2):\n# for index in range(len(lst1)):\n# if lst1[index] != lst2[len(lst2) - 1 - index]:\n# return False\n# return True","sub_path":"codeAcademy/Advancedloop/reversed list.py","file_name":"reversed list.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"73628779","text":"import os\nimport sys\n\nimport cv2\nimport torch\nimport tqdm\nfrom data.mmhand_dataset_data_loader import MMHandDatasetDataLoader\nfrom easydict import EasyDict as edict\n\nfrom models.Generator import Generator as G\nfrom models.network_utils import get_norm_layer\nif __name__ == \"__main__\":\n\n _, ckp, dataroot, DST, dataset, ratio, device = sys.argv\n device = int(device)\n opt = edict()\n opt.dataroot = dataroot\n opt.isTrain = False\n opt.dataset = dataset\n opt.augmentation_ratio = float(ratio)\n opt.distributed = False\n opt.batchSize = 1\n opt.nThreads = 4\n\n dataloader = MMHandDatasetDataLoader(opt)\n weights = torch.load(\n os.path.join('checkpoints', ckp, 'latest_net_netG.pth'), 'cpu')\n\n input_nc = [3, 42, 6]\n norm = get_norm_layer('batch')\n model = G(input_nc=input_nc,\n output_nc=3,\n ngf=64,\n norm_layer=norm,\n use_dropout=True,\n n_blocks=9)\n model.load_state_dict(weights)\n model = model.to(device)\n model = model.eval()\n\n if not os.path.isdir(DST): os.mkdir(DST)\n for i, sample in tqdm.tqdm(enumerate(dataloader), total=len(dataloader)):\n input_P1 = sample['P1'].to(device).float()\n input_P2 = sample['P2'].to(device).float()\n input_D1 = sample['D1'].to(device).float()\n input_D2 = sample['D2'].to(device).float()\n input_H1 = sample['H1'].to(device).float()\n\n model_input = [\n input_H1,\n torch.cat((input_P1, input_P2), 1),\n torch.cat((input_D1, input_D2), 1)\n ]\n\n fake = None\n real = None\n with torch.no_grad():\n fake_p2 = model(model_input)\n fake = fake_p2.squeeze(0).permute(1, 2, 0).detach().cpu().numpy()\n fake = (fake * 0.5 + 0.5) * 255.\n fake = cv2.cvtColor(fake, cv2.COLOR_RGB2BGR)\n\n real = sample['H2'].squeeze(0).permute(1, 2, 0).cpu().numpy()\n real = (real * 0.5 + 0.5) * 255.\n real = cv2.cvtColor(real, cv2.COLOR_RGB2BGR)\n\n *_, folder, name = sample['H2_path'][0].split('/')\n dst_i = os.path.join(DST, folder)\n if not os.path.isdir(dst_i): os.mkdir(dst_i)\n\n cv2.imwrite(os.path.join(dst_i, name), fake)\n","sub_path":"aug.py","file_name":"aug.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"323865507","text":"n, q = map(int, input().split())\n\nG = [[] for _ in range(n)]\nfor _ in range(n - 1):\n a, b = map(lambda x: int(x) - 1, input().split())\n G[a].append(b)\n G[b].append(a)\n\nfrom collections import deque\n\n\ndef bfs(p):\n res = []\n used = [False] * (n)\n dq = deque()\n dq.append([p, 0])\n while dq:\n v, i = dq.popleft()\n if used[v]: continue\n used[v] = True\n res.append([v, i])\n for u in G[v]:\n dq.append([u, i + 1])\n return res\n\n\nP = 
bfs(0)\nP.sort()\n\nfor _ in range(q):\n c, d = map(lambda x: int(x) - 1, input().split())\n if (P[c][1] + P[d][1]) % 2: print('Road')\n else: print('Town')\n","sub_path":"src/data/584.py","file_name":"584.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"83219822","text":"from django.apps import AppConfig\nfrom django.conf import settings\n\n\nclass PGGFGConfig(AppConfig):\n name = 'pggfg'\n\n\n def ready(self):\n \"\"\"Just injecting i18n and otree tags so they are not fucking loaded in each page template\"\"\"\n t = settings.TEMPLATES[0]\n t['OPTIONS']['builtins'] = [\n 'otree.templatetags.otree',\n 'django.templatetags.i18n'\n ]\n","sub_path":"pggfg/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"561588313","text":"# Copyright 2020 128 Technology, Inc.\n# Copyright 2016 Dillon Giacoppo github.com/dillon-giacoppo\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# The contents of this file were mostly copied from:\n# https://github.com/dillon-giacoppo/rules_python_external\n# specifically the files namespace_pkgs.py and wheel.py.\n# 128 Technology has made changes to these files and combined them into this file.\n\n# Summary of changes:\n# Removed type hints, brought _get_dist_info into this file, changed private functions\n# to have \"_\" prefix, and minor formatting\n\n\"\"\"Utility functions to discover python package types\"\"\"\nimport glob\nimport os\nimport sys\nimport textwrap\n\n\ndef setup_namespace_pkg_compatibility(wheel_dir):\n \"\"\"Converts native namespace packages and pkg_resource-style packages to pkgutil-style packages\n Namespace packages can be created in one of three ways. 
They are detailed here:\n https://packaging.python.org/guides/packaging-namespace-packages/#creating-a-namespace-package\n 'pkgutil-style namespace packages' (2) works in Bazel, but 'native namespace packages' (1) and\n 'pkg_resources-style namespace packages' (3) do not.\n We ensure compatibility with Bazel of methods 1 and 3 by converting them into method 2.\n Args:\n wheel_dir: the directory of the wheel to convert\n \"\"\"\n\n namespace_pkg_dirs = _pkg_resources_style_namespace_packages(wheel_dir)\n if not namespace_pkg_dirs and _native_namespace_packages_supported():\n namespace_pkg_dirs = _implicit_namespace_packages(\n wheel_dir, ignored_dirnames=[\"%s/bin\" % wheel_dir]\n )\n\n for ns_pkg_dir in namespace_pkg_dirs:\n _add_pkgutil_style_namespace_pkg_init(ns_pkg_dir)\n\n\ndef _pkg_resources_style_namespace_packages(wheel_dir):\n \"\"\"Discovers namespace packages implemented using the 'pkg_resources-style namespace packages' method.\n \"While this approach is no longer recommended, it is widely present in most existing namespace packages.\" - PyPA\n See https://packaging.python.org/guides/packaging-namespace-packages/#pkg-resources-style-namespace-packages\n \"\"\"\n namespace_pkg_dirs = set()\n\n dist_info = _get_dist_info(wheel_dir)\n namespace_packages_record_file = os.path.join(dist_info, \"namespace_packages.txt\")\n if os.path.exists(namespace_packages_record_file):\n with open(namespace_packages_record_file) as nspkg:\n for line in nspkg.readlines():\n namespace = line.strip().replace(\".\", os.sep)\n if namespace:\n namespace_pkg_dirs.add(os.path.join(wheel_dir, namespace))\n return namespace_pkg_dirs\n\n\ndef _get_dist_info(wheel_dir):\n \"\"\"\"Returns the relative path to the dist-info directory if it exists.\n Args:\n wheel_dir: The root of the extracted wheel directory.\n Returns:\n Relative path to the dist-info directory if it exists, else, None.\n \"\"\"\n dist_info_dirs = glob.glob(os.path.join(wheel_dir, \"*.dist-info\"))\n if not dist_info_dirs:\n raise ValueError(\n \"No *.dist-info directory found. %s is not a valid Wheel.\" % wheel_dir\n )\n\n if len(dist_info_dirs) > 1:\n raise ValueError(\n \"Found more than 1 *.dist-info directory. 
%s is not a valid Wheel.\"\n % wheel_dir\n )\n\n return dist_info_dirs[0]\n\n\ndef _native_namespace_packages_supported():\n \"\"\"Returns true if this version of Python supports native namespace packages.\"\"\"\n return (sys.version_info.major, sys.version_info.minor) >= (3, 3)\n\n\ndef _implicit_namespace_packages(directory, ignored_dirnames):\n \"\"\"Discovers namespace packages implemented using the 'native namespace packages' method.\n AKA 'implicit namespace packages', which has been supported since Python 3.3.\n See: https://packaging.python.org/guides/packaging-namespace-packages/#native-namespace-packages\n Args:\n directory: The root directory to recursively find packages in.\n ignored_dirnames: A list of directories to exclude from the search\n Returns:\n The set of directories found under root to be packages using the native namespace method.\n \"\"\"\n namespace_pkg_dirs = set()\n for dirpath, dirnames, filenames in os.walk(directory, topdown=True):\n # We are only interested in dirs with no __init__.py file\n if \"__init__.py\" in filenames:\n dirnames[:] = [] # Remove dirnames from search\n continue\n\n for ignored_dir in ignored_dirnames or []:\n if ignored_dir in dirnames:\n dirnames.remove(ignored_dir)\n\n non_empty_directory = dirnames or filenames\n if (\n non_empty_directory\n and\n # The root of the directory should never be an implicit namespace\n dirpath != directory\n ):\n namespace_pkg_dirs.add(dirpath)\n\n return namespace_pkg_dirs\n\n\ndef _add_pkgutil_style_namespace_pkg_init(dir_path):\n \"\"\"Adds 'pkgutil-style namespace packages' init file to the given directory\n See: https://packaging.python.org/guides/packaging-namespace-packages/#pkgutil-style-namespace-packages\n Args:\n dir_path: The directory to create an __init__.py for.\n Raises:\n ValueError: If the directory already contains an __init__.py file\n \"\"\"\n ns_pkg_init_filepath = os.path.join(dir_path, \"__init__.py\")\n\n if os.path.isfile(ns_pkg_init_filepath):\n raise ValueError(\"%s already contains an __init__.py file.\" % dir_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n with open(ns_pkg_init_filepath, \"w\") as ns_pkg_init_f:\n # See https://packaging.python.org/guides/packaging-namespace-packages/#pkgutil-style-namespace-packages\n ns_pkg_init_f.write(\n textwrap.dedent(\n \"\"\"\\\n # __path__ manipulation added by rules_pip to support namespace pkgs.\n __path__ = __import__('pkgutil').extend_path(__path__, __name__)\n \"\"\"\n )\n )\n","sub_path":"src/piprules/namespace_pkgs.py","file_name":"namespace_pkgs.py","file_ext":"py","file_size_in_byte":6502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"496113068","text":"#!/usr/bin/python\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport sys\nfrom plot.plotting_utilities import *\n\nproject_dir = '/home/kiliakis/work/git/cpu-gpu-bench'\nres_dir = project_dir + '/results'\n\n\ninput_file = res_dir + '/track-v1.csv'\n\nimages_dir = project_dir + '/results'\n\n\nif __name__ == '__main__':\n data = np.genfromtxt(input_file, dtype=str, delimiter='\\t')\n header = data[0].tolist()\n data = data[1:]\n dir1 = group_by(header, data, [\n 'version', 'function'], ['', '-'])\n dir1 = keep_only(\n header, dir1, ['n_particles', 'turn_time', 'throughput(Mp/s)'])\n # print(dir1)\n dir2 = {}\n for k, v in dir1.items():\n key = k.split('-')[0] + '-track'\n if key not in dir2:\n dir2[key] = [v[0], np.array(v[1], dtype=float)]\n else:\n dir2[key][1] += 
np.array(v[1], dtype=float)\n dir1 = {**dir1, **dir2}\n for method in ['kick', 'drift', 'track']:\n plotDir = {}\n x = []\n for k, v in dir1.items():\n if(method in k):\n plotDir[k] = np.array(v[1], dtype=float)\n x = np.array(v[0], dtype=int)\n # print(plotDir)\n # print(x)\n ax = plot_lines_from_dir(plotDir, x=np.log10(x),\n normalize=plotDir['cpuserial-' + method],\n xlabel='log10(#particles)',\n ylabel='Speedup',\n title=method + ' benchmark',\n image_name=res_dir + '/plots/track/' + method + '-v2.pdf',\n ret=True,\n figsize=(8, 6))\n # print(ax, p)\n to_text = ['*** x = 6 ***']\n # colors = []\n null, labels = ax.get_legend_handles_labels()\n for line, label in zip(ax.lines, labels):\n if label != 'cpucserial':\n annotate(ax, line.get_xdata()[-2:], line.get_ydata()[-2:],\n fontsize='8', color=line.get_color())\n to_text.append('%s: %.2lf' %\n (label.split('-')[0], line.get_ydata()[1]))\n # colors.append(line.get_color())\n\n to_text = '\\n'.join(to_text)\n\n ylims = ax.get_ylim()\n print(ylims)\n # ax.text(5.5, 0.8 * (ylims[0] + (ylims[1] - ylims[0]) / 2),\n # to_text,\n # ha='left', va='bottom')\n bbox_props = dict(boxstyle=\"round,pad=0.3\", fc='white', lw=1)\n ax.annotate(to_text, xy=(5.4, 0.85 * (ylims[0] + ylims[1]) / 2),\n fontsize='8.5', bbox=bbox_props)\n plt.savefig(res_dir + '/plots/track/' +\n method + '-v2.pdf', bbox_inches='tight')\n plt.sca(ax)\n plt.show()\n plt.close()\n","sub_path":"scripts/plots/plot_for_track.py","file_name":"plot_for_track.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129980214","text":"from lab10.trace import Trace\n\n\ndef main():\n # Input format example\n # Alphabet\n alphabet_letters = \"abcdef\"\n # Independence relation\n independence_relation_pairs = \"ad, da, bf, fb, ac, ca\"\n # Word representing a trace\n word = \"afadbdfba\"\n\n alphabet = set(alphabet_letters)\n independence_relation = set(independence_relation_pairs.replace(' ', '').split(','))\n\n trace = Trace(alphabet, independence_relation, word)\n\n pretty_dependence_format = ', '.join(['(' + pair[0] + ', ' + pair[1] + ')' for pair in trace.dependence])\n pretty_dependence_format = '{' + pretty_dependence_format + '}'\n print(f\"dependency relation: {pretty_dependence_format}\")\n print(f\"trace as equivalence class: {trace.trace_equiv_class()}\")\n print(f\"foata normal form: {trace.foata_normal}\")\n print(f\"foata normal from graph: {trace.graph_to_foata()}\")\n\n trace.save_graph_to_file()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/main/lab10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"193980067","text":"\ndef producer_with_new_class():\n from DictWithStrings import DictWithStrings\n o = DictWithStrings(\"streaming_dict_with_str\")\n\n o[666] = \"Oh! Yeah! 
Holidays!\";\n\n print(\"AFTER Setitem\", flush=True)\n # No sync is done here, therefore the data is still in memory, but STREAM would send data anyway to the Consumer\n\ndef producer_with_new_classandNumpy():\n from DictWithNumpy import DictWithNumpy\n from hecuba import StorageNumpy\n import numpy as np\n o = DictWithNumpy(\"streaming_dict_with_numpy\")\n n=np.arange(12).reshape(3,4)+1\n sn = StorageNumpy(n,\"miclassNumpy\")\n o[42]=sn\n print(\"AFTER Setitem\", flush=True)\n # No sync is done here, therefore the data is still in memory, but STREAM would send data anyway to the Consumer\n\ndef producer_subclass_storageNumpy():\n from myNumpy import myNumpy\n import numpy as np\n\n x = myNumpy(np.arange(12,dtype=float).reshape(3,4)+1, \"mynpsubclass\")\n x.send()\n print(\"AFTER Send\", flush=True)\n\ndef main():\n print(\"PRODUCER STARTING\", flush=True)\n producer_with_new_classandNumpy()\n producer_with_new_class()\n #producer_subclass_storageNumpy()\n print(\"PRODUCER DONE\", flush=True)\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/streaming/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"272243448","text":"# https://en.wikipedia.org/wiki/Wheat_and_chessboard_problem\n\ndef squares_needed(grains):\n \n if grains == 0:\n return 0\n \n square = 0\n while grains > 0:\n grains -= 2**square\n square += 1\n return square\n","sub_path":"rice_chess.py","file_name":"rice_chess.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"326973134","text":"'''\r\nCreated on Mar 23, 2020\r\n\r\n@author: Isaac Harasty\r\ntes\r\ntes\r\n\r\n'''\r\n\r\ninputfile = open(\"testing.txt\",mode = 'r',encoding = 'utf-8')\r\ntargetfile = open(\"testing2.txt\", mode = 'w')\r\n\r\n'''\r\ncode for moving inputfile information to targetfile without\r\nits new lines\r\n'''\r\nfor line in inputfile:\r\n for char in line:\r\n if char == \"\\n\":\r\n print(line)\r\n else:\r\n targetfile.write(char)\r\n \r\n \r\n \r\ninputfile.close()\r\ntargetfile.close()","sub_path":"PythonWoo/Testing/FileIO.py","file_name":"FileIO.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"463054558","text":"num_1 = 1\nnum_2 = 2\niterator = 0\ntotal = 0\n\nwhile num_2 <= 4000000:\n\tif (iterator % 3) == 0: #every third fibbonaci number is even\n\t\ttotal += num_2\n\ttemp = num_2 + num_1\n\tnum_1 = num_2\n\tnum_2 = temp\n\titerator += 1\n\nprint('Result: {}'.format(total))","sub_path":"Problem_2.py","file_name":"Problem_2.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"395001314","text":"# -*- coding: utf-8 -*- #\n# Copyright 2018 Google Inc. 
+{"seq_id":"395001314","text":"# -*- coding: utf-8 -*- #\n# Copyright 2018 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"E2e test for 'category-manager taxonomies' command group.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.category_manager import utils\nfrom tests.lib import sdk_test_base\nfrom tests.lib.surface.category_manager import e2e_base as base\n\n\nclass TaxonomyCommandsE2eTest(base.CategoryManagerE2eBase):\n  \"\"\"E2e test for taxonomy commands.\"\"\"\n\n  def testAllTaxonomyCommands(self):\n    description = 'test-taxonomy-description'\n    with self.CreateTaxonomyResource(description) as taxonomy:\n      self.assertEqual(taxonomy.description, description)\n\n      new_description = 'new-test-taxonomy-description'\n      expected_taxonomy = utils.GetMessagesModule().Taxonomy(\n          name=taxonomy.name,\n          displayName=taxonomy.displayName,\n          description=new_description)\n\n      args = '\"{}\" --description \"{}\"'.format(taxonomy.name, new_description)\n      updated_taxonomy = self.Run('category-manager taxonomies update ' + args)\n      self.assertEqual(updated_taxonomy, expected_taxonomy)\n\n      described_taxonomy = self.Run('category-manager taxonomies describe ' +\n                                    taxonomy.name)\n      self.assertEqual(described_taxonomy, expected_taxonomy)\n\n      found_taxonomy = self._ListTaxonomiesAndReturnMatch(taxonomy.displayName)\n      self.assertEqual(found_taxonomy, expected_taxonomy)\n\n\nif __name__ == '__main__':\n  sdk_test_base.main()\n","sub_path":"google-cloud-sdk/lib/tests/e2e/surface/category_manager/taxonomies/taxonomy_commands_e2e_test.py","file_name":"taxonomy_commands_e2e_test.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
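The market-data tests that follow hit live MOEX ISS endpoints, so they can fail offline or when quoted values change. A common isolation pattern is to stub the JSON fetch; this sketch uses pytest's monkeypatch fixture and assumes, without checking the module internals, that the history helpers route through get_json:

import portfolio_optimizer.download.history as history

def test_raw_json_shape_offline(monkeypatch):
    # Canned payload shaped like the live server response asserted below.
    fake = {'history': {'columns': ['TRADEDATE'], 'data': [['2003-02-26']]}}
    # Any helper that fetches through history.get_json now sees the canned
    # payload instead of the network.
    monkeypatch.setattr(history, 'get_json', lambda url: fake)
    index = fake['history']['columns'].index('TRADEDATE')
    assert fake['history']['data'][0][index] == '2003-02-26'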
df.shape[0] >= 100\n    assert df.loc['2018-03-02'] == 3273.16\n\n\ndef test_get_quotes_history():\n    df = get_quotes_history('MOEX', datetime.date(2017, 10, 2))\n    assert isinstance(df, pd.DataFrame)\n    assert len(df.columns) == 2\n    assert df.index.is_monotonic_increasing\n    assert df.index.is_unique\n    assert df.index[0] == pd.to_datetime('2017-10-02')\n    assert df.shape[0] > 100\n    assert df.loc['2018-03-05', CLOSE_PRICE] == 117\n    assert df.loc['2018-03-05', VOLUME] == 4553310\n\n\nclass TestTicker:\n    def test_ticker_is_iterable(self):\n        t = Quotes('AKRN', datetime.date(2017, 3, 1))\n        assert len(list(t)) >= 3\n\n\nclass TestTotalReturn:\n    t = Index(start_date=None)\n\n    def test_data_property_on_init_for_None_start_date(self):\n        # lower-level tests of server response\n        data = self.t.data\n        index = data['history']['columns'].index('TRADEDATE')\n        assert data['history']['data'][0][index] == '2003-02-26'\n\n    def test_len_method(self):\n        assert len(self.t) == 100\n\n    def test_bool_method(self):\n        assert bool(self.t)\n\n    def test_values_property(self):\n        assert isinstance(self.t.values, list)\n        assert len(self.t.values[0]) == 16\n\n    def test_columns_property(self):\n        assert self.t.columns == ['BOARDID',\n                                  'SECID',\n                                  'TRADEDATE',\n                                  'SHORTNAME',\n                                  'NAME',\n                                  'CLOSE',\n                                  'OPEN',\n                                  'HIGH',\n                                  'LOW',\n                                  'VALUE',\n                                  'DURATION',\n                                  'YIELD',\n                                  'DECIMALS',\n                                  'CAPITALIZATION',\n                                  'CURRENCYID',\n                                  'DIVISOR']\n\n    def test_dataframe_property(self):\n        assert isinstance(self.t.dataframe, pd.DataFrame)\n        assert list(self.t.dataframe.columns) == [CLOSE_PRICE]\n        assert self.t.dataframe.loc['2003-02-26', CLOSE_PRICE] == 335.67\n","sub_path":"src/portfolio_optimizer/download/tests/test_history.py","file_name":"test_history.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"60806383","text":"\n\nfrom xai.brain.wordbase.nouns._climate import _CLIMATE\n\n#class header\nclass _CLIMATES(_CLIMATE, ):\n\tdef __init__(self,): \n\t\t_CLIMATE.__init__(self)\n\t\tself.name = \"CLIMATES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"climate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_climates.py","file_name":"_climates.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"383147029","text":"# file = open('./data.csv', 'r+')\n# # file.write('id, name, email\\n')\n# # file.write('1, Robin, R@gmail.com')\n# # print(file.read())\n# print(file.readlines())\n# # for line in file:\n# #     print(line)\n# file.close()\n\n#check if file exists\n# import os.path\n# filename = 'data.csv'\n#\n# if os.path.isfile(filename):\n#     with open(filename, 'r') as file:\n#         print(file.read())\n# else:\n#     print(f'file {filename} does not exist')\n\n#Ctrl + Space for options in package\nfrom urllib import request\nimport json\n\nurl = 'https://official-joke-api.appspot.com/random_ten'\nr = request.urlopen(url)\nprint(r.getcode())\ndata = r.read()\njsonData = json.loads(data)\nprint(jsonData)\nprint(type(jsonData))\n\nimport requests\n\nr = requests.get(url)\ndata = r.text\n\n# for j in jsonData:\n#     setup = j['setup']\n#     print(setup)\n\nclass Joke:\n    def __init__(self, setup, punchline) -> None:\n        self.setup = setup\n        self.punchline = punchline\n\n    def __str__(self) -> str:\n        return f'setup {self.setup}, punchline {self.punchline}'\n\n\njokes = []\nfor j in jsonData:\n    setup = j['setup']\n    punchline = j['punchline']\n    joke = Joke(setup, punchline)\n
    jokes.append(joke)\n\nprint(f'Got {len(jokes)} jokes')\n\n\nfor joke in jokes:\n    print(joke)\n\nimport pyttsx3","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"155243953","text":"#!/usr/bin/env python\n# This script demonstrates how the stencil size affects the accuracy \n# of the RBF-FD approximation\nimport numpy as np\nimport sympy\nimport rbf.basis\nimport rbf.fd\nimport rbf.nodes\nimport matplotlib.pyplot as plt\nimport logging\nimport rbf.domain\nlogging.basicConfig(level=logging.DEBUG)\n\n# create test function\nx,y = sympy.symbols('x,y')\nf = 1 + sympy.sin(4*x) + sympy.cos(3*x) + sympy.sin(2*y)\nLf = f.diff(x) + f.diff(y)\nf = sympy.lambdify((x,y),f,'numpy')\nLf = sympy.lambdify((x,y),Lf,'numpy')\n\n# create nodes\nT = 1000\nvert,smp = rbf.domain.circle()\nnodes,sid = rbf.nodes.menodes(T,vert,smp,neighbors=5,itr=200,delta=0.05)\ninterior, = np.nonzero(sid == -1)\nboundary, = np.nonzero(sid >= 0)\n\n# plot nodes \nfig,ax = plt.subplots()\nax.plot(nodes[:,0],nodes[:,1],'ko')\nfor s in smp:\n  ax.plot(vert[s,0],vert[s,1],'k-')\n\nax.set_aspect('equal')\nfig.tight_layout()\n\n# plot test function\nval = f(nodes[:,0],nodes[:,1])\ndiff_true = Lf(nodes[:,0],nodes[:,1])\np = ax.tripcolor(nodes[:,0],nodes[:,1],diff_true)\nax.set_title(u'$\\Delta$ u(x,y)')\nfig.colorbar(p)\n\n# Stencil size = 3\nfig,ax = plt.subplots(2,2)\nax[0][0].set_title('stencil size = 3')\nax[0][0].set_aspect('equal')\nN = 3\nL = rbf.fd.diff_matrix(nodes,[[1,0],[0,1]],size=N)\ndiff_est = L.dot(val)\nerr = np.abs(diff_est - diff_true)\n\np = ax[0][0].tripcolor(nodes[:,0],nodes[:,1],np.log10(err))\nfor s in smp:\n  ax[0][0].plot(vert[s,0],vert[s,1],'k-')\n\ncbar = fig.colorbar(p,ax=ax[0][0])\ncbar.set_label('log10(error)')\n\n# Stencil size = 5\nax[0][1].set_title('stencil size = 5')\nax[0][1].set_aspect('equal')\nN = 5\nL = rbf.fd.diff_matrix(nodes,[[1,0],[0,1]],size=N)\ndiff_est = L.dot(val)\nerr = np.abs(diff_est - diff_true)\n\np = ax[0][1].tripcolor(nodes[:,0],nodes[:,1],np.log10(err))\nfor s in smp:\n  ax[0][1].plot(vert[s,0],vert[s,1],'k-')\n\ncbar = fig.colorbar(p,ax=ax[0][1])\ncbar.set_label('log10(error)')\n\n# Stencil size = 10\nax[1][0].set_title('stencil size = 10')\nax[1][0].set_aspect('equal')\nN = 10\nL = rbf.fd.diff_matrix(nodes,[[1,0],[0,1]],size=N)\ndiff_est = L.dot(val)\nerr = np.abs(diff_est - diff_true)\n\np = ax[1][0].tripcolor(nodes[:,0],nodes[:,1],np.log10(err))\nfor s in smp:\n  ax[1][0].plot(vert[s,0],vert[s,1],'k-')\n\ncbar = fig.colorbar(p,ax=ax[1][0])\ncbar.set_label('log10(error)')\n\n# Stencil size = 15\nax[1][1].set_title('stencil size = 15')\nax[1][1].set_aspect('equal')\nN = 15\nL = rbf.fd.diff_matrix(nodes,[[1,0],[0,1]],size=N)\ndiff_est = L.dot(val)\nerr = np.abs(diff_est - diff_true)\n\np = ax[1][1].tripcolor(nodes[:,0],nodes[:,1],np.log10(err))\nfor s in smp:\n  ax[1][1].plot(vert[s,0],vert[s,1],'k-')\n\ncbar = fig.colorbar(p,ax=ax[1,1])\ncbar.set_label('log10(error)')\n\nfig.tight_layout()\n\n# compute max error as a function of stencil size\nsizes = range(3,60)\nmax_err = np.zeros(len(sizes))\nfor i,s in enumerate(sizes):\n  L = rbf.fd.diff_matrix(nodes,[[1,0],[0,1]],size=s)\n  diff_est = L.dot(val)\n  err = np.abs(diff_est - diff_true)\n  max_err[i] = np.max(err)\n\nfig,ax = plt.subplots()\nax.set_ylabel('maximum error')\nax.set_xlabel('stencil size')\nax.semilogy(sizes,max_err) \nax.grid()\n\nfig.tight_layout()\nplt.show()\n 
\n\n","sub_path":"demo/pde/fd/stencil/2d/order1.py","file_name":"order1.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"68219125","text":"import argparse\nimport sys\nimport os\nimport re\nimport gzip\n\n#X\t51054777\t.\tC\t.\t.\tPASS\tEND=51054812;BLOCKAVG_min30p3a\tGT:DP:GQX:MQ\t0/0:14:42:60\n#X\t51054813\t.\tT\t.\t.\tPASS\t.\tGT:DP:GQX:MQ\t0/0:19:57:60\n#X\t51054814\t.\tG\t.\t.\tPASS\t.\tGT:DP:GQX:MQ\t0/0:19:26:60\n#X\t51054815\t.\tT\t.\t.\tPASS\tEND=51054883;BLOCKAVG_min30p3a\tGT:DP:GQX:MQ\t0/0:16:48:60\n#X\t51054884\t.\tC\t.\t.\tPASS\tEND=51054903;BLOCKAVG_min30p3a\tGT:DP:GQX:MQ\t0/0:19:54:60\n#X\t51054904\t.\tT\tC\t769\tPASS\tDP=20;Dels=0.00;FS=0.000;HaplotypeScore=0.0000;MLEAC=2;MLEAF=1.00;MQ=60;MQ0=0;QD=38.45;S\n#B=-3.920e+02\tGT:AD:DP:GQ:PL:MQ:GQX\t1/1:0,20:20:60:802,60,0:60:60\n#X\t51054905\t.\tT\t.\t.\tPASS\tEND=51054956;BLOCKAVG_min30p3a\tGT:DP:GQX:MQ\t0/0:17:51:60\n#X\t51054957\t.\tA\t.\t.\tPASS\tEND=51054991;BLOCKAVG_min30p3a\tGT:DP:GQX:MQ\t0/0:14:42:58\n\n\nparser = argparse.ArgumentParser( description='some description')\nparser.add_argument(\"-s\", \"--snplist\", help=\"input snplist stub\", nargs=1)\nparser.add_argument(\"-i\", \"--individual_list\", help=\"input individual name file\", nargs=1)\nparser.add_argument(\"-o\", \"--output_stub\", help=\"stub of output name\", nargs=1)\nparser.add_argument(\"-r\", \"--ref_bases\", help=\"file containing reference bases per snp\", nargs=1)\nparser.add_argument(\"-m\", \"--method\", help=\"method, tabix or direct\", nargs=1)\nparser.add_argument(\"-d\", \"--directory\", help=\"directory where gvcfs are located\", type=str, default='')\nparser.add_argument(\"-p\", \"--postfix\", help=\"postfix of gvcf\", nargs=1)\nparser.add_argument(\"-x\", \"--make_matrix\", help=\"create matrix from vcf tags\", action=\"store_true\")\nparser.add_argument(\"-R\", \"--rs_annotation\", help=\"do annotation of rs numbers\", action=\"store_true\")\n\nargs = parser.parse_args()\n\ndef return_snplist(stub):\n snplist=list()\n fh=open(stub)\n for line in fh.readlines():\n snplist.append(line[:-1].split(\"\\t\"))\n fh.close()\n return snplist\n\ndef return_refbases(file):\n refdict=dict()\n fh=open(file)\n for line in fh.readlines():\n (snp,refallele)=line[:-1].split(\"\\t\")\n refdict[snp]=refallele\n fh.close()\n return refdict\n\ndef make_map_file(stub,snps,allgt):\n fh=open(stub+\".map\",\"w\")\n gt=allgt[0]\n for i in range(1,len(gt)):\n snp=snps[i-1]\n alleles=set()\n for j in range(len(allgt)):\n (allele1,allele2)=allgt[j][i]\n if allele1 != 'N':\n alleles.add(allele1)\n alleles.add(allele2)\n if len(alleles) < 3:\n fh.write(snp[1]+\"\\t\"+snp[0]+\"\\t0\\t\"+snp[2]+\"\\n\")\n\ndef get_inds(file):\n fh=open(file)\n inds=list()\n for line in fh.readlines():\n inds.append(line[:-1])\n fh.close()\n return inds\n\ndef return_snps_per_individual_tabix(ind,snplist,directory,postfix,refdict):\n print(ind)\n gt=list()\n gt.append([ind])\n mx=list()\n mx.append([ind])\n for snp in snplist:\n refstate=refdict[snp[0]]\n command=\"tabix \"+directory+ind+postfix+\" \"+snp[1]+\":\"+snp[2]+\"-\"+snp[2]\n snpfromvcf=os.popen(command).read()[:-1].split(\"\\t\")\n# print(snpfromvcf)\n# print(len(snpfromvcf))\n# print(snp,refstate,geno,altallele)\n (gt,mx)=append_alleles(gt,snpfromvcf,refstate,mx)\n# print(len(gt))\n return (gt,mx)\n\ndef write_pedfile(allgt,stub,snplist):\n fh=open(stub+\".ped\",\"w\")\n gt=allgt[0]\n locuslist=list()\n for i in 
range(1,len(gt)):\n        alleles=set()\n        for j in range(len(allgt)):\n            (allele1,allele2)=allgt[j][i]\n            if allele1 != 'N':\n                alleles.add(allele1)\n                alleles.add(allele2)\n        if len(alleles) < 3:\n            locuslist.append(i) \n        else:\n            print(\"OMITTING SNP\",i)\n    \n    for gt in allgt:\n        fh.write(\"DUM\\t\"+gt[0][0]+\"\\t0\\t0\\t0\\t0\")\n        for i in locuslist:\n            fh.write(\"\\t\"+gt[i][0]+\"\\t\"+gt[i][1])\n        fh.write(\"\\n\")\n    fh.close()\n    make_map_file(outfilestub,snplist,allgt)\n\n\ndef write_matrix(allgt,stub,snplist,allmx,rsdict,refdict):\n    #CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tAS01F01 ...\n    fh=open(stub+\".mx\",\"w\")\n    fh.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format('#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT'))\n    for i in range(len(allmx)):\n        fh.write(\"\\t{}\".format(allmx[i][0][0]))\n    fh.write('\\n')\n    lenallmx=len(allmx)\n    gt=allgt[0]\n    for i in range(1,len(gt)):\n        alleles=set()\n        for j in range(len(allgt)):\n            (allele1,allele2)=allgt[j][i]\n            if allele1 != 'N':\n                alleles.add(allele1)\n                alleles.add(allele2)\n        if len(alleles) < 3:\n            snp=snplist[i-1]\n            alleles=list(alleles)\n            alt='.' # VCF convention for a missing ALT allele at monomorphic sites\n            if len(alleles)==2:\n                alt=alleles[0]\n                if refdict[snp[0]]==alleles[0]:\n                    alt=alleles[1]\n            fh.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(snp[1],snp[2],snp[0],refdict[snp[0]],alt,'.','.','.','.'))\n            for j in range(lenallmx):\n                fh.write(\"\\t\"+allmx[j][i][0])\n            fh.write(\"\\n\")\n\n        else:\n            print(\"OMITTING SNP\",i)\n    fh.close()\n\ndef append_alleles(gt,snpfromvcf,refstate,mx):\n    if len(snpfromvcf)>1:\n        parts=snpfromvcf[9].split(\":\",1)\n        geno=parts[0]\n        nongeno=str()\n        if len(parts)>1:\n            nongeno=parts[1]\n        \n        altallele=snpfromvcf[4]\n        refallele=snpfromvcf[3]\n        vcfchrom=snpfromvcf[0]\n        vcfpos=snpfromvcf[1]\n        if altallele != '.' and refallele != refstate:\n            print(\"WARNING: disagreement between ref-alleles - \")\n            print(snpfromvcf)\n            gt.append([\"N\",\"N\"])\n            mx.append(['./.'])\n#            print(snp,refstate,geno,altallele)\n        elif geno == './.':\n            gt.append([\"N\",\"N\"])\n            mx.append(['./.'])\n        elif geno == '0/0':\n            gt.append([refstate,refstate])\n            mx.append([refstate+'/'+refstate+':'+nongeno])\n        elif geno == '0/1' and len(altallele)==1:\n            gt.append([refstate,altallele])\n            mx.append([refstate+'/'+altallele+':'+nongeno])\n        elif geno == '1/1' and len(altallele)==1:\n            gt.append([altallele,altallele])\n            mx.append([altallele+'/'+altallele+':'+nongeno])\n        else:\n            gt.append([\"N\",\"N\"])\n            mx.append(['./.'])\n\n    else:\n        gt.append([\"N\",\"N\"])\n        mx.append(['./.'])\n        print(\"WARNING: no line found!\")\n    return (gt,mx)\n\ndef get_vcf_line_components(line):\n    snpfromvcf=line.split(\"\\t\")\n    geno='./.'\n    altallele='.'\n    refallele='Q'\n    vcfchrom='0'\n    vcfpos='0'\n    if len(snpfromvcf)>1:\n        geno=snpfromvcf[9].split(\":\")[0]\n        refallele=snpfromvcf[3]\n        vcfchrom=snpfromvcf[0]\n        vcfpos=snpfromvcf[1]\n    return [vcfchrom,vcfpos,refallele,altallele]\n\ndef return_snps_per_individual_gvcfdirect(ind,snplist,directory,postfix,refdict):\n    gt=list()\n    gt.append([ind])\n    mx=list()\n    mx.append([ind])\n    print(ind)\n    try:\n        fileh = gzip.open(directory+ind+postfix)\n        line = fileh.readline()[:-1].decode('utf-8')\n        previousline=''\n        while line.startswith('#'):\n            line = fileh.readline()[:-1].decode('utf-8')\n        components=get_vcf_line_components(line)\n        for snp in snplist:\n            refstate=refdict[snp[0]]\n            #print(snp,components)\n            # advance to the first gVCF record at or beyond this SNP position\n            while line and (components[0]!=snp[1] or int(components[1]) < int(snp[2])):\n                previousline=line\n                line=fileh.readline()[:-1].decode('utf-8')\n                components=get_vcf_line_components(line)\n            if line:\n                if components[0]==snp[1] and int(components[1])==int(snp[2]):\n                    # exact position match\n                    (gt,mx)=append_alleles(gt,line.split(\"\\t\"),refstate,mx)\n                elif int(components[1]) > int(snp[2]):\n                    # the SNP may be covered by the previous (block) record\n                    snpfromvcf=previousline.split(\"\\t\")\n                    state=snpfromvcf[7].split(\";\")[0]\n                    if state.startswith('END'):\n
                        blockend=int(state.split('=')[1])\n                            #print(\"blockend:\",blockend)\n                        if int(snp[2]) <= blockend:\n                            #print(\"blockend within bounds\")\n                            (gt,mx)=append_alleles(gt,snpfromvcf,refstate,mx)\n                        else: \n                            #print(\"blockend OUT OF bounds\")\n                            (gt,mx)=append_alleles(gt,[''],refstate,mx)\n                    else:\n                        (gt,mx)=append_alleles(gt,[''],refstate,mx)\n                    \n                else:\n                    (gt,mx)=append_alleles(gt,[''],refstate,mx)\n                #print(gt)\n            else:\n                # reached end of file before finding this SNP\n                (gt,mx)=append_alleles(gt,[''],refstate,mx)\n    finally:\n        fileh.close()\n    return (gt,mx)\n\ndef get_rs_numbers(rsdict):\n    fh=open('dbsnp_tmp.vcf')\n    for line in fh.readlines():\n        parts=line[:-1].split('\\t')\n        rsdict[parts[0]+\"_\"+parts[1]]=parts[2]\n    fh.close()\n\nif __name__ == '__main__':\n    snpstub=args.snplist[0]\n    directory=args.directory\n    mxflag=args.make_matrix\n    postfix=args.postfix[0]\n    individuals_file=args.individual_list[0]\n    outfilestub=args.output_stub[0]\n    ref_bases_file=args.ref_bases[0]\n    method=args.method[0]\n    rsflag=args.rs_annotation\n    rsdict=dict()\n    if rsflag:\n        get_rs_numbers(rsdict) # fills rsdict in place\n    \n    snplist=return_snplist(snpstub)\n    print(len(snplist))\n    refdict=return_refbases(ref_bases_file)\n    print(len(refdict))\n    inds = get_inds(individuals_file)\n    allgt=list()\n    allmx=list()\n    for ind in inds:\n        if method == 'tabix':\n            (gt,mx)=return_snps_per_individual_tabix(ind,snplist,directory,postfix,refdict)\n        elif method == 'direct':\n            (gt,mx)=return_snps_per_individual_gvcfdirect(ind,snplist,directory,postfix,refdict)\n        #print(len(gt),gt)\n        allgt.append(gt)\n        if mxflag:\n            allmx.append(mx)\n    write_pedfile(allgt,outfilestub,snplist)\n    write_matrix(allgt,outfilestub,snplist,allmx,rsdict,refdict)\n    \n    \n\n","sub_path":"Integrate_VCFs/from_gvcf_to_ped.py","file_name":"from_gvcf_to_ped.py","file_ext":"py","file_size_in_byte":9429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"291665102","text":"import wx;\nimport math;\nfrom nodes.gridgraph import GridGraph;\nfrom nodes.visual import Visual;\n\nclass NodeGraphWithZoomingFeature(GridGraph):\n    __NODE_SCALE_FACTOR = 100;\n    __ZOOM_FACTOR = 0.5;\n    \n    def __init__(self,parent , **kwargs):\n        self.__ScaleX = 0;\n        self.__ScaleY = 0;\n        self.__Zoom = 0;\n        self.__MaxZoom = 4.25;\n        self.__MinZoom = 0.8; # No scaling\n        self.__ScrollAmount = 1;\n        self.__AccumulatedZoom = 0;\n        self.__ScaleFactor = 1;\n        self.__ShouldMoveGraph = False;\n        self.__LastMousePosition = wx.Point(0,0);\n        self.__IsMouseDown = False;\n        self.__Transform = None;\n        self.__Padding = wx.Point(0,0);\n        \n        super().__init__(parent,**kwargs);\n\n    def _OnHandleEvent(self):\n        self.Bind(wx.EVT_RIGHT_DOWN, self._OnRightMouseDown);\n        self.Bind(wx.EVT_MOTION, self._OnMouseMove);\n        self.Bind(wx.EVT_RIGHT_UP, self._OnRightMouseUp);\n        self.Bind(wx.EVT_MOUSEWHEEL, self._OnMouseWheel);\n        super()._OnHandleEvent();\n    \n    def _OnUpdateGeometry(self):\n        if(self.__Transform == None):\n            self.__Transform = self.Device.GetTransform(); \n        self.Device.SetTransform(self.Transform);\n\n    @property\n    def Transform(self):\n        return self.__Transform;\n    \n\n    def _OnRightMouseUp(self, event):\n        self.__LastMousePosition = event.GetPosition();\n        self.__ShouldMoveGraph = False;\n        self.__IsMouseDown = False;\n        self.Invalidate();\n        event.Skip();\n\n    @property\n    def IsGraphHold(self):\n        \"\"\"Tells whether the graph is currently held and can be dragged.\"\"\"\n        return self.__ShouldMoveGraph;\n\n    def _OnRightMouseDown(self, event):\n        if(event != None):\n            self.__LastMousePosition = event.GetPosition();\n            button = event.GetButton();\n            if(button == wx.MOUSE_BTN_RIGHT):\n
                if(event.ControlDown()):\n                    # Activate dragging the world around.\n                    self.__ShouldMoveGraph = True;\n                    self.__IsMouseDown = True;\n        event.Skip();\n    \n    def _OnMouseMove(self, event):\n        position = event.GetPosition() ;\n        if(self.__IsMouseDown != False) :\n            #Drag the world around\n            if(self.IsGraphHold and event.ControlDown()):\n                xDelta = position.x - self.__LastMousePosition.x;\n                yDelta = position.y - self.__LastMousePosition.y;\n                self.__ScaleX += xDelta;\n                self.__ScaleY += yDelta;\n                self.__LastMousePosition = position ;\n                self.Invalidate()\n        event.Skip();\n    \n    @property\n    def ScaleFactor(self):\n        return self.__ScaleFactor;\n\n    def ScreenToWorld(self, point):\n        return wx.Point(point.x / self.ScaleFactor , point.y / self.ScaleFactor) - self.Padding;\n\n    def WorldToScreen(self, point):\n        return (point + self.Padding);\n\n    def SetZoom(self, zoomFactor, tPoint= wx.Point(0,0)):\n        print(\"Zooming = {0}\".format(zoomFactor))\n        if(self.__CanZoom):\n            totalzoom = self.__ScrollAmount * zoomFactor;\n            zoomAmount = self.__ClapToAllowZoom(totalzoom) / self.__ScrollAmount;\n            self.__ScrollAmount *= zoomAmount;\n            self.Transform.Scale(zoomAmount, zoomAmount);\n            self.Transform.Translate(tPoint.x, tPoint.y);\n            self.Invalidate();\n\n    def __ClapToAllowZoom(self, totalZoom):\n        if(totalZoom >= self.__MaxZoom):\n            totalZoom = self.__MaxZoom;\n        if(totalZoom <= self.__MinZoom):\n            totalZoom = self.__MinZoom;\n        return totalZoom;\n\n    def _OnMouseWheel(self,event):\n        \"\"\"Scale the graph if Control is held down while the mouse wheel is scrolled.\"\"\"\n        position = event.GetPosition();\n        dpi = self.Device.GetDPI()\n        if(event.ControlDown()):\n            mousePosition = event.GetPosition() - self.Position;\n            delta = event.GetWheelRotation() / dpi[0];\n            wheel = 1 if(delta > 0) else -1;\n            zoomFactor = math.exp(wheel * self.__ZOOM_FACTOR) ;\n            Center = wx.Point( (self.Size.Width / 2) + self.Position.x, (self.Size.Height/ 2) + self.Position.y);\n            wCenter = self.ScreenToWorld(Center);\n            \n            yPosition =  ((wCenter.y / zoomFactor) - (wCenter.y / zoomFactor));\n            xPosition = ((wCenter.x / (zoomFactor)) - (wCenter.x  / zoomFactor))\n            # Scale the points;\n            xDelta = Center.x * zoomFactor;\n            yDelta = Center.y * zoomFactor;\n            \n            print(\"Scale Point ({0}, {1})\".format(xPosition, yPosition));\n            \n            self.SetZoom(zoomFactor, wx.Point(xPosition, yPosition ));\n            event.Skip();\n        elif (event.ShiftDown()):\n            # Horizontal scrolling\n            print(\"Shift Down\");\n        else:\n            # Vertical Scrolling\n            pass;\n    @property\n    def Padding(self):\n        return self.__Padding;\n    \n    @property\n    def __CanZoom(self):\n        status = False;\n        if((self.__ScrollAmount >= self.__MinZoom) and\n           (self.__ScrollAmount <= self.__MaxZoom)):\n            status = True;\n        return status;\n    \n\nif (__name__ == \"__main__\"):\n    app = wx.App();\n    frame = wx.Frame(None, id= wx.ID_ANY, size = wx.Size(500,500));\n    graph = NodeGraphWithZoomingFeature(frame);\n\n    visual = Visual(size=wx.Size(100,50), color =wx.Colour(\"#FF006757\"));\n    graph.Nodes.append(visual);\n    frame.Show();\n    app.MainLoop();\n","sub_path":"nodes/nodegraphwithzoomingfeature.py","file_name":"nodegraphwithzoomingfeature.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"259539994","text":"import time\nfrom typing import List, Generator\n\n\ndef chunks(l, n):\n    # type: (List[str], int) -> Generator[List[str], None, None]\n    \"\"\"\n    Split the list l into successive chunks of at most n items.\n\n    :param list[str] l: the list to split\n    :param int n: the maximum chunk size\n    :return: a generator over the chunks\n    \"\"\"\n    n = max(1, n)\n    return (l[i : i + n] for i in range(0, 
len(l), n))\n\n\nclass Timer:\n    def __init__(self):\n        self.dict_start = {\n            \"read_general_information\": {},\n            \"read_artifacts\": {},\n            \"read_metrics\": {},\n            \"read_params\": {},\n            \"read_tags\": {},\n            \"transmit_information\": {},\n            \"transmit_metrics\": {},\n            \"transmit_artifacts\": {},\n            \"Task.create\": {},\n            \"Task.get_task\": {},\n            \"task.export_task\": {},\n            \"task.update_task\": {},\n            \"task.connect_configuration\": {},\n        }\n        self.dict_end = {\n            \"read_general_information\": {},\n            \"read_artifacts\": {},\n            \"read_metrics\": {},\n            \"read_params\": {},\n            \"read_tags\": {},\n            \"transmit_information\": {},\n            \"transmit_metrics\": {},\n            \"transmit_artifacts\": {},\n            \"Task.create\": {},\n            \"Task.get_task\": {},\n            \"task.export_task\": {},\n            \"task.update_task\": {},\n            \"task.connect_configuration\": {},\n        }\n        self.current_milli_time = lambda: int(round(time.time() * 1000))\n\n    def start(self, operation, thread_id, experiment_id):\n        self.dict_start[operation][\n            (thread_id, experiment_id)\n        ] = self.current_milli_time()\n\n    def end(self, operation, thread_id, experiment_id):\n        self.dict_end[operation][(thread_id, experiment_id)] = self.current_milli_time()\n\n    def print_times(self):\n        res = \"\"\n        average_dict = {}\n        for op, dict_ in self.dict_start.items():\n            n = 0\n            for p, start_time in dict_.items():\n                thread_id, experiment_id = p\n                end_time = self.dict_end[op][p]\n                total_time = end_time - start_time\n                res += f\"Thread: {thread_id} experiment: {experiment_id} operation: {op} time: {total_time}ms\\n\"\n                if op in average_dict.keys():\n                    average_dict[op] = (total_time + (n * average_dict[op])) / (n + 1)\n                else:\n                    average_dict[op] = total_time\n                n += 1\n        for op, average in average_dict.items():\n            res += f\"{op} average time: {average}ms\\n\"\n        if res != \"\":\n            print(res[:-1])\n","sub_path":"scripts/mlflow_migration/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"597940676","text":"import numpy as np\nimport cv2\n\n\n# This function adds 1 to every heatmap pixel inside each box in the list.\ndef add_heat(heatmap, bbox_list):\n    # Iterate through list of bboxes\n    for box in bbox_list:\n        # Add += 1 for all pixels inside each bbox\n        heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n\n    return heatmap\n    \n# Function to apply a threshold below which the value will be set to 0.\ndef apply_threshold(heatmap, threshold):\n    # Zero out pixels below the threshold\n    heatmap[heatmap <= threshold] = 0\n    return heatmap\n\n# Function to plot the boxes containing cars obtained with the label function.\ndef draw_labeled_bboxes(img, labels):\n    # Iterate through all detected cars\n    for car_number in range(1, labels[1]+1):\n        # Find pixels with each car_number label value\n        nonzero = (labels[0] == car_number).nonzero()\n        # Identify x and y values of those pixels\n        nonzeroy = np.array(nonzero[0])\n        nonzerox = np.array(nonzero[1])\n        # Define a fixed-size bounding box around the blob centroid\n        centroidx = (np.max(nonzerox) + np.min(nonzerox))//2\n        centroidy = (np.max(nonzeroy) + np.min(nonzeroy))//2\n        size = 60\n        \n        bbox = (centroidx-size, centroidy-size), (centroidx+size, centroidy+size)\n#        bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n        # Draw the box on the image\n        cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)\n    # Return the image\n
    return img\n","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}